Telha - PI', 'pt': 'Cocal de Telha - PI'},
'55863264':{'en': u('Jos\u00e9 de Freitas - PI'), 'pt': u('Jos\u00e9 de Freitas - PI')},
'55863265':{'en': u('Uni\u00e3o - PI'), 'pt': u('Uni\u00e3o - PI')},
'55863267':{'en': 'Lagoa Alegre - PI', 'pt': 'Lagoa Alegre - PI'},
'55863269':{'en': 'Beneditinos - PI', 'pt': 'Beneditinos - PI'},
'55863271':{'en': 'Pedro II - PI', 'pt': 'Pedro II - PI'},
'55863273':{'en': 'Curralinhos - PI', 'pt': 'Curralinhos - PI'},
'55863274':{'en': 'Brasileira - PI', 'pt': 'Brasileira - PI'},
'55863276':{'en': 'Piripiri - PI', 'pt': 'Piripiri - PI'},
'55863277':{'en': u('Capit\u00e3o de Campos - PI'), 'pt': u('Capit\u00e3o de Campos - PI')},
'55863280':{'en': u('S\u00e3o Pedro do Piau\u00ed - PI'), 'pt': u('S\u00e3o Pedro do Piau\u00ed - PI')},
'55863281':{'en': u('Milton Brand\u00e3o - PI'), 'pt': u('Milton Brand\u00e3o - PI')},
'55863282':{'en': u('\u00c1gua Branca - PI'), 'pt': u('\u00c1gua Branca - PI')},
'55863284':{'en': 'Barro Duro - PI', 'pt': 'Barro Duro - PI'},
'55863285':{'en': u('Elesb\u00e3o Veloso - PI'), 'pt': u('Elesb\u00e3o Veloso - PI')},
'55863288':{'en': 'Palmeirais - PI', 'pt': 'Palmeirais - PI'},
'55863289':{'en': u('S\u00e3o Gon\u00e7alo do Piau\u00ed - PI'), 'pt': u('S\u00e3o Gon\u00e7alo do Piau\u00ed - PI')},
'55863291':{'en': 'Jardim do Mulato - PI', 'pt': 'Jardim do Mulato - PI'},
'55863292':{'en': 'Amarante - PI', 'pt': 'Amarante - PI'},
'55863293':{'en': u('Regenera\u00e7\u00e3o - PI'), 'pt': u('Regenera\u00e7\u00e3o - PI')},
'55863295':{'en': u('S\u00e3o F\u00e9lix do Piau\u00ed - PI'), 'pt': u('S\u00e3o F\u00e9lix do Piau\u00ed - PI')},
'55863297':{'en': u('Agricol\u00e2ndia - PI'), 'pt': u('Agricol\u00e2ndia - PI')},
'55863298':{'en': u('Angical do Piau\u00ed - PI'), 'pt': u('Angical do Piau\u00ed - PI')},
'55863299':{'en': u('Hugo Napole\u00e3o - PI'), 'pt': u('Hugo Napole\u00e3o - PI')},
'55863301':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863302':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863303':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863304':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55863315':{'en': u('Parna\u00edba - PI'), 'pt': u('Parna\u00edba - PI')},
'55863321':{'en': u('Parna\u00edba - PI'), 'pt': u('Parna\u00edba - PI')},
'55863322':{'en': u('Parna\u00edba - PI'), 'pt': u('Parna\u00edba - PI')},
'55863323':{'en': u('Parna\u00edba - PI'), 'pt': u('Parna\u00edba - PI')},
'55863326':{'en': 'Timon - MA', 'pt': 'Timon - MA'},
'55863332':{'en': u('Caxing\u00f3 - PI'), 'pt': u('Caxing\u00f3 - PI')},
'55863340':{'en': u('Matias Ol\u00edmpio - PI'), 'pt': u('Matias Ol\u00edmpio - PI')},
'55863343':{'en': 'Piracuruca - PI', 'pt': 'Piracuruca - PI'},
'55863345':{'en': u('S\u00e3o Jo\u00e3o da Fronteira - PI'), 'pt': u('S\u00e3o Jo\u00e3o da Fronteira - PI')},
'55863346':{'en': u('S\u00e3o Jos\u00e9 do Divino - PI'), 'pt': u('S\u00e3o Jos\u00e9 do Divino - PI')},
'55863347':{'en': 'Batalha - PI', 'pt': 'Batalha - PI'},
'55863360':{'en': 'Joaquim Pires - PI', 'pt': 'Joaquim Pires - PI'},
'55863362':{'en': 'Cocal - PI', 'pt': 'Cocal - PI'},
'55863363':{'en': 'Buriti dos Lopes - PI', 'pt': 'Buriti dos Lopes - PI'},
'55863366':{'en': u('Lu\u00eds Correia - PI'), 'pt': u('Lu\u00eds Correia - PI')},
'55863367':{'en': u('Lu\u00eds Correia - PI'), 'pt': u('Lu\u00eds Correia - PI')},
'55863369':{'en': 'Cajueiro da Praia - PI', 'pt': 'Cajueiro da Praia - PI'},
'55863383':{'en': 'Esperantina - PI', 'pt': 'Esperantina - PI'},
'55863385':{'en': u('S\u00e3o Jo\u00e3o do Arraial - PI'), 'pt': u('S\u00e3o Jo\u00e3o do Arraial - PI')},
'55863393':{'en': u('Luzil\u00e2ndia - PI'), 'pt': u('Luzil\u00e2ndia - PI')},
'55863474':{'en': 'Pimenteiras - PI', 'pt': 'Pimenteiras - PI'},
'55863477':{'en': 'Inhuma - PI', 'pt': 'Inhuma - PI'},
'55863582':{'en': u('S\u00e3o Raimundo Nonato - PI'), 'pt': u('S\u00e3o Raimundo Nonato - PI')},
'55864009':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'55864020':{'en': 'Teresina - PI', 'pt': 'Teresina - PI'},
'5587':{'en': 'Pernambuco', 'pt': 'Pernambuco'},
'55872101':{'en': 'Petrolina - PE', 'pt': 'Petrolina - PE'},
'55873031':{'en': 'Petrolina - PE', 'pt': 'Petrolina - PE'},
'55873032':{'en': 'Petrolina - PE', 'pt': 'Petrolina - PE'},
'55873035':{'en': 'Petrolina - PE', 'pt': 'Petrolina - PE'},
'55873201':{'en': 'Petrolina - PE', 'pt': 'Petrolina - PE'},
'55873202':{'en': 'Petrolina - PE', 'pt': 'Petrolina - PE'},
'55873221':{'en': 'Garanhuns - PE', 'pt': 'Garanhuns - PE'},
'55873272':{'en': 'Parnamirim - RN', 'pt': 'Parnamirim - RN'},
'55873761':{'en': 'Garanhuns - PE', 'pt': 'Garanhuns - PE'},
'55873763':{'en': 'Garanhuns - PE', 'pt': 'Garanhuns - PE'},
'55873764':{'en': 'Garanhuns - PE', 'pt': 'Garanhuns - PE'},
'55873771':{'en': 'Bom Conselho - PE', 'pt': 'Bom Conselho - PE'},
'55873772':{'en': 'Correntes - PE', 'pt': 'Correntes - PE'},
'55873773':{'en': 'Lajedo - PE', 'pt': 'Lajedo - PE'},
'55873775':{'en': u('\u00c1guas Belas - PE'), 'pt': u('\u00c1guas Belas - PE')},
'55873779':{'en': 'Jupi - PE', 'pt': 'Jupi - PE'},
'55873781':{'en': 'Canhotinho - PE', 'pt': 'Canhotinho - PE'},
'55873782':{'en': u('Salo\u00e1 - PE'), 'pt': u('Salo\u00e1 - PE')},
'55873783':{'en': u('Caet\u00e9s - PE'), 'pt': u('Caet\u00e9s - PE')},
'55873784':{'en': u('S\u00e3o Jo\u00e3o - PE'), 'pt': u('S\u00e3o Jo\u00e3o - PE')},
'55873785':{'en': 'Lagoa do Ouro - PE', 'pt': 'Lagoa do Ouro - PE'},
'55873786':{'en': 'Iati - PE', 'pt': 'Iati - PE'},
'55873787':{'en': 'Paranatama - PE', 'pt': 'Paranatama - PE'},
'55873788':{'en': 'Angelim - PE', 'pt': 'Angelim - PE'},
'55873789':{'en': u('Brej\u00e3o - PE'), 'pt': u('Brej\u00e3o - PE')},
'55873791':{'en': 'Palmeirina - PE', 'pt': 'Palmeirina - PE'},
'55873792':{'en': 'Terezinha - PE', 'pt': 'Terezinha - PE'},
'55873793':{'en': u('Cal\u00e7ado - PE'), 'pt': u('Cal\u00e7ado - PE')},
'55873794':{'en': 'Ibirajuba - PE', 'pt': 'Ibirajuba - PE'},
'55873795':{'en': 'Jurema - PE', 'pt': 'Jurema - PE'},
'55873796':{'en': 'Capoeiras - PE', 'pt': 'Capoeiras - PE'},
'55873798':{'en': 'Bom Conselho - PE', 'pt': 'Bom Conselho - PE'},
'55873803':{'en': 'Pesqueira - PE', 'pt': 'Pesqueira - PE'},
'55873809':{'en': 'Iguaraci - PE', 'pt': 'Iguaraci - PE'},
'55873811':{'en': 'Jirau - PE', 'pt': 'Jirau - PE'},
'55873816':{'en': u('Bu\u00edque - PE'), 'pt': u('Bu\u00edque - PE')},
'55873817':{'en': 'Alagoinha - PE', 'pt': 'Alagoinha - PE'},
'55873821':{'en': 'Arcoverde - PE', 'pt': 'Arcoverde - PE'},
'55873822':{'en': 'Arcoverde - PE', 'pt': 'Arcoverde - PE'},
'55873828':{'en': 'Tuparetama - PE', 'pt': 'Tuparetama - PE'},
'55873829':{'en': 'Ingazeira - PE', 'pt': 'Ingazeira - PE'},
'55873830':{'en': u('Solid\u00e3o - PE'), 'pt': u('Solid\u00e3o - PE')},
'55873831':{'en': 'Serra Talhada - PE', 'pt': 'Serra Talhada - PE'},
'55873833':{'en': 'Venturosa - PE', 'pt': 'Venturosa - PE'},
'55873834':{'en': u('Po\u00e7\u00e3o - PE'), 'pt': u('Po\u00e7\u00e3o - PE')},
'55873835':{'en': 'Pesqueira - PE', 'pt': 'Pesqueira - PE'},
'55873836':{'en': u('Sanhar\u00f3 - PE'), 'pt': u('Sanhar\u00f3 - PE')},
'55873837':{'en': 'Iguaraci - PE', 'pt': 'Iguaraci - PE'},
'55873838':{'en': 'Afogados da Ingazeira - PE', 'pt': 'Afogados da Ingazeira - PE'},
'55873839':{'en': 'Alagoinha - PE', 'pt': 'Alagoinha - PE'},
'55873840':{'en': u('Inaj\u00e1 - PE'), 'pt': u('Inaj\u00e1 - PE')},
'55873841':{'en': u('Sert\u00e2nia - PE'), 'pt': u('Sert\u00e2nia - PE')},
'55873842':{'en': 'Ibimirim - PE', 'pt': 'Ibimirim - PE'},
'55873843':{'en': 'Tacaratu - PE', 'pt': 'Tacaratu - PE'},
'55873844':{'en': u('S\u00e3o Jos\u00e9 do Egito - PE'), 'pt': u('S\u00e3o Jos\u00e9 do Egito - PE')},
'55873845':{'en': 'Calumbi - PE', 'pt': 'Calumbi - PE'},
'55873846':{'en': 'Triunfo - PE', 'pt': 'Triunfo - PE'},
'55873847':{'en': 'Tabira - PE', 'pt': 'Tabira - PE'},
'55873848':{'en': u('Cust\u00f3dia - PE'), 'pt': u('Cust\u00f3dia - PE')},
'55873849':{'en': u('Ita\u00edba - PE'), 'pt': u('Ita\u00edba - PE')},
'55873850':{'en': 'Brejinho - PE', 'pt': 'Brejinho - PE'},
'55873851':{'en': u('Petrol\u00e2ndia - PE'), 'pt': u('Petrol\u00e2ndia - PE')},
'55873852':{'en': u('Bet\u00e2nia - PE'), 'pt': u('Bet\u00e2nia - PE')},
'55873853':{'en': 'Itapetim - PE', 'pt': 'Itapetim - PE'},
'55873854':{'en': u('Carna\u00edba - PE'), 'pt': u('Carna\u00edba - PE')},
'55873855':{'en': u('Bu\u00edque - PE'), 'pt': u('Bu\u00edque - PE')},
'55873856':{'en': 'Tupanatinga - PE', 'pt': 'Tupanatinga - PE'},
'55873857':{'en': 'Flores - PE', 'pt': 'Flores - PE'},
'55873858':{'en': 'Pedra - PE', 'pt': 'Pedra - PE'},
'55873859':{'en': 'Santa Terezinha - PE', 'pt': 'Santa Terezinha - PE'},
'5587386':{'en': 'Petrolina - PE', 'pt': 'Petrolina - PE'},
'55873865':{'en': 'Dormentes - PE', 'pt': 'Dormentes - PE'},
'55873868':{'en': u('Afr\u00e2nio - PE'), 'pt': u('Afr\u00e2nio - PE')},
'55873869':{'en': 'Santa Maria da Boa Vista - PE', 'pt': 'Santa Maria da Boa Vista - PE'},
'55873870':{'en': 'Trindade - PE', 'pt': 'Trindade - PE'},
'55873871':{'en': 'Salgueiro - PE', 'pt': 'Salgueiro - PE'},
'55873872':{'en': 'Araripina - PE', 'pt': 'Araripina - PE'},
'55873873':{'en': 'Araripina - PE', 'pt': 'Araripina - PE'},
'55873874':{'en': 'Ouricuri - PE', 'pt': 'Ouricuri - PE'},
'55873875':{'en': u('Cabrob\u00f3 - PE'), 'pt': u('Cabrob\u00f3 - PE')},
'55873876':{'en': u('Bel\u00e9m de S\u00e3o Francisco - PE'), 'pt': u('Bel\u00e9m de S\u00e3o Francisco - PE')},
'55873877':{'en': 'Floresta - PE', 'pt': 'Floresta - PE'},
'55873878':{'en': u('Bodoc\u00f3 - PE'), 'pt': u('Bodoc\u00f3 - PE')},
'55873879':{'en': 'Exu - PE', 'pt': 'Exu - PE'},
'55873880':{'en': 'Granito - PE', 'pt': 'Granito - PE'},
'55873881':{'en': 'Ipubi - PE', 'pt': 'Ipubi - PE'},
'55873882':{'en': 'Serrita - PE', 'pt': 'Serrita - PE'},
'55873883':{'en': 'Parnamirim - PE', 'pt': 'Parnamirim - PE'},
'55873884':{'en': u('S\u00e3o Jos\u00e9 do Belmonte - PE'), 'pt': u('S\u00e3o Jos\u00e9 do Belmonte - PE')},
'55873885':{'en': 'Mirandiba - PE', 'pt': 'Mirandiba - PE'},
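# A minimal lookup sketch (illustration only, not part of the original data
# file): geocoding tables like the fragment above map phone-number prefixes to
# localized descriptions and are queried by longest-prefix match. The names
# 'data' and 'description_for_number' are assumptions for this sketch.
# def description_for_number(data, number, lang):
#     for end in range(len(number), 1, -1):
#         entry = data.get(number[:end])
#         if entry:
#             return entry.get(lang)
#     return None
# description_for_number(data, '558632761234', 'pt')  # -> 'Piripiri - PI'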
9.23844143e-01,
],
[
1.19368104e00,
1.01462773e00,
8.94331130e-01,
8.51431703e-01,
8.94331130e-01,
1.01462773e00,
1.19368104e00,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array(
[
[
4.0129619,
3.93137849,
3.87656959,
3.85702467,
3.87656959,
3.93137849,
4.0129619,
],
[
3.88996841,
3.78671803,
3.71143853,
3.68275081,
3.71143853,
3.78671803,
3.88996841,
],
[
3.80087151,
3.67134376,
3.60166506,
3.58311968,
3.60166506,
3.67134376,
3.80087151,
],
[
3.7671309,
3.62390909,
3.57243062,
3.53580973,
3.57243062,
3.62390909,
3.7671309,
],
[
3.80091105,
3.67136809,
3.60166829,
3.58311968,
3.60166829,
3.67136809,
3.80091105,
],
[
3.89003482,
3.78675428,
3.7114494,
3.68275081,
3.7114494,
3.78675428,
3.89003482,
],
[
4.01304347,
3.9314197,
3.87658093,
3.85702467,
3.87658093,
3.9314197,
4.01304347,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
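# NOTE: the assert-or-print pattern above repeats for every expected array in
# this test; a small helper (hypothetical, not in the original) could factor
# it out:
# def check_or_print(expected, actual, do_tests):
#     if do_tests:
#         np.testing.assert_allclose(expected, actual, rtol=0, atol=0.01)
#     else:
#         print(repr(actual))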
# Source instance
# - Tectonic region: active
# - Mech is RS
origin.setMechanism("RS")
origin._tectonic_region = "Active Shallow Crust"
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array(
[
[
7.76090807e-01,
6.49225734e-01,
5.63995966e-01,
5.33602932e-01,
5.63995966e-01,
6.49225734e-01,
7.76090807e-01,
],
[
5.84831599e-01,
4.24273624e-01,
3.07211355e-01,
2.62600941e-01,
3.07211355e-01,
4.24273624e-01,
5.84831599e-01,
],
[
4.46282784e-01,
2.44862590e-01,
1.32264468e-01,
9.99797788e-02,
1.32264468e-01,
2.44862590e-01,
4.46282784e-01,
],
[
3.93814955e-01,
1.70987945e-01,
8.13717378e-02,
1.03958777e-05,
8.13717378e-02,
1.70987945e-01,
3.93814955e-01,
],
[
4.46344282e-01,
2.44900424e-01,
1.32270097e-01,
9.99797788e-02,
1.32270097e-01,
2.44900424e-01,
4.46344282e-01,
],
[
5.84934876e-01,
4.24329999e-01,
3.07228262e-01,
2.62600941e-01,
3.07228262e-01,
4.24329999e-01,
5.84934876e-01,
],
[
7.76217650e-01,
6.49289812e-01,
5.64013604e-01,
5.33602932e-01,
5.64013604e-01,
6.49289812e-01,
7.76217650e-01,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array(
[
[
3.42235562,
3.338452,
3.28208435,
3.26198358,
3.28208435,
3.338452,
3.42235562,
],
[
3.29586422,
3.18967743,
3.112257,
3.08275341,
3.112257,
3.18967743,
3.29586422,
],
[
3.20423343,
3.07102195,
2.99912626,
2.97986242,
2.99912626,
3.07102195,
3.20423343,
],
[
3.16953325,
3.02223204,
2.96875925,
2.92616469,
2.96875925,
3.02223204,
3.16953325,
],
[
3.2042741,
3.07104698,
2.99912962,
2.97986242,
2.99912962,
3.07104698,
3.2042741,
],
[
3.29593253,
3.18971471,
3.11226818,
3.08275341,
3.11226818,
3.18971471,
3.29593253,
],
[
3.42243951,
3.33849438,
3.28209601,
3.26198358,
3.28209601,
3.33849438,
3.42243951,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Source instance
# - Tectonic region: active
# - Mech is NM
origin.setMechanism("NM")
origin._tectonic_region = "Active Shallow Crust"
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array(
[
[
8.32771820e-01,
6.96170087e-01,
6.04399092e-01,
5.71673449e-01,
6.04399092e-01,
6.96170087e-01,
8.32771820e-01,
],
[
6.26833822e-01,
4.53953319e-01,
3.27906737e-01,
2.79872556e-01,
3.27906737e-01,
4.53953319e-01,
6.26833822e-01,
],
[
4.77651641e-01,
2.60772819e-01,
1.38685718e-01,
1.03235484e-01,
1.38685718e-01,
2.60772819e-01,
4.77651641e-01,
],
[
4.21157003e-01,
1.81206068e-01,
8.28029065e-02,
1.03958777e-05,
8.28029065e-02,
1.81206068e-01,
4.21157003e-01,
],
[
4.77717859e-01,
2.60813557e-01,
1.38691898e-01,
1.03235484e-01,
1.38691898e-01,
2.60813557e-01,
4.77717859e-01,
],
[
6.26945025e-01,
4.54014020e-01,
3.27924941e-01,
2.79872556e-01,
3.27924941e-01,
4.54014020e-01,
6.26945025e-01,
],
[
8.32908398e-01,
6.96239083e-01,
6.04418084e-01,
5.71673449e-01,
6.04418084e-01,
6.96239083e-01,
8.32908398e-01,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array(
[
[
3.3192606,
3.22072248,
3.15452316,
3.13091641,
3.15452316,
3.22072248,
3.3192606,
],
[
3.17070653,
3.0459986,
2.95507447,
2.92042485,
2.95507447,
3.0459986,
3.17070653,
],
[
3.06309346,
2.90664719,
2.82107391,
2.79752673,
2.82107391,
2.90664719,
3.06309346,
],
[
3.02234086,
2.84931729,
2.78395476,
2.73772697,
2.78395476,
2.84931729,
3.02234086,
],
[
3.06314123,
2.90667658,
2.82107802,
2.79752673,
2.82107802,
2.90667658,
3.06314123,
],
[
3.17078675,
3.04604238,
2.9550876,
2.92042485,
2.9550876,
3.04604238,
3.17078675,
],
[
3.31935913,
3.22077225,
3.15453686,
3.13091641,
3.15453686,
3.22077225,
3.31935913,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Source instance
# - Tectonic region: active
# - Mech is SS
origin.setMechanism("SS")
origin._tectonic_region = "Active Shallow Crust"
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array(
[
[
1.95958776e00,
1.66988434e00,
1.47525745e00,
1.40585328e00,
1.47525745e00,
1.66988434e00,
1.95958776e00,
],
[
1.52283677e00,
1.15619376e00,
8.88875589e-01,
7.87005240e-01,
8.88875589e-01,
1.15619376e00,
1.52283677e00,
],
[
1.20645289e00,
7.46498734e-01,
4.23057706e-01,
2.95503135e-01,
4.23057706e-01,
7.46498734e-01,
1.20645289e00,
],
[
1.08663970e00,
5.76051478e-01,
2.21984054e-01,
1.98278110e-05,
2.21984054e-01,
5.76051478e-01,
1.08663970e00,
],
[
1.20659332e00,
7.46585130e-01,
4.23079943e-01,
2.95503135e-01,
4.23079943e-01,
7.46585130e-01,
1.20659332e00,
],
[
1.52307261e00,
1.15632249e00,
8.88914196e-01,
7.87005240e-01,
8.88914196e-01,
1.15632249e00,
1.52307261e00,
],
[
1.95987741e00,
1.67003067e00,
1.47529773e00,
1.40585328e00,
1.47529773e00,
1.67003067e00,
1.95987741e00,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array(
[
[
2.54969772,
2.27038241,
2.08273439,
2.01581889,
2.08273439,
2.27038241,
2.54969772,
],
[
2.12860763,
1.77511159,
1.51737884,
1.41916133,
1.51737884,
1.77511159,
2.12860763,
],
[
1.82356854,
1.38010729,
1.08693739,
0.97911408,
1.08693739,
1.38010729,
1.82356854,
],
[
1.70805158,
1.21626476,
0.91696757,
0.78911491,
0.91696757,
1.21626476,
1.70805158,
],
[
1.82370394,
1.38019059,
1.08695619,
0.97911408,
1.08695619,
1.38019059,
1.82370394,
],
[
2.12883501,
1.77523571,
1.51741606,
1.41916133,
1.51741606,
1.77523571,
2.12883501,
],
[
2.54997699,
2.27052349,
2.08277323,
2.01581889,
2.08277323,
2.27052349,
2.54997699,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Source instance
# - Tectonic region: stable
# - Mech is all
origin.setMechanism("ALL")
origin._tectonic_region = "Stable Shallow Crust"
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array(
[
[
1.49285078e00,
1.26359361e00,
1.10957536e00,
1.05465228e00,
1.10957536e00,
1.26359361e00,
1.49285078e00,
],
[
1.14722732e00,
8.57083889e-01,
6.45541307e-01,
5.64926073e-01,
6.45541307e-01,
8.57083889e-01,
1.14722732e00,
],
[
8.96856520e-01,
5.32871196e-01,
2.99662245e-01,
2.17185537e-01,
2.99662245e-01,
5.32871196e-01,
8.96856520e-01,
],
[
8.02042196e-01,
3.98587924e-01,
1.69648145e-01,
1.98278110e-05,
1.69648145e-01,
3.98587924e-01,
8.02042196e-01,
],
[
8.96967653e-01,
5.32939565e-01,
2.99676623e-01,
2.17185537e-01,
2.99676623e-01,
5.32939565e-01,
8.96967653e-01,
],
[
1.14741395e00,
8.57185764e-01,
6.45571858e-01,
5.64926073e-01,
6.45571858e-01,
8.57185764e-01,
1.14741395e00,
],
[
1.49308000e00,
1.26370940e00,
1.10960724e00,
1.05465228e00,
1.10960724e00,
1.26370940e00,
1.49308000e00,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array(
[
[
4.17967552,
4.07332411,
4.00187571,
3.97639713,
4.00187571,
4.07332411,
4.17967552,
],
[
4.01934229,
3.88474601,
3.78661232,
3.74921526,
3.78661232,
3.88474601,
4.01934229,
],
[
3.90319636,
3.73434515,
3.64558217,
3.62308648,
3.64558217,
3.73434515,
3.90319636,
],
[
3.85921241,
3.67256434,
3.61012056,
3.57133422,
3.61012056,
3.67256434,
3.85921241,
],
[
3.90324792,
3.73437686,
3.64558609,
3.62308648,
3.64558609,
3.73437686,
3.90324792,
],
[
4.01942887,
3.88479327,
3.7866265,
3.74921526,
3.7866265,
3.88479327,
4.01942887,
],
[
4.17978186,
4.07337783,
4.0018905,
3.97639713,
4.0018905,
4.07337783,
4.17978186,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Source instance
# - Tectonic region: stable
# - Mech is RS
origin.setMechanism("RS")
origin._tectonic_region = "Stable Shallow Crust"
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array(
[
[
1.11052523e00,
9.25877479e-01,
8.01828481e-01,
7.57592465e-01,
8.01828481e-01,
9.25877479e-01,
1.11052523e00,
],
[
8.32154030e-01,
5.98467416e-01,
4.28087307e-01,
3.63158382e-01,
4.28087307e-01,
5.98467416e-01,
8.32154030e-01,
],
[
6.30500991e-01,
3.37340822e-01,
1.69925286e-01,
1.20068361e-01,
1.69925286e-01,
3.37340822e-01,
6.30500991e-01,
],
[
5.54135870e-01,
2.29725567e-01,
9.13321474e-02,
1.03958777e-05,
9.13321474e-02,
2.29725567e-01,
5.54135870e-01,
],
[
6.30590499e-01,
3.37395888e-01,
1.69933978e-01,
1.20068361e-01,
1.69933978e-01,
3.37395888e-01,
6.30590499e-01,
],
[
8.32304345e-01,
5.98549467e-01,
4.28111914e-01,
3.63158382e-01,
4.28111914e-01,
5.98549467e-01,
8.32304345e-01,
],
[
1.11070985e00,
9.25970743e-01,
8.01854154e-01,
7.57592465e-01,
8.01854154e-01,
9.25970743e-01,
1.11070985e00,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array(
[
[
3.4885951,
3.37216961,
3.29395331,
3.26606128,
3.29395331,
3.37216961,
3.4885951,
],
[
3.3130744,
3.16572856,
3.05829921,
3.01735974,
3.05829921,
3.16572856,
3.3130744,
],
[
3.18592661,
3.00108105,
2.90341742,
2.87839095,
2.90341742,
3.00108105,
3.18592661,
],
[
3.1377763,
2.9334351,
2.86396637,
2.81798622,
2.86396637,
2.9334351,
3.1377763,
],
[
3.18598305,
3.00111577,
2.90342178,
2.87839095,
2.90342178,
3.00111577,
3.18598305,
],
[
3.31316918,
3.16578029,
3.05831472,
3.01735974,
3.05831472,
3.16578029,
3.31316918,
],
[
3.48871151,
3.37222842,
3.29396949,
3.26606128,
3.29396949,
3.37222842,
3.48871151,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Source instance
# - Tectonic region: stable
# - Mech is NM
origin.setMechanism("NM")
origin._tectonic_region = "Stable Shallow Crust"
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array(
[
[
1.12678662e00,
9.39133949e-01,
8.13066202e-01,
7.68110298e-01,
8.13066202e-01,
9.39133949e-01,
1.12678662e00,
],
[
8.43885262e-01,
6.06395679e-01,
4.33242838e-01,
3.67257274e-01,
4.33242838e-01,
6.06395679e-01,
8.43885262e-01,
],
[
6.38950562e-01,
3.41019564e-01,
1.70913434e-01,
1.20272659e-01,
1.70913434e-01,
3.41019564e-01,
6.38950562e-01,
],
[
5.61342691e-01,
2.31653894e-01,
9.10846554e-02,
1.03958777e-05,
9.10846554e-02,
2.31653894e-01,
5.61342691e-01,
],
[
6.39041527e-01,
3.41075526e-01,
1.70922263e-01,
1.20272659e-01,
1.70922263e-01,
3.41075526e-01,
6.39041527e-01,
],
[
8.44038024e-01,
6.06479066e-01,
4.33267846e-01,
3.67257274e-01,
4.33267846e-01,
6.06479066e-01,
8.44038024e-01,
],
[
1.12697424e00,
9.39228730e-01,
8.13092292e-01,
7.68110298e-01,
8.13092292e-01,
9.39228730e-01,
1.12697424e00,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array(
[
[
3.42781739,
3.30181908,
3.21717161,
3.18698623,
3.21717161,
3.30181908,
3.42781739,
],
[
3.23786489,
3.07840387,
2.96214139,
2.91783576,
2.96214139,
3.07840387,
3.23786489,
],
[
3.10026266,
2.9002186,
2.79362772,
2.76581535,
2.79362772,
2.9002186,
3.10026266,
],
[
3.0481533,
2.82698693,
2.74978504,
2.70136713,
2.74978504,
2.82698693,
3.0481533,
],
[
3.10032374,
2.90025617,
2.79363257,
2.76581535,
2.79363257,
2.90025617,
3.10032374,
],
[
3.23796746,
3.07845986,
2.96215818,
2.91783576,
2.96215818,
3.07845986,
3.23796746,
],
[
3.42794337,
3.30188272,
3.21718913,
3.18698623,
3.21718913,
3.30188272,
3.42794337,
],
]
)
if do_tests is True:
np.testing.assert_allclose(rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Source instance
# - Tectonic region: stable
# - Mech is SS
origin.setMechanism("SS")
origin._tectonic_region = "Stable Shallow Crust"
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array(
[
[
1.80104893e00,
1.52092305e00,
1.33273049e00,
1.26562081e00,
1.33273049e00,
1.52092305e00,
1.80104893e00,
],
[
1.37873685e00,
1.02421498e00,
7.65734302e-01,
6.67231768e-01,
7.65734302e-01,
1.02421498e00,
1.37873685e00,
],
[
1.07281256e00,
6.28064399e-01,
3.42919369e-01,
2.41987662e-01,
3.42919369e-01,
6.28064399e-01,
1.07281256e00,
],
[
9.56960370e-01,
4.63980672e-01,
1.83813296e-01,
1.98278110e-05,
1.83813296e-01,
4.63980672e-01,
9.56960370e-01,
],
[
"""
Module for PAMPAC action classes.
"""
from abc import ABC, abstractmethod
from gatenlp import Annotation
from gatenlp.features import Features
class Getter(ABC):
"""
Common base class of all Getter helper classes.
"""
@abstractmethod
def __call__(self, succ, context=None, location=None):
pass
def _get_match(succ, name, resultidx=0, matchidx=0, silent_fail=False):
"""
Helper method to return the match info for the given result index and name, or None.
Args:
succ: success instance
name: name of the match info
resultidx: index of the result in success
matchidx: if there is more than one matching match info with that name, which one to return
silent_fail: if True, return None, if False, raise an exception if the match info is not present
Returns:
the match info or None
"""
if resultidx >= len(succ):
if not silent_fail:
raise Exception(f"No resultidx {resultidx}, only {len(succ)} results")
return None
res = succ[resultidx]
matches = res.matches4name(name)
if not matches:
if not silent_fail:
raise Exception(f"No match info with name {name} in result")
return None
if matchidx >= len(matches):
if not silent_fail:
raise Exception(
f"No match info with index {matchidx}, length is {len(matches)}"
)
return None
return matches[matchidx]
# pylint: disable=R0912
def _get_span(succ, name, resultidx=0, matchidx=0, silent_fail=False):
"""
Helper method to return the span for the given result index and name, or None.
Args:
succ: success instance
name: name of the match info, if None, uses the entire span of the result
resultidx: index of the result in success
matchidx: if there is more than one match info with that name, which one to return, if no name, ignored
silent_fail: if True, return None, if False, raise an exception if the match info is not present
Returns:
the span or None if no Span exists
"""
if resultidx >= len(succ):
if not silent_fail:
raise Exception(f"No resultidx {resultidx}, only {len(succ)} results")
return None
res = succ[resultidx]
if name:
matches = res.matches4name(name)
if not matches:
if not silent_fail:
raise Exception(f"No match info with name {name} in result")
return None
if matchidx >= len(matches):
if not silent_fail:
raise Exception(
f"No match info with index {matchidx}, length is {len(matches)}"
)
return None
ret = matches[matchidx].get("span")
else:
ret = res.span
if ret is None:
if silent_fail:
return None
else:
raise Exception("No span found")
return ret
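# Usage illustration (the 'succ' object here is hypothetical, not part of
# this module): with silent_fail=True the helper degrades to returning None
# instead of raising when the name or index cannot be resolved, e.g.
# _get_span(succ, "entity", resultidx=0, matchidx=0, silent_fail=True)
# returns the span of the first "entity" match info of the first result,
# or None if there is no such match info.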
class Actions:
"""
A container to run several actions for a rule.
"""
def __init__(
self,
*actions,
):
"""
Wrap several actions for use in a rule.
Args:
*actions: any number of actions to run.
"""
self.actions = list(actions)
def __call__(self, succ, context=None, location=None):
"""
Invokes the actions defined for this wrapper in sequence and
returns one of the following: for no wrapped actions, no action is invoked and None is returned;
for exactly one action the return value of that action is returned, for 2 or more actions
a list with the return values of each of those actions is returned.
Args:
succ: the success object
context: the context
location: the location
Returns: None, action return value or list of action return values
"""
if len(self.actions) == 1:
return self.actions[0](succ, context=context, location=location)
elif len(self.actions) == 0:
return None
else:
ret = []
for action in self.actions:
ret.append(action(succ, context=context, location=location))
return ret
def add(self, action, tofront=False):
"""
Add an action to the list of existing actions.
Args:
action: the action to add
tofront: if True, add as first instead of last action
"""
if tofront:
self.actions.insert(0, action)
else:
self.actions.append(action)
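# Dispatch illustration (a1/a2 are hypothetical callables, not part of this
# module), mirroring the __call__ logic above:
# Actions()(succ)        # no wrapped actions  -> returns None
# Actions(a1)(succ)      # exactly one action  -> returns a1's return value
# Actions(a1, a2)(succ)  # two or more actions -> returns [value1, value2]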
class AddAnn:
"""
Action for adding an annotation.
"""
def __init__(
self,
name=None,
ann=None, # create a copy of this ann retrieved with GetAnn
type=None, # or create a new annotation with this type
annset=None, # if not none, create in this set instead of the one used for matching
features=None,
span=None, # use literal span, GetSpan, if none, span from match
resultidx=0,
matchidx=0,
silent_fail=False,
): # pylint: disable=W0622
"""
Create an action for adding a new annotation to the outset.
Args:
name: the name of the match to use for getting the annotation span, if None, use the
whole span of each match
ann: either an Annotation which will be (deep) copied to create the new annotation, or
a GetAnn helper for copying the annotation the helper returns. If this is specified, the
other parameters for creating a new annotation are ignored.
type: the type of a new annotation to create
annset: if not None, create the new annotation in this set instead of the one used for matching
features: the features of a new annotation to create. This can be a GetFeatures helper for copying
the features from another annotation in the results
span: the span of the annotation, this can be a GetSpan helper for copying the span from another
annotation in the results
resultidx: the index of the result to use if more than one result is in the Success. If None,
the AddAnn action is performed for all results
matchidx: the index of the match info to use if more than one item matches the given name. If None,
the AddAnn action is performed for all match info items with that name.
silent_fail: if True and the annotation can not be created for some reason, just do silently nothing,
otherwise raises an Exception.
"""
# span is either a span, the index of a match info to take the span from, or a callable that will return the
# span at firing time
assert type is not None or ann is not None
self.name = name
self.anntype = type
self.ann = ann
self.features = features
self.span = span
self.resultidx = resultidx
self.matchidx = matchidx
self.silent_fail = silent_fail
self.annset = annset
# pylint: disable=R0912
def _add4span(self, span, succ, context, location):
if span is None:
return
if self.annset is not None:
outset = self.annset
else:
outset = context.outset
if self.ann:
if isinstance(self.ann, Annotation):
outset.add_ann(self.ann.deepcopy())
else:
ann = self.ann(succ)
if ann is None:
if self.silent_fail:
return
else:
raise Exception("No matching annotation found")
outset.add_ann(ann)
else:
if self.span:
if callable(self.span):
span = self.span(succ, context=context, location=location)
else:
span = self.span
if callable(self.anntype):
anntype = self.anntype(succ, context=context, location=location)
else:
anntype = self.anntype
if self.features:
if callable(self.features):
features = self.features(succ, context=context, location=location)
else:
# NOTE: if we got a dictionary where some values are helpers, we need to run the helper
# and replace the value with the result. However, this would change the original dictionary
# just the first time if there are several matches, so we always shallow copy the features
# first!
features = self.features.copy()
for k, v in features.items():
if isinstance(v, Getter):
features[k] = v(succ, context=context, location=location)
else:
features = None
outset.add(span.start, span.end, anntype, features=features)
def _add4result(self, succ, resultidx, context, location):
if self.matchidx is None:
for matchidx in range(len(succ[resultidx].matches)):
span = _get_span(succ, self.name, resultidx, matchidx, self.silent_fail)
# print(f"DEBUG: midx=None, running for {matchidx}, span={span}")
self._add4span(span, succ, context, location)
else:
span = _get_span(succ, self.name, resultidx, self.matchidx, self.silent_fail)
# print(f"DEBUG: running for {self.matchidx}, span={span}")
self._add4span(span, succ, context, location)
def __call__(self, succ, context=None, location=None):
if self.resultidx is None:
for resultidx in range(len(succ)):
# print(f"DEBUG: ridx=None, running for {resultidx}")
self._add4result(succ, resultidx, context, location)
else:
# print(f"DEBUG: running for {self.resultidx}")
self._add4result(succ, self.resultidx, context, location)
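# Usage illustration (hypothetical match name and type, not part of this
# module): a rule action that adds a new "Token" annotation over the span of
# the match named "tok", copying nothing from existing annotations:
# action = AddAnn(name="tok", type="Token", features={"kind": "word"})
# Passing resultidx=None instead would fire the action for every result in
# the Success, and matchidx=None for every match info with that name.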
class UpdateAnnFeatures:
"""
Action for updating the features of an annotation.
"""
def __init__(
self,
name=None,
updateann=None,
fromann=None,
features=None,
replace=False, # replace existing features rather than updating
resultidx=0,
matchidx=0,
silent_fail=False,
deepcopy=False
):
"""
Create an UpdateAnnFeatures action. The features to use for updating can either come from
an existing annotation, an annotation fetched with a GetAnn annotation getter, or from a
features instance, a feature getter, or a dictionary.
Args:
name: the name of the match to use for getting the annotation to modify (if updateann is not
specified). This must be None if updateann is specified.
updateann: if specified, update the features of this annotation. This can be either a literal
annotation or a GetAnn helper to access another annotation from the result.
fromann: if specified, use the features of this annotation for the update; like updateann, this
can be either a literal annotation or a GetAnn helper to access another annotation from the result.
recursively.
This method is designed to initialize each layer instance once, even if the
same layer instance occurs in multiple places in the network. This enables
weight sharing to be implemented as layer sharing.
Args:
input_shapes: A tuple representing a shape (if this layer takes one input)
or a tuple of shapes (if this layer takes more than one input).
For example: (210, 160, 3) or ((210, 160, 3), (105, 80, 3)).
input_dtype: Numpy dtype(s) for each of the inputs.
rng: A PRNG key for random number generation.
Returns:
A (params, state) tuple, in which params contains newly created parameters
on the first call and () on all subsequent calls.
"""
try:
# Initialize params once; store them for use when this layer is called.
# Needs to call new_params_and_state regardless of _init_finished because
# state also needs to be initialized. After jitting, graph pruning should
# be able to remove unnecessary computation.
# TODO(lukaszkaiser): Revisit this decision and see whether layers sharing
# params should also share states.
params, state = self.new_params_and_state(input_shapes, input_dtype, rng)
if not self._init_finished:
self._init_finished = True
self._params = params
self._state = state
else:
params = ()
return (params, state)
except Exception:
name, trace = self.__class__.__name__, _short_traceback(skip=3)
raise LayerError(name, 'initialize_once', self._caller, input_shapes,
input_dtype, trace)
# XXX(kitaev):
_STASH_IN = None
_STASH_OUT = None
def __call__(self, x, **kwargs):
"""Makes Layer instances callable; for use in tests or interactive settings.
This convenience method helps library users play with, test, or otherwise
probe the behavior of layers outside of a full training environment. It
presents the layer as callable function from inputs to outputs, with the
option of manually specifying parameters and non-parameter state per
individual call. For convenience, parameters and non-parameter state are
cached per layer instance, starting from default values of () and (), and
acquiring non-empty values either by initialization or from values
explicitly provided via the params and state keyword arguments.
Args:
x: 0 or more input tensors, formatted the same as the inputs to
Layer.forward.
**kwargs: Additional keyword arguments if needed/desired for this layer.
Three possible keyword arguments are especially relevant:
- params=... will override any cached params values
- state=... will override any cached state values
- rng=... will supply a PRNG key for use by the layer
Returns:
0 or more output tensors, formatted the same as the outputs from
Layer.forward.
"""
params = kwargs.pop('params', self.params)
state = kwargs.pop('state', self.state)
outputs, _ = self.apply_forward(x, params=params, state=state, **kwargs)
return outputs
def apply_forward(self, x, params=(), state=(), **kwargs):
"""Applies this layer as part of a forward pass; an internal system method.
This method is reserved for handling plumbing and other internal affairs
as needed by the overall library. Trax library users should use or override
the `forward` method instead.
Args:
x: See Layer.forward inputs.
params: See Layer.forward.
state: See Layer.forward.
**kwargs: See Layer.forward.
Returns:
See Layer.forward.
"""
try:
# If params are nothing, we may be reusing this layer.
# Use the cached parameters to calculate the value.
# Note: to make sure jit tracers can decide this branch in python we
# use "params is ()" instead of, e.g., "not params" or "params == ()".
if params is (): # pylint: disable=literal-comparison
params = self._params
else:
# In this case, we're called for the first time: cache parameters.
self._params = params
if not self.has_backward or Layer._STASH_IN is not None:
outputs, s = self.forward(x, params=params, state=state, **kwargs)
else:
outputs, s = self._do_custom_gradients(x, params, state, **kwargs)
self._state = s
return outputs, s
except Exception:
name, trace = self.__class__.__name__, _short_traceback()
raise LayerError(name, 'apply_forward', self._caller,
shapes(x), None, trace)
def _do_custom_gradients(self, x, params, state, **kwargs):
"""Calls this layer for a forward pass, but with custom gradients."""
assert backend.get_name() == 'jax', (
'Custom gradients are only supported in JAX for now.')
# TODO(wangpeng): JAX doesn't support custom grads for functions with
# auxiliary output yet (https://github.com/google/jax/issues/844). Will
# remove the constraints on state below when this feature is added to
# JAX.
assert not jax.tree_util.tree_leaves(state), (
'Custom gradients require trivial start state. Got %s' % str(state))
def check_end_state(output_state):
output, state = output_state
assert not jax.tree_util.tree_leaves(state), (
'Custom gradients require trivial end state. Got %s' % str(state))
return output
# See this link for how custom transformations are defined in JAX:
# https://jax.readthedocs.io/en/latest/jax.html#jax.custom_transforms
# Note that we capture the kwargs and don't calculate gradients wrt. them.
@jax.custom_transforms
def _do_forward(y, params):
return check_end_state(self.forward(y, params=params, state=state,
**kwargs))
# This is the custom gradient (vector-jacobian product in JAX) function.
# For the exact specification of this custom transformation see this link:
# https://jax.readthedocs.io/en/latest/jax.html#jax.defjvp_all
def do_forward_vjp(y, params):
"""Custom gradient (vjp) function."""
stash = None
if Layer._STASH_IN is None:
Layer._STASH_IN = stash = {}
output = check_end_state(self.forward(y, params=params, state=state,
**kwargs))
if stash is not None:
Layer._STASH_IN = None
def vjpfun(grad):
assert Layer._STASH_OUT is None
Layer._STASH_OUT = stash
res = self.backward(y, output, grad, params, state, **kwargs)
Layer._STASH_OUT = None
return res
return output, vjpfun
jax.defvjp_all(_do_forward, do_forward_vjp)
return _do_forward(x, params), state
class LayerError(Exception):
"""Exception raised in the layer stack.
Attributes:
message: the message corresponding to this exception.
"""
def __init__(self, layer_name, function_name, caller,
input_shapes, input_types, traceback_string):
self._layer_name = layer_name
self._function_name = function_name
self._caller = caller # Python inspect object with init caller info.
self._traceback = traceback_string
self._input_shapes = input_shapes
self._input_types = input_types
super(LayerError, self).__init__(self.message)
@property
def message(self):
"""Create error message."""
prefix = 'Exception passing through layer '
prefix += '%s (in %s):\n' % (self._layer_name, self._function_name)
short_path = '[...]/' + '/'.join(self._caller.filename.split('/')[-3:])
caller = ' layer created in file %s, line %d\n' % (short_path,
self._caller.lineno)
shapes_str = ' layer input shapes: %s\n\n' % str(self._input_shapes)
if self._input_types is not None:
types_str = ' layer input types: %s\n' % str(self._input_types)
shapes_str = types_str + shapes_str
return prefix + caller + shapes_str + self._traceback
def _apply_to_first_n(f, x, n):
"""Helper: apply f to first n elements on the stack x if n > 0."""
if n < 1:
return f(x)
argument, rest = x[:n], x[n:]
if n == 1:
argument = argument[0]
result = f(argument)
if not rest:
return result
if n == 1:
result = [result]
result = list(result) + list(rest)
if isinstance(x, tuple):
result = tuple(result)
return result
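# Worked example (not in the original): f is applied to the first n stack
# elements and must return the replacement outputs; the rest passes through.
# _apply_to_first_n(lambda t: (sum(t),), (1, 2, 3, 9), 3)  ==  (6, 9)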
def nested_reduce(x, f):
"""Fold the function f to the nested structure x (dicts, tuples, lists)."""
if isinstance(x, list):
return f([nested_reduce(y, f) for y in x])
if isinstance(x, tuple):
return f([nested_reduce(y, f) for y in x])
return x
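# Worked example (not in the original): lists and tuples are reduced
# bottom-up while other leaves are returned unchanged:
# nested_reduce([1, [2, 3], (4,)], sum)  ==  sum([1, 5, 4])  ==  10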
def shapes(x):
"""Get a structure of shapes for a structure of nested arrays."""
def shape(x):
try:
return tuple([int(i) for i in x.shape])
except Exception: # pylint: disable=broad-except
return []
return nested_map(x, shape)
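# Worked example (not in the original; assumes numpy arrays as leaves):
# shapes([np.zeros((2, 3)), np.zeros(5)])  ==  [(2, 3), (5,)]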
def sizes(x):
"""Get a structure of sizes for a structure of nested arrays."""
def size(x):
try:
return x.size
except Exception: # pylint: disable=broad-except
return 0
return nested_map(x, size)
def _find_frame(stack, start=0):
"""Find the frame with the caller on the stack."""
# We want to find the first place where the layer was called
# that is *not* an __init__ function of an inheriting layer.
frame = inspect.getframeinfo(stack[start][0])
# If we are in an init, move on.
if frame.function == '__init__':
return _find_frame(stack, start + 1)
return frame
def _shorten_file_path(line):
"""Shorten file path in error lines for more readable tracebacks."""
start = line.lower().find('file')
if start < 0:
return line
first_quote = line.find('"', start)
if first_quote < 0:
return line
second_quote = line.find('"', first_quote + 1)
if second_quote < 0:
return line
path = line[first_quote + 1:second_quote]
new_path = '/'.join(path.split('/')[-3:])
return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:]
def _short_traceback(skip=3):
"""Cleaned-up form of traceback."""
counter, res = 0, []
# Skipping 3 lines by default: the top (useless) and self-call.
lines = traceback.format_exc().splitlines()[skip:]
for l in lines:
res.append(_shorten_file_path(l))
if counter % 2 == 1:
res.append('')
counter += 1
# If we see a LayerError, the traceback has already been processed.
if l.startswith('LayerError'):
# Skip 4 back except last as these are internal base-layer calls.
# workspace/howtolens/chapter_4_inversions/scripts/tutorial_5_borders.py
from autolens.data import ccd
from autolens.data.array import mask as msk
from autolens.model.profiles import light_profiles as lp
from autolens.model.profiles import mass_profiles as mp
from autolens.model.galaxy import galaxy as g
from autolens.lens import ray_tracing
from autolens.lens import lens_fit
from autolens.lens import lens_data as ld
from autolens.model.inversion import pixelizations as pix
from autolens.model.inversion import regularization as reg
from autolens.data.plotters import ccd_plotters
from autolens.lens.plotters import ray_tracing_plotters
from autolens.model.inversion.plotters import inversion_plotters
from autolens.model.inversion.plotters import mapper_plotters
# Up to now, all our mappers have had their border input as 'None', and you may be wondering what inputting a border
# actually does. Well, it turns out borders are pretty important, and they are what we'll be covering in this tutorial.
# path = '/home/jammy/PyCharm/Projects/AutoLens/workspace'
# conf.instance = conf.Config(config_path=path+'/config/', output_path=path+"/output")
# To begin, lets simulate a simple image and use it to generate a rectangular mapper, as we're now used to doing.
def simulate():
from autolens.data.array import grids
from autolens.model.galaxy import galaxy as g
from autolens.lens import ray_tracing
psf = ccd.PSF.simulate_as_gaussian(shape=(11, 11), sigma=0.05, pixel_scale=0.05)
image_plane_grid_stack = grids.GridStack.grid_stack_for_simulation(shape=(180, 180), pixel_scale=0.05,
psf_shape=(11, 11))
lens_galaxy = g.Galaxy(mass=mp.EllipticalIsothermal(centre=(0.0, 0.0), axis_ratio=0.8, phi=135.0,
einstein_radius=1.6))
source_galaxy = g.Galaxy(light=lp.EllipticalSersic(centre=(0.1, 0.1), axis_ratio=0.8, phi=90.0, intensity=0.2,
effective_radius=0.3, sersic_index=1.0))
tracer = ray_tracing.TracerImageSourcePlanes(lens_galaxies=[lens_galaxy],
source_galaxies=[source_galaxy],
image_plane_grid_stack=image_plane_grid_stack)
return ccd.CCDData.simulate(array=tracer.image_plane_image_for_simulation, pixel_scale=0.05,
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_noise=True)
# Lets have a quick look at the image.
ccd_data = simulate()
ccd_plotters.plot_ccd_subplot(ccd_data=ccd_data)
# So, what is a border? In the image-plane, a border is the set of exterior pixels in a mask that are at, well, its
# border. Lets plot the image with a circular mask, and tell our imaging plotter to plot the border as well.
mask_circular = msk.Mask.circular(shape=ccd_data.shape, pixel_scale=ccd_data.pixel_scale, radius_arcsec=2.5)
ccd_plotters.plot_ccd_subplot(ccd_data=ccd_data, mask=mask_circular, should_plot_border=True)
# As you can see, for a circular mask, the border *is* the edge of our mask (the ring of black dots we're used to seeing
# whenever we plot a mask). For an annular mask, not every pixel on the edge of the mask is necessarily a part of its
# border!
mask_annular = msk.Mask.circular_annular(shape=ccd_data.shape, pixel_scale=ccd_data.pixel_scale,
inner_radius_arcsec=0.8, outer_radius_arcsec=2.5)
ccd_plotters.plot_ccd_subplot(ccd_data=ccd_data, mask=mask_annular, should_plot_border=True)
# Indeed, a border is *only* the pixels at the exterior edge of our mask, which for the annular mask above means none of
# the pixels at the inner radius = 0.8" edge are part of the border.
# So, what does a border actually do? To show you, we'll need to fit this image with a lens model and mapper, and we'll
# do that by using the same function as the previous tutorial (to perform a quick source galaxy fit) but with the
# option to input a mask and use a border.
def perform_fit_with_source_galaxy_mask_and_border(source_galaxy, mask, use_border):
ccd_data = simulate()
lens_data = ld.LensData(ccd_data=ccd_data, mask=mask)
lens_galaxy = g.Galaxy(
mass=mp.EllipticalIsothermal(centre=(0.0, 0.0), axis_ratio=0.8, phi=135.0, einstein_radius=1.6))
if use_border:
border = lens_data.border
else:
border = None
tracer = ray_tracing.TracerImageSourcePlanes(lens_galaxies=[lens_galaxy], source_galaxies=[source_galaxy],
image_plane_grid_stack=lens_data.grid_stack, border=border)
return lens_fit.fit_lens_data_with_tracer(lens_data=lens_data, tracer=tracer)
# Okay, so lets first look at our mapper without using a border, and using our annular mask.
source_galaxy = g.Galaxy(pixelization=pix.Rectangular(shape=(40, 40)), regularization=reg.Constant(coefficients=(1.0,)))
fit = perform_fit_with_source_galaxy_mask_and_border(source_galaxy=source_galaxy, mask=mask_annular, use_border=False)
inversion_plotters.plot_reconstructed_pixelization(inversion=fit.inversion, should_plot_grid=True)
# Everything looks fine - we get a reconstructed source on a visually appealing source-plane grid. So, why are we
# so worried about borders? Lets see what happens if we use a circular mask instead.
fit = perform_fit_with_source_galaxy_mask_and_border(source_galaxy=source_galaxy, mask=mask_circular, use_border=False)
inversion_plotters.plot_reconstructed_pixelization(inversion=fit.inversion, should_plot_grid=True)
# Whoa - what's happened? There are lots of extra points on our source-plane grid, which trace to extremely large radii
# away from the central regions of the source-plane! These points are traced image-pixels (just like all the other
# points) which correspond to the central image-pixels that our annular mask masked, but that our circular mask didn't!
# Lets quickly check this using a mapper plotter
mapper_plotters.plot_image_and_mapper(ccd_data=ccd_data, mapper=fit.inversion.mapper, mask=mask_circular, should_plot_grid=True,
image_pixels=[[range(3765, 3795)], [range(4065, 4095)], [range(3865, 3895)],
[range(3965, 3995)], [range(4165, 4195)]])
# So, what's happening physically? Towards the centre of our EllipticalIsothermal mass profile (and in general most
# other mass profiles), the density profile becomes extremely cuspy (it begins to rise very sharply). This causes
# extremely large deflection angles to be computed - lets have a quick look.
ray_tracing_plotters.plot_deflections_y(tracer=fit.tracer)
ray_tracing_plotters.plot_deflections_x(tracer=fit.tracer)
# This means that our central image pixels are highly demagnified - they trace to extremely large values in the source
# plane! Physically, this isn't a problem - it just means that we don't see a 'central image' in most strong lenses,
# as the light-rays through the centre of the lens are demagnified (although if the lens galaxy had a flat, cored mass
# distribution we would see this central image, and it has been observed in a real strong lens!)
# However, it is a problem for our pixelization and mapper, which in the source-plane fits these demagnified pixels
# like all of the other pixels. This has two devastating consequences:
# 1) The rectangular grid we 'overlay' over the source-plane is much larger than for the annular mask, because it has to
# expand to also include all of the demagnified traced image-pixels. As a result, large source-pixels are used to
# reconstruct the central regions of the source-plane (where the source galaxy is actually located), meaning we
# reconstruct the source-galaxy at a lower effective resolution.
# 2) The rectangular grid reconstructs the flux of the demagnified image pixels, using source-pixels which contain
# *only* demagnified image pixels. However, these source-pixels *should* have other traced image-pixels in
# them, coming from pixels at large radii from the centre of the lens galaxy. Unfortunately, our circular mask
# masks them out, meaning they never make it to our source-plane and are omitted from the source reconstruction.
# Lets quickly use a larger circular mask to confirm that these pixels do exist, if we don't mask them.
mask_circular_large = msk.Mask.circular(shape=ccd_data.shape, pixel_scale=ccd_data.pixel_scale, radius_arcsec=4.0)
fit = perform_fit_with_source_galaxy_mask_and_border(source_galaxy=source_galaxy, mask=mask_circular_large, use_border=False)
inversion_plotters.plot_reconstructed_pixelization(inversion=fit.inversion, should_plot_grid=True)
# This second point is a *huge* problem, as allowing source-pixels to fit regions of our mask in this completely
# unphysical way introduces extremely dangerous systematics into our source reconstruction and lens model analysis.
# You can see this in the weird patterns these pixels make in the exterior regions of our source-reconstruction!
# Borders are the solution to this problem. All we do, is we take the mask border in the image-plane we showed above,
# trace it to the source-plane, and relocate all traced image-pixels pixels outside this source-plane border to its
# edge. Lets take a look.
fit = perform_fit_with_source_galaxy_mask_and_border(source_galaxy=source_galaxy, mask=mask_circular, use_border=True)
inversion_plotters.plot_reconstructed_pixelization(inversion=fit.inversion, should_plot_grid=True)
mapper_plotters.plot_image_and_mapper(ccd_data=ccd_data, mapper=fit.inversion.mapper, mask=mask_circular, should_plot_grid=True,
image_pixels=[[range(3765, 3795)], [range(4065, 4095)], [range(3865, 3895)],
[range(3965, 3995)], [range(4165, 4195)]])
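# To make the relocation idea concrete, here's a minimal numpy sketch (an
# illustration of the concept only, *not* the PyAutoLens implementation; for
# simplicity it uses a circular border centred on the origin): traced points
# whose radius exceeds the border radius are pulled radially back onto it.
import numpy as np
def relocate_outside_border(points, border_radius):
    radii = np.linalg.norm(points, axis=1)
    outside = radii > border_radius
    relocated = points.copy()
    relocated[outside] *= (border_radius / radii[outside])[:, None]
    return relocated
# e.g. a demagnified pixel traced to (80.0, 60.0) (radius 100.0) is moved to
# (0.8, 0.6) for a border radius of 1.0, while interior points are unchanged.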
# This successfully addresses both of the issues above! However, you might be thinking, isn't that a bit of a hack? It's
# not really a physical treatment of the ray-tracing, is it?
# Well, you're right, it's certainly not the most physical way to tackle this problem. However, the *only* physical
# way to do this would be to use a mask so large that all demagnified central pixels are surrounded by traced
# image-pixels. This would require masks so large our computers would crash, because they run out of memory. That's not
# a good solution, thus borders provide us with a workaround - one that I've extensively tested and have found that,
# provided your mask isn't too small, doesn't lead to systematic biases.
# Next, I'm going to quickly highlight how important borders are when modeling multiple lens galaxies. Their complex
# mass distribution and lensing configuration often produce very nasty edge effects, whereby image pixels not in the
# centre of the mask, but anywhere in the mask, trace beyond the source-plane border.
def simulate_image_x2_lenses():
from autolens.data.array import grids
from autolens.model.galaxy import galaxy as g
from autolens.lens import ray_tracing
psf = ccd.PSF.simulate_as_gaussian(shape=(11, 11), sigma=0.05, pixel_scale=0.05)
image_plane_grid_stack = grids.GridStack.grid_stack_for_simulation(shape=(300, 300), pixel_scale=0.05, psf_shape=(11, 11))
lens_galaxy_0 = g.Galaxy(
mass=mp.EllipticalIsothermal(centre=(1.1, 0.51), axis_ratio=0.9, phi=110.0, einstein_radius=1.07))
lens_galaxy_1 = g.Galaxy(
mass=mp.EllipticalIsothermal(centre=(-0.20, -0.35), axis_ratio=0.56, phi=16.0, einstein_radius=0.71))
source_galaxy_0 = g.Galaxy(light=lp.EllipticalSersic(centre=(0.05, 0.05), axis_ratio=0.8, phi=90.0, intensity=0.2,
effective_radius=0.3, sersic_index=1.0))
tracer = ray_tracing.TracerImageSourcePlanes(lens_galaxies=[lens_galaxy_0, lens_galaxy_1],
source_galaxies=[source_galaxy_0],
image_plane_grid_stack=image_plane_grid_stack)
return ccd.CCDData.simulate(array=tracer.image_plane_image_for_simulation, pixel_scale=0.05,
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_noise=True)
# Lets simulate our 2 lens system, define a new circular mask and plot them.
ccd_data = simulate_image_x2_lenses()
mask_circular = msk.Mask.circular(shape=ccd_data.shape, pixel_scale=ccd_data.pixel_scale, radius_arcsec=2.8)
ccd_plotters.plot_ccd_subplot(ccd_data=ccd_data, mask=mask_circular, extract_array_from_mask=True,
zoom_around_mask=True, should_plot_border=True)
# We need to redefine our perform fit function, to use the x2 lens galaxy model.
def perform_fit_x2_lenses_with_source_galaxy_mask_and_border(source_galaxy, mask, use_border):
ccd_data = simulate_image_x2_lenses()
lens_data = ld.LensData(ccd_data=ccd_data, mask=mask)
lens_galaxy_0 = g.Galaxy(
mass=mp.EllipticalIsothermal(centre=(1.1, 0.51), axis_ratio=0.9, phi=110.0, einstein_radius=1.07))
# -*- coding: utf-8 -*-
from openerp import models,fields,api,_
import logging
import base64
import io
import time
import commands
import os
import datetime
from datetime import datetime
from openerp.exceptions import ValidationError, except_orm
from openerp.osv.osv import osv
_logger = logging.getLogger(__name__)
from tempfile import *
import openerp.addons.decimal_precision as dp
from openerp.tools.amount_to_text_en import amount_to_text
from amount_to_text_ec import amount_to_text_ec
class Account_journal_ans(models.Model):
_inherit = "account.journal"
is_anticipo = fields.Boolean('Es un Diario de Anticipo?')
is_cruce = fields.Boolean('Cruce de Anticipo')
class Res_partner_ans(models.Model):
_inherit = "res.partner"
cuenta_anticipo_p = fields.Many2one("account.account",string="Cuenta Anticipo Proveedor",ondelete="cascade",required=True)
cuenta_anticipo_c = fields.Many2one("account.account",string="Cuenta Anticipo Cliente",ondelete="cascade",required=True)
class Anticipos_proveedor_ans(models.Model):
_inherit = "account.voucher"
_order = "date"
@api.multi
def _calcular_anticipo(self):
for voucher in self:
if voucher.journal_id.is_cruce:
total = 0
for l in voucher.line_cr_ids:
total += l.amount
voucher.total_cruce = total
is_cruce = fields.Boolean(related='journal_id.is_cruce')
total_cruce = fields.Float(digits_compute=dp.get_precision('Account'),states={'draft': [('readonly', False)]},compute=_calcular_anticipo,readonly=True)
def cancel_voucher(self, cr, uid, ids, context=None):
reconcile_pool = self.pool.get('account.move.reconcile')
move_pool = self.pool.get('account.move')
move_line_pool = self.pool.get('account.move.line')
for voucher in self.browse(cr, uid, ids, context=context):
# refresh to make sure you don't unlink an already removed move
voucher.refresh()
for line in voucher.move_ids:
# refresh to make sure you don't unreconcile an already unreconciled entry
line.refresh()
if line.reconcile_id:
move_lines = [move_line.id for move_line in line.reconcile_id.line_id]
move_lines.remove(line.id)
reconcile_pool.unlink(cr, uid, [line.reconcile_id.id], context=context)
if len(move_lines) >= 2:
move_line_pool.reconcile_partial(cr, uid, move_lines, 'auto', context=context)
if voucher.move_id:
move_pool.button_cancel(cr, uid, [voucher.move_id.id], context=context)
move_pool.unlink(cr, uid, [voucher.move_id.id], context=context)
res = {
'state': 'cancel',
'move_id': False,
}
self.write(cr, uid, ids, res, context=context)
return True
def proforma_voucher(self, cr, uid, ids, context=None):
voucher = self.pool.get('account.voucher').browse(cr, uid, ids, context)
if voucher.alumno_id:
if voucher.alumno_id.parent_id.id != voucher.partner_id.id:
raise ValidationError('No coinciden Padre e Hijo seleccionado.')
if len(voucher.line_ids) == 0:
if voucher.alumno_id.parent_id.id != voucher.partner_id.id:
raise ValidationError('No puede confirmar este pago')
if voucher.journal_id.is_cruce:
if voucher.writeoff_amount != 0:
raise ValidationError("Debe cuadrar el pago, Corregir Valores")
self.action_move_line_create(cr, uid, ids, context=context)
return True
def first_move_line_get(self, cr, uid, voucher_id, move_id, company_currency, current_currency, context=None):
'''
Return a dict to be used to create the first account move line of the given voucher.
:param voucher_id: Id of voucher what we are creating account_move.
:param move_id: Id of account move where this line will be added.
:param company_currency: id of currency of the company to which the voucher belong
:param current_currency: id of currency of the voucher
:return: mapping between fieldname and value of account move line to create
:rtype: dict
'''
voucher = self.pool.get('account.voucher').browse(cr,uid,voucher_id,context)
debit = credit = 0.0
# TODO: is there any other alternative than the voucher type??
# ANSWER: We can have payment and receipt "In Advance".
# TODO: Make this logic available.
# - for sale/purchase we have the type, but for payment/receipt we cannot tell from the bank/cash journal alone whether it is a payment or a receipt
if voucher.type in ('purchase', 'payment'):
credit = voucher.paid_amount_in_company_currency
elif voucher.type in ('sale', 'receipt'):
debit = voucher.paid_amount_in_company_currency
if debit < 0: credit = -debit; debit = 0.0
if credit < 0: debit = -credit; credit = 0.0
sign = debit - credit < 0 and -1 or 1
#set the first line of the voucher
move_line = {
'name': voucher.name or '/',
'debit': debit,
'credit': credit,
'account_id': voucher.account_id.id,
'move_id': move_id,
'journal_id': voucher.journal_id.id,
'period_id': voucher.period_id.id,
'partner_id': voucher.partner_id.id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': (sign * abs(voucher.amount) # amount < 0 for refunds
if company_currency != current_currency else 0.0),
'date': voucher.date,
'date_maturity': voucher.date_due
}
return move_line
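# The debit/credit normalization above flips negative amounts to the other
# side and derives the sign used for amount_currency. A minimal standalone
# sketch of that logic (hypothetical helper, not part of this module):
#
#     def _normalize_debit_credit(debit, credit):
#         if debit < 0: credit = -debit; debit = 0.0
#         if credit < 0: debit = -credit; credit = 0.0
#         sign = debit - credit < 0 and -1 or 1
#         return debit, credit, sign
#
#     assert _normalize_debit_credit(-50.0, 0.0) == (0.0, 50.0, -1)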
def onchange_journal(self, cr, uid, ids, journal_id, line_ids, tax_id, partner_id, date, amount, ttype, company_id, context=None):
if context is None:
context = {}
if not journal_id:
return False
journal_pool = self.pool.get('account.journal')
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if ttype in ('sale', 'receipt'):
account_id = journal.default_debit_account_id
elif ttype in ('purchase', 'payment'):
account_id = journal.default_credit_account_id
else:
account_id = journal.default_credit_account_id or journal.default_debit_account_id
tax_id = False
if account_id and account_id.tax_ids:
tax_id = account_id.tax_ids[0].id
vals = {'value':{} }
if ttype in ('sale', 'purchase'):
vals = self.onchange_price(cr, uid, ids, line_ids, tax_id, partner_id, context)
vals['value'].update({'tax_id':tax_id,'amount': amount})
currency_id = False
if journal.currency:
currency_id = journal.currency.id
else:
currency_id = journal.company_id.currency_id.id
period_ids = self.pool['account.period'].find(cr, uid, dt=date, context=dict(context, company_id=company_id))
vals['value'].update({
'currency_id': currency_id,
'payment_rate_currency_id': currency_id,
'period_id': period_ids and period_ids[0] or False
})
#in case we want to register the payment directly from an invoice, it's confusing to allow to switch the journal
#without seeing that the amount is expressed in the journal currency, and not in the invoice currency. So to avoid
#this common mistake, we simply reset the amount to 0 if the currency is not the invoice currency.
if context.get('payment_expected_currency') and currency_id != context.get('payment_expected_currency'):
vals['value']['amount'] = 0
amount = 0
if partner_id:
res = self.onchange_partner_id(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context)
for key in res.keys():
vals[key].update(res[key])
if journal.is_anticipo:
if 'value' in vals:
vals['value']['line_dr_ids'] = []
vals['value']['line_cr_ids'] = []
return vals
def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date,
payment_rate_currency_id, company_id, context=None):
if context is None:
context = {}
ctx = context.copy()
ctx.update({'date': date})
# read the voucher rate with the right date in the context
currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id,
context=ctx).currency_id.id
voucher_rate = self.pool.get('res.currency').read(cr, uid, [currency_id], ['rate'], context=ctx)[0]['rate']
ctx.update({
'voucher_special_currency': payment_rate_currency_id,
'voucher_special_currency_rate': rate * voucher_rate})
res = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date,
context=ctx)
vals = self.onchange_rate(cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id,
context=ctx)
for key in vals.keys():
res[key].update(vals[key])
journal_pool = self.pool.get('account.journal')
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.is_anticipo:
return {}
if 'value' in res:
amount = 'amount' in res['value'] and res['value']['amount'] or amount
lang = context.get('lang')
amount_in_word = ''
if (lang and len(lang) >= 2 and lang[:2] == 'es'):
amount_in_word = amount_to_text_ec().amount_to_text_cheque(amount, '', '')
else:
amount_in_word = amount_to_text(amount)
res['value'].update({'amount_in_word': amount_in_word})
if journal_id:
#INTEGRATION NOTE: flagged because it shows an error that the variable does not exist; the check-writing module must be installed
allow_check_writing = self.pool.get('account.journal').browse(cr, uid, journal_id,context=context).allow_check_writing
res['value'].update({'allow_check': allow_check_writing})
return res
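# The amount-in-words dispatch above picks the Ecuadorian Spanish converter
# for any 'es_*' language code and falls back to the stock English helper.
# A condensed sketch of that dispatch (hypothetical standalone helper):
#
#     def _amount_in_words(amount, lang):
#         if lang and lang[:2] == 'es':
#             return amount_to_text_ec().amount_to_text_cheque(amount, '', '')
#         return amount_to_text(amount)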
def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=None):
if not journal_id:
return {}
if context is None:
context = {}
#TODO: comment me and use me directly in the sales/purchases views
res = self.basic_onchange_partner(cr, uid, ids, partner_id, journal_id, ttype, context=context)
if ttype in ['sale', 'purchase']:
return res
ctx = context.copy()
# not passing the payment_rate currency and the payment_rate in the context but it's ok because they are reset in recompute_payment_rate
ctx.update({'date': date})
vals = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
vals2 = self.recompute_payment_rate(cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=context)
for key in vals.keys():
res[key].update(vals[key])
for key in vals2.keys():
res[key].update(vals2[key])
#TODO: can probably be removed now
#TODO: onchange_partner_id() should not return [pre_line, line_dr_ids, payment_rate...] for type sale, nor
# [pre_line, line_cr_ids, payment_rate...] for type purchase.
# We should definitively split the account.voucher object in two and make distinct on_change functions. In the
# meanwhile, the lines below must be there because the fields aren't present in the view, which crashes if the
# onchange returns a value for them
if ttype == 'sale':
del(res['value']['line_dr_ids'])
del(res['value']['pre_line'])
del(res['value']['payment_rate'])
elif ttype == 'purchase':
del(res['value']['line_cr_ids'])
del(res['value']['pre_line'])
del(res['value']['payment_rate'])
journal_pool = self.pool.get('account.journal')
journal = journal_pool.browse(cr, uid, journal_id, context=context)
if journal.is_anticipo:
del (res['value']['line_cr_ids'])
del (res['value']['line_dr_ids'])
del (res['value']['pre_line'])
del (res['value']['payment_rate'])
return res
def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None):
"""
Returns a dict that contains new values and context
@param partner_id: latest value from user input for field partner_id
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
def _remove_noise_in_o2m():
"""if the line is partially reconciled, then we must pay attention to display it only once and
in the good o2m.
This function returns True if the line is considered as noise and should not be displayed
"""
if line.reconcile_partial_id:
if currency_id == line.currency_id.id:
if line.amount_residual_currency <= 0:
return True
else:
if line.amount_residual <= 0:
return True
return False
if context is None:
context = {}
context_multi_currency = context.copy()
currency_pool = self.pool.get('res.currency')
move_line_pool = self.pool.get('account.move.line')
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
line_pool = self.pool.get('account.voucher.line')
#set default values
default = {
'value': {'line_dr_ids': [], 'line_cr_ids': [], 'pre_line': | |
<filename>tests/retirement_constants.py
FONDO_PARA_RETIRO_JSON_0 = {
"reg_wdr": 2000,
"num_of_years": 2,
"freq": 12,
"rate": 5,
"wdr_when": 0,
}
FONDO_PARA_RETIRO_JSON_1 = {
"reg_wdr": 2000,
"num_of_years": 2,
"freq": 12,
"rate": 5,
"wdr_when": 1,
}
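# The expected results below are consistent (up to per-period rounding) with
# the closed-form present value of an annuity. A hedged sketch, assuming
# `rate` is an annual percentage, `freq` the compounding periods per year,
# and `wdr_when` selects end-of-period (0) vs start-of-period (1)
# withdrawals; the helper name is ours, not part of the app under test:
def _expected_ret_fund(reg_wdr, num_of_years, freq, rate, wdr_when):
    r = rate / 100.0 / freq  # periodic rate
    n = num_of_years * freq  # number of withdrawals
    pv = reg_wdr * (1 - (1 + r) ** -n) / r  # ordinary annuity present value
    if wdr_when == 1:  # withdrawals at period start are worth one period more
        pv *= 1 + r
    return pv
# _expected_ret_fund(**FONDO_PARA_RETIRO_JSON_0) ~= 45587.7 (ret_fund: 45587.8)
# _expected_ret_fund(**FONDO_PARA_RETIRO_JSON_1) ~= 45777.7 (ret_fund: 45777.75)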
# noinspection DuplicatedCode
FONDO_PARA_RETIRO_RESULT_0 = {
"a_interests": [
189.95,
372.36,
547.19,
714.42,
874.01,
1025.93,
1170.15,
1306.64,
1435.37,
1556.3,
1669.4,
1774.64,
1871.98,
1961.4,
2042.86,
2116.32,
2181.75,
2239.12,
2288.4,
2329.55,
2362.54,
2387.33,
2403.89,
2412.19,
],
"a_withdrawals": [
2000.0,
4000.0,
6000.0,
8000.0,
10000.0,
12000.0,
14000.0,
16000.0,
18000.0,
20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0,
44000.0,
46000.0,
47999.99,
],
"balances": [
43777.75,
41960.16,
40134.990000000005,
38302.22,
36461.810000000005,
34613.73,
32757.950000000004,
30894.440000000002,
29023.170000000002,
27144.100000000002,
25257.200000000004,
23362.440000000002,
21459.780000000002,
19549.200000000004,
17630.660000000003,
15704.120000000003,
13769.550000000003,
11826.920000000002,
9876.200000000003,
7917.350000000003,
5950.340000000003,
3975.130000000003,
1991.6900000000028,
0,
],
"periods": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
],
"ret_fund": 45587.8,
"table": [
{"b": "43,777.75", "i": "189.95", "p": "1", "w": "2,000.00"},
{"b": "41,960.16", "i": "182.41", "p": "2", "w": "2,000.00"},
{"b": "40,134.99", "i": "174.83", "p": "3", "w": "2,000.00"},
{"b": "38,302.22", "i": "167.23", "p": "4", "w": "2,000.00"},
{"b": "36,461.81", "i": "159.59", "p": "5", "w": "2,000.00"},
{"b": "34,613.73", "i": "151.92", "p": "6", "w": "2,000.00"},
{"b": "32,757.95", "i": "144.22", "p": "7", "w": "2,000.00"},
{"b": "30,894.44", "i": "136.49", "p": "8", "w": "2,000.00"},
{"b": "29,023.17", "i": "128.73", "p": "9", "w": "2,000.00"},
{"b": "27,144.10", "i": "120.93", "p": "10", "w": "2,000.00"},
{"b": "25,257.20", "i": "113.10", "p": "11", "w": "2,000.00"},
{"b": "23,362.44", "i": "105.24", "p": "12", "w": "2,000.00"},
{"b": "21,459.78", "i": "97.34", "p": "13", "w": "2,000.00"},
{"b": "19,549.20", "i": "89.42", "p": "14", "w": "2,000.00"},
{"b": "17,630.66", "i": "81.46", "p": "15", "w": "2,000.00"},
{"b": "15,704.12", "i": "73.46", "p": "16", "w": "2,000.00"},
{"b": "13,769.55", "i": "65.43", "p": "17", "w": "2,000.00"},
{"b": "11,826.92", "i": "57.37", "p": "18", "w": "2,000.00"},
{"b": "9,876.20", "i": "49.28", "p": "19", "w": "2,000.00"},
{"b": "7,917.35", "i": "41.15", "p": "20", "w": "2,000.00"},
{"b": "5,950.34", "i": "32.99", "p": "21", "w": "2,000.00"},
{"b": "3,975.13", "i": "24.79", "p": "22", "w": "2,000.00"},
{"b": "1,991.69", "i": "16.56", "p": "23", "w": "2,000.00"},
{"b": "0.00", "i": "8.30", "p": "24", "w": "1,999.99"},
],
"table_a": [
{"b": "23,362.44", "i": "1,774.64", "p": "1", "w": "24,000.00"},
{"b": "0.00", "i": "637.55", "p": "2", "w": "23,999.99"},
],
"table_m": [
{"b": "43,777.75", "i": "189.95", "p": "1", "w": "2,000.00"},
{"b": "41,960.16", "i": "182.41", "p": "2", "w": "2,000.00"},
{"b": "40,134.99", "i": "174.83", "p": "3", "w": "2,000.00"},
{"b": "38,302.22", "i": "167.23", "p": "4", "w": "2,000.00"},
{"b": "36,461.81", "i": "159.59", "p": "5", "w": "2,000.00"},
{"b": "34,613.73", "i": "151.92", "p": "6", "w": "2,000.00"},
{"b": "32,757.95", "i": "144.22", "p": "7", "w": "2,000.00"},
{"b": "30,894.44", "i": "136.49", "p": "8", "w": "2,000.00"},
{"b": "29,023.17", "i": "128.73", "p": "9", "w": "2,000.00"},
{"b": "27,144.10", "i": "120.93", "p": "10", "w": "2,000.00"},
{"b": "25,257.20", "i": "113.10", "p": "11", "w": "2,000.00"},
{"b": "23,362.44", "i": "105.24", "p": "12", "w": "2,000.00"},
{"b": "21,459.78", "i": "97.34", "p": "13", "w": "2,000.00"},
{"b": "19,549.20", "i": "89.42", "p": "14", "w": "2,000.00"},
{"b": "17,630.66", "i": "81.46", "p": "15", "w": "2,000.00"},
{"b": "15,704.12", "i": "73.46", "p": "16", "w": "2,000.00"},
{"b": "13,769.55", "i": "65.43", "p": "17", "w": "2,000.00"},
{"b": "11,826.92", "i": "57.37", "p": "18", "w": "2,000.00"},
{"b": "9,876.20", "i": "49.28", "p": "19", "w": "2,000.00"},
{"b": "7,917.35", "i": "41.15", "p": "20", "w": "2,000.00"},
{"b": "5,950.34", "i": "32.99", "p": "21", "w": "2,000.00"},
{"b": "3,975.13", "i": "24.79", "p": "22", "w": "2,000.00"},
{"b": "1,991.69", "i": "16.56", "p": "23", "w": "2,000.00"},
{"b": "0.00", "i": "8.30", "p": "24", "w": "1,999.99"},
],
"time_scale": "Mes",
"total_int": 2412.19,
"total_wdr": 47999.990000000005,
}
# noinspection DuplicatedCode
FONDO_PARA_RETIRO_RESULT_1 = {
"a_interests": [
182.41,
357.24,
524.47,
684.06,
835.98,
980.2,
1116.69,
1245.42,
1366.35,
1479.45,
1584.69,
1682.03,
1771.45,
1852.91,
1926.37,
1991.8,
2049.17,
2098.45,
2139.6,
2172.59,
2197.38,
2213.94,
2222.24,
2222.24,
],
"a_withdrawals": [
2000.0,
4000.0,
6000.0,
8000.0,
10000.0,
12000.0,
14000.0,
16000.0,
18000.0,
20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0,
44000.0,
46000.0,
47999.99,
],
"balances": [
43960.16,
42134.99,
40302.22,
38461.81,
36613.73,
34757.95,
32894.44,
31023.17,
29144.1,
27257.2,
25362.44,
23459.78,
21549.2,
19630.66,
17704.12,
15769.55,
13826.92,
11876.2,
9917.35,
7950.34,
5975.13,
3991.69,
1999.9900000000002,
2.2737367544323206e-12,
],
"periods": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
],
"ret_fund": 45777.75,
"table": [
{"b": "43,960.16", "i": "182.41", "p": "1", "w": "2,000.00"},
{"b": "42,134.99", "i": "174.83", "p": "2", "w": "2,000.00"},
{"b": "40,302.22", "i": "167.23", "p": "3", "w": "2,000.00"},
{"b": "38,461.81", "i": "159.59", "p": "4", "w": "2,000.00"},
{"b": "36,613.73", "i": "151.92", "p": "5", "w": "2,000.00"},
{"b": "34,757.95", "i": "144.22", "p": "6", "w": "2,000.00"},
{"b": "32,894.44", "i": "136.49", "p": "7", "w": "2,000.00"},
{"b": "31,023.17", "i": "128.73", "p": "8", "w": "2,000.00"},
{"b": "29,144.10", "i": "120.93", "p": "9", "w": "2,000.00"},
{"b": "27,257.20", "i": "113.10", "p": "10", "w": "2,000.00"},
{"b": "25,362.44", "i": "105.24", "p": "11", "w": "2,000.00"},
{"b": "23,459.78", "i": "97.34", "p": "12", "w": "2,000.00"},
{"b": "21,549.20", "i": "89.42", "p": "13", "w": "2,000.00"},
{"b": "19,630.66", "i": "81.46", "p": "14", "w": "2,000.00"},
{"b": "17,704.12", "i": "73.46", "p": "15", "w": "2,000.00"},
{"b": "15,769.55", "i": "65.43", "p": "16", "w": "2,000.00"},
{"b": "13,826.92", "i": "57.37", "p": "17", "w": "2,000.00"},
{"b": "11,876.20", "i": "49.28", "p": "18", "w": "2,000.00"},
{"b": "9,917.35", "i": "41.15", "p": "19", "w": "2,000.00"},
{"b": "7,950.34", "i": "32.99", "p": "20", "w": "2,000.00"},
{"b": "5,975.13", "i": "24.79", "p": "21", "w": "2,000.00"},
{"b": "3,991.69", "i": "16.56", "p": "22", "w": "2,000.00"},
{"b": "1,999.99", "i": "8.30", "p": "23", "w": "2,000.00"},
{"b": "0.00", "i": "0.00", "p": "24", "w": "1,999.99"},
],
"table_a": [
{"b": "23,459.78", "i": "1,682.03", "p": "1", "w": "24,000.00"},
{"b": "0.00", "i": "540.21", "p": "2", "w": "23,999.99"},
],
"table_m": [
{"b": "43,960.16", "i": "182.41", "p": "1", "w": "2,000.00"},
{"b": "42,134.99", "i": "174.83", "p": "2", "w": "2,000.00"},
{"b": "40,302.22", "i": "167.23", "p": "3", "w": "2,000.00"},
{"b": "38,461.81", "i": "159.59", "p": "4", "w": "2,000.00"},
{"b": "36,613.73", "i": "151.92", "p": "5", "w": "2,000.00"},
{"b": "34,757.95", "i": "144.22", "p": "6", "w": "2,000.00"},
{"b": "32,894.44", "i": "136.49", "p": "7", "w": "2,000.00"},
{"b": "31,023.17", "i": "128.73", "p": "8", "w": "2,000.00"},
{"b": "29,144.10", "i": "120.93", "p": "9", "w": "2,000.00"},
{"b": "27,257.20", "i": "113.10", "p": "10", "w": "2,000.00"},
{"b": "25,362.44", "i": "105.24", "p": "11", "w": "2,000.00"},
{"b": "23,459.78", "i": "97.34", "p": "12", "w": "2,000.00"},
{"b": "21,549.20", "i": "89.42", "p": "13", "w": "2,000.00"},
{"b": "19,630.66", "i": "81.46", "p": "14", "w": "2,000.00"},
{"b": "17,704.12", "i": "73.46", "p": "15", "w": "2,000.00"},
{"b": "15,769.55", "i": "65.43", "p": "16", "w": "2,000.00"},
{"b": "13,826.92", "i": "57.37", "p": "17", "w": "2,000.00"},
{"b": "11,876.20", "i": "49.28", "p": "18", "w": "2,000.00"},
{"b": "9,917.35", "i": "41.15", "p": "19", "w": "2,000.00"},
{"b": "7,950.34", "i": "32.99", "p": "20", "w": "2,000.00"},
{"b": "5,975.13", "i": "24.79", "p": "21", "w": "2,000.00"},
{"b": "3,991.69", "i": "16.56", "p": "22", "w": "2,000.00"},
{"b": "1,999.99", "i": "8.30", "p": "23", "w": "2,000.00"},
{"b": "0.00", "i": "0.00", "p": "24", "w": "1,999.99"},
],
"time_scale": "Mes",
"total_int": 2222.2400000000002,
"total_wdr": 47999.99,
}
RETIRO_PARA_FONDO_JSON_0 = {
"ret_fund": 100000,
"num_of_years": 2,
"freq": 12,
"rate": 4,
"wdr_when": 0,
}
RETIRO_PARA_FONDO_JSON_1 = {
"ret_fund": 100000,
"num_of_years": 2,
"freq": 12,
"rate": 4,
"wdr_when": 1,
}
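# Conversely, the RETIRO_PARA_FONDO cases appear to solve for the regular
# withdrawal that exactly exhausts a given fund, i.e. the annuity payment
# formula. Same assumptions and caveats as the sketch above; the helper
# name is ours:
def _expected_reg_wdr(ret_fund, num_of_years, freq, rate, wdr_when):
    r = rate / 100.0 / freq  # periodic rate
    n = num_of_years * freq  # number of withdrawals
    pmt = ret_fund * r / (1 - (1 + r) ** -n)  # ordinary annuity payment
    if wdr_when == 1:  # earlier withdrawals allow a slightly smaller payment
        pmt /= 1 + r
    return pmt
# _expected_reg_wdr(**RETIRO_PARA_FONDO_JSON_0) ~= 4342.5 (reg_wdr: 4342.49)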
# noinspection DuplicatedCode
RETIRO_PARA_FONDO_RESULT_0 = {
"a_interests": [
333.33,
653.3,
959.86,
1252.97,
1532.58,
1798.65,
2051.13,
2289.98,
2515.15,
2726.59,
2924.26,
3108.12,
3278.11,
3434.2,
3576.33,
3704.46,
3818.54,
3918.53,
4004.38,
4076.04,
4133.46,
4176.6,
4205.41,
4219.84,
],
"a_withdrawals": [
4342.49,
8684.98,
13027.47,
17369.96,
21712.45,
26054.94,
30397.43,
34739.92,
39082.41,
43424.9,
47767.39,
52109.88,
56452.37,
60794.86,
65137.35,
69479.84,
73822.33,
78164.82,
82507.31,
86849.8,
91192.29,
95534.78,
99877.27,
104219.76,
],
"balances": [
95990.84,
91968.32,
87932.39,
83883.01000000001,
79820.13,
75743.70999999999,
71653.70000000001,
67550.06000000001,
63432.74000000001,
59301.69000000001,
55156.87000000002,
50998.24000000002,
46825.74000000002,
42639.34000000002,
38438.980000000025,
34224.62000000002,
29996.210000000014,
25753.710000000006,
21497.070000000003,
17226.239999999998,
12941.169999999991,
8641.819999999985,
4328.139999999981,
0.07999999997628038,
],
"periods": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
],
"reg_wdr": 4342.49,
"ret_fund": 100000.0,
"table": [
{"b": "95,990.84", "i": "333.33", "p": "1", "w": "4,342.49"},
{"b": "91,968.32", "i": "319.97", "p": "2", "w": "4,342.49"},
{"b": "87,932.39", "i": "306.56", "p": "3", "w": "4,342.49"},
{"b": "83,883.01", "i": "293.11", "p": "4", "w": "4,342.49"},
{"b": "79,820.13", "i": "279.61", "p": "5", "w": "4,342.49"},
{"b": "75,743.71", "i": "266.07", "p": "6", "w": "4,342.49"},
{"b": "71,653.70", "i": "252.48", "p": "7", "w": | |
#!/usr/bin/python
##------------------------------------------------------------------------------------------------------------------
## Module: demoPPP.py
## Release Information:
## V1.0.0 (<NAME>, 06/29/2012) : Initial release
## V1.1.0 (<NAME>, 11/12/2012) : Added connection testing
## V1.1.1 (<NAME>, 11/13/2012) : Bug: While attempting to determine
## if the script was already
## running, the script detected
## itself and exited.
##
## Fix: Re-coded to check for multiple
## pid's.
## V1.2.0 (<NAME>, 11/14/2012) : Added Start | Stop command line options
## V1.3.0 (<NAME>, 11/21/2012) : Bug: Serial port opened before
## checks for script already
## running, and pppd running
## checks. Caused serial port
## to lock on a blocking read.
##
## Fix: Re-coded to check for script
## already running, and pppd
## running before opening serial
## port. Future release will
## handle lock files.
## V1.4.0 (<NAME>, 01/11/2013) : Remove locks on ttyUSB0 and ttyACM0
## Code cleanup
## V1.4.1 (<NAME>, 01/17/2013) : Added delay before powering on Plug-in Terminus, allows
## PWRMON to drop after UART set to high-Z and VBUS disabled
## V2.1.0 (<NAME>, 01/29/2013) : Demo Release V2.1
##------------------------------------------------------------------------------------------------------------------
##------------------------------------------------------------------------------------------------------------------
## Description:
##
## This script starts or stops a PPP connection over the cellular network using the Janus
## Plug-in Terminus embedded in the 400AP. This is one way to handle a PPP connection
## with the 400AP and was created as an example. It is recommended to execute this at
## start-up via an S script in the /etc/init.d directory. Your application can also
## execute this script when needed.
##------------------------------------------------------------------------------------------------------------------
##------------------------------------------------------------------------------------------------------------------
## Notes:
## 1.) Works with the following Plug-in Terminus models
## GSM865CF
## CDMA864CF
## HSPA910CF
##------------------------------------------------------------------------------------------------------------------
#
#Copyright 2013, Janus Remote Communications
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright
#notice, this list of conditions and the following disclaimer in
#the documentation and/or other materials provided with the distribution.
#
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
#IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
#TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
import syslog
import time
import traceback
# NOTE: application-specific modules used further below (myGPIO, ATC,
# NETWORK, SOCKET_400AP, myApp) are assumed to be provided by the
# surrounding 400AP demo framework.
def get_pids(process_name):
## TWH, 11/13/2012: Recoded to return a list of pids, return list without whitespace characters
return subprocess.check_output('pidof ' + str(process_name),shell=True).rstrip().split(' ')
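# Hedged usage sketch: `pidof` exits non-zero when no matching process
# exists, so callers must catch CalledProcessError, as ppp_cleanup() below
# does:
#
#     try:
#         pids = get_pids('pppd')            # e.g. ['1234', '1250']
#     except subprocess.CalledProcessError:  # no pppd running
#         pids = []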
def ppp_cleanup():
try:
try:
#kill all running pppd processes
while(1):
pids = get_pids('pppd')
os.kill(int(pids[0]),9)
#print 'Process killed {' + str(pid[0]) + '}'
except subprocess.CalledProcessError, e:
#print 'No PPP process running: ' + str(e)
pass
except:
#print syslog.LOG_ERR, traceback.format_exc()
syslog.syslog(syslog.LOG_ERR, traceback.format_exc())
return -1
#Delete ppp0.pid if exists
subprocess.check_output('rm -f /var/run/ppp0.pid',shell=True)
#Remove Lock if exists
subprocess.check_output('rm -f /var/lock/LCK..ttyS1',shell=True)
subprocess.check_output('rm -f /var/lock/LCK..ttyACM0',shell=True)
subprocess.check_output('rm -f /var/lock/LCK..ttyUSB0',shell=True)
except:
#print syslog.LOG_ERR, traceback.format_exc()
syslog.syslog(syslog.LOG_ERR, traceback.format_exc())
return -1
return 0
def main(appArgv=''):
returnVal = -1
# return status:
# -1: Un-handled Exception occurred
# 0: Method exited without error
# 1: Error occurred
try:
if appArgv == 'start':
#Bring up ppp0 network interface
rtnList = myGPIO.initIO ()
if rtnList[0] == -1: raise UserWarning
#Inhibit VBUS voltage
rtnList = myGPIO.setEnableVbus(0)
if rtnList[0] == -1: raise UserWarning
#High-Z all UART pins driving into Plug-in Terminus
rtnList = myGPIO.setUartEnable(1)
if rtnList[0] == -1: raise UserWarning
#Give the USB host some time to disconnect radio
time.sleep(4)
rtnList = myGPIO.setPoweredState(1,15)
if rtnList[0] == -1 or rtnList[0] == -2: raise UserWarning
#Enable UART I/O
rtnList = myGPIO.setUartEnable(0)
if rtnList[0] == -1: raise UserWarning
#SIM status control - to avoid the 'SIM busy' error
#The following sequence loops forever until SIM card is ready for use
#
# Note: For GSM devices, will loop until a SIM card is installed
# or timeout occurs
#
#Wait for module to turn on
start = time.time()
timeout = 60
print 'SIM Verification Cycle'
rtnList = ATC.sendAtCmd('AT+CPBS?',ATC.properties.CMD_TERMINATOR,1)
if (rtnList[0] == -1): raise UserWarning
if rtnList[1].find("+CPBS")<0:
print 'SIM busy! ....waiting!\n'
while rtnList[1].find("+CPBS:")< 0 :
rtnList = ATC.sendAtCmd('AT+CPBS?',ATC.properties.CMD_TERMINATOR,1)
if (rtnList[0] == -1): raise UserWarning
if (time.time() - start > timeout):
print 'Check if SIM card is installed'
syslog.syslog('Check if SIM card is installed')
raise UserWarning
time.sleep(1)
print 'SIM ready'
#Check if cellular device model identification code matches configuration file
rtnList = ATC.sendAtCmd('AT+CGMM',ATC.properties.CMD_TERMINATOR,5)
if (rtnList[0] == -1) or (rtnList[0] == -2) or (rtnList[0] == 0) or rtnList[1] == "ERROR": raise UserWarning
if rtnList[0] == 1:
if (myApp.CGMM != rtnList[1]):
syslog.syslog('demo400ap.conf file defines CGMM=' + myApp.CGMM + ', but 400AP has ' + rtnList[1] + ' installed.')
syslog.syslog('Please correct the demo400ap.conf file and re-run script!')
print 'demo400ap.conf file defines CGMM=' + myApp.CGMM + ', but 400AP has ' + rtnList[1] + ' installed.'
print 'Please correct the demo400ap.conf file and re-run script!'
raise UserWarning
# Configuration specific AT Command setup
if (myApp.CGMM == 'GE865') or (myApp.CGMM == 'HE910'):
#GSM 2.5G - GSM865CF
#or
#HSPA 3G - HSPA910CF
if (myApp.CGMM == 'HE910'):
rtnList = myGPIO.setEnableVbus (1)
if rtnList[0] == -1: raise UserWarning
#*****Caution: magic number; needs a better solution *****
time.sleep(2)
#load ACM driver
res = subprocess.call('modprobe cdc-acm', shell=True)
#Scan /sys and populate /dev
res = subprocess.call('mdev -s', shell=True)
#Disable Authentication
rtnList = ATC.sendAtCmd('AT#GAUTH=0',ATC.properties.CMD_TERMINATOR,5)
if (rtnList[0] == -1) or (rtnList[0] == -2) or (rtnList[0] == 0) or rtnList[1] == "ERROR": raise UserWarning
#Set Network specific settings
rtnList = NETWORK.initNetwork(myApp.CGMM,myApp.ENS)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
#Initialize SOCKET communications
rtnList = SOCKET_400AP.init(myApp.CGMM,'1',myApp.APN)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
#Wait for Network registration
rtnList = NETWORK.isRegistered(120)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
#Wait for Network registration
rtnList = NETWORK.isDataAttached(myApp.CGMM,120)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
if (myApp.CGMM == 'HE910'):
#Start ppp and create network interface ppp0
res = subprocess.call('pppd -d -detach /dev/ttyACM0 115200 file /etc/ppp/peers/modem_hspa910cf &', shell=True)
else:
#Start ppp and create network interface ppp0
res = subprocess.call('pppd -d -detach /dev/ttyS1 115200 file /etc/ppp/peers/modem_gsm865cf &', shell=True)
elif (myApp.CGMM == 'CC864-DUAL'):
#CDMA 2.5G
#CDMA864CF
rtnList = myGPIO.setEnableVbus (1)
if rtnList[0] == -1: raise UserWarning
#*****Caution: magic number; needs a better solution *****
time.sleep(2)
#load option driver
res = subprocess.call('modprobe option', shell=True)
#Scan /sys and populate /dev
res = subprocess.call('mdev -s', shell=True)
#Check if CDMA is provisioned for use on the network
#Set Network specific settings
rtnList = NETWORK.initNetwork(myApp.CGMM,myApp.ENS)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
#Initialize SOCKET communications
rtnList = SOCKET_400AP.init(myApp.CGMM,'1',myApp.APN)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
#Wait for Network registration
rtnList = NETWORK.isRegistered(120)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
#Wait for Network registration
rtnList = NETWORK.isDataAttached(myApp.CGMM,120)
if (rtnList[0] == -1) or (rtnList[0] == -2) or rtnList[1] == "ERROR": raise UserWarning
#Start ppp and create network interface ppp0
res = subprocess.call('pppd -d -detach file /etc/ppp/peers/modem_cdma864cf &', shell=True)
returnVal = 0
else:
#Inhibit VBUS voltage
rtnList = myGPIO.setEnableVbus(0)
if rtnList[0] == -1: raise UserWarning
#Give the USB host some time to disconnect radio
time.sleep(4)
#High-Z all UART pins driving into Plug-in Terminus
rtnList = myGPIO.setUartEnable(1)
if rtnList[0] == -1: raise UserWarning
#Turn off Plug-in Terminus
rtnList = myGPIO.setPoweredState(0,15)
if rtnList[0] == -1: raise UserWarning
#Test if radio turned off
if rtnList[0] == -2:
#Power state change timed out, Force power-off, Initialize Radio I/0
print 'Radio failed to turn off, Forcing power-down'
#Disable Plug-in Terminus on-board regulator
rtnList = myGPIO.setEnableSupply (1)
if rtnList[0] == -1: raise UserWarning
#RESET | |
"""Generate training input/output pairs."""
raise NotImplementedError('Subclasses must override this method.')
def fidelity_test(self, *args):
"""Test the fidelity function using a different method."""
raise NotImplementedError('Subclasses must override fidelity_test().')
def fidelity(self, *args):
"""Compute the cost function of the model."""
raise NotImplementedError('Subclasses must override fidelity().')
class QubitNetworkGateModel(QubitNetworkModel):
"""Model to be used for training network to reproduce a gate.
This is the class to be used to train the network to reproduce a
target gate on a subset of the qubits (the "system" qubits).
"""
# pylint: disable=W0221
def __init__(self, num_qubits=None, num_system_qubits=None,
interactions=None,
net_topology=None,
sympy_expr=None,
free_parameters_order=None,
ancillae_state=None,
initial_values=None,
target_gate=None):
super().__init__(
num_qubits=num_qubits,
interactions=interactions,
net_topology=net_topology,
sympy_expr=sympy_expr,
free_parameters_order=free_parameters_order,
initial_values=initial_values)
# parameters initialization
self.ancillae_state = None # initial values for ancillae (if any)
self.num_system_qubits = None # number of input/output qubits
self.target_gate = target_gate
self.outputs_size = None # size of complex output ket states
# If num_system_qubits has not been given, then either there are no
# ancillae, or there are ancillae whose number is implicitly given
# through the `ancillae_state` parameter
if num_system_qubits is None:
if ancillae_state is None:
self.num_system_qubits = self.num_qubits
else:
num_ancillae = int(np.log2(ancillae_state.shape[0]))
self.num_system_qubits = self.num_qubits - num_ancillae
else:
self.num_system_qubits = num_system_qubits
# Initialise the ancillae, if any
if self.num_system_qubits < self.num_qubits:
self._initialize_ancillae(ancillae_state)
# set size of complex output ket states
self.outputs_size = 2**(self.num_qubits - self.num_system_qubits)
def __repr__(self):
message = 'QubitNetworkModel object:'
message += '\n Number of system qubits: {}'.format(
self.num_system_qubits)
message += '\n Number of ancillary qubits: {}'.format(
self.num_qubits - self.num_system_qubits)
return message
def _initialize_ancillae(self, ancillae_state):
"""Initialize ancillae states, as a qutip.Qobj object.
The generated state has every ancillary qubit in the 0 state,
unless otherwise specified.
"""
# the number of system qubits should have already been extracted and
# stored in `num_system_qubits`
num_ancillae = self.num_qubits - self.num_system_qubits
if ancillae_state is not None:
self.ancillae_state = ancillae_state
else:
state = qutip.tensor([qutip.basis(2, 0)
for _ in range(num_ancillae)])
self.ancillae_state = state
def _target_outputs_from_inputs_open_map(self, input_states):
raise NotImplementedError('Not implemented yet')
# NOTE: the code below is unreachable until the raise above is removed;
# it is kept as a prototype for the open-map case.
# In the case of an open map target, all target states are density
# matrices, instead of just kets as they would be when the target is a
# unitary gate.
target_states = []
for psi in input_states:
# the open evolution is implemented vectorizing density
# matrices and maps: `A * rho * B` becomes
# `unvec(vec(tensor(A, B.T)) * vec(rho))`.
vec_dm_ket = qutip.operator_to_vector(qutip.ket2dm(psi))
evolved_ket = self.target_gate * vec_dm_ket
evolved_ket = qutip.vector_to_operator(evolved_ket)
target_states.append(evolved_ket)
return target_states
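# A hedged standalone check of the vectorization identity referenced in the
# comment above, using qutip's column-stacking convention (spre/spost build
# the superoperators for left/right multiplication):
#
#     A, B = qutip.rand_unitary(2), qutip.rand_unitary(2)
#     rho = qutip.rand_dm(2)
#     lhs = qutip.operator_to_vector(A * rho * B)
#     rhs = (qutip.spre(A) * qutip.spost(B)) * qutip.operator_to_vector(rho)
#     assert (lhs - rhs).norm() < 1e-10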
def _target_outputs_from_inputs(self, input_states):
# defer operation to other method for open maps
if self.target_gate.issuper:
return self._target_outputs_from_inputs_open_map(input_states)
# unitary evolution of input states. `target_gate` is qutip obj
return [self.target_gate * psi for psi in input_states]
def fidelity_test(self, n_samples=10, return_mean=True):
"""Compute fidelity with current interaction values with qutip.
This can be used to compute the fidelity avoiding the
compilation of the theano graph done by `self.fidelity`.
Raises
------
TargetGateNotGivenError if not target gate has been specified.
"""
if self.target_gate is None:
raise TargetGateNotGivenError('You must give a target gate'
' first.')
target_gate = self.target_gate
gate = qutip.Qobj(self.get_current_gate(),
dims=[[2] * self.num_qubits] * 2)
# each element of `fidelities` will contain the fidelity obtained with
# a single randomly generated input state
fidelities = np.zeros(n_samples)
for idx in range(fidelities.shape[0]):
# generate random input state (over system qubits only)
psi_in = qutip.rand_ket_haar(2 ** self.num_system_qubits)
psi_in.dims = [
[2] * self.num_system_qubits, [1] * self.num_system_qubits]
# embed it into the bigger system+ancilla space (if necessary)
if self.num_system_qubits < self.num_qubits:
Psi_in = qutip.tensor(psi_in, self.ancillae_state)
else:
Psi_in = psi_in
# evolve input state
Psi_out = gate * Psi_in
# trace out ancilla (if there is an ancilla to trace)
if self.num_system_qubits < self.num_qubits:
dm_out = Psi_out.ptrace(range(self.num_system_qubits))
else:
dm_out = qutip.ket2dm(Psi_out)
# compute fidelity
# fidelity = (psi_in.dag() * target_gate.dag() *
# dm_out * target_gate * psi_in)
fidelities[idx] = qutip.fidelity(target_gate * psi_in, dm_out)**2
if return_mean:
return fidelities.mean()
else:
return fidelities
def average_fidelity(self):
"""Compute average fidelity using exact formula."""
if self.num_qubits > self.num_system_qubits:
dim_system = 2**self.num_system_qubits
map_as_tensor = nat.big_unitary_to_map(self.get_current_gate(),
dim_system)
return nat.exact_average_fidelity_mapVSunitary(map_as_tensor,
self.target_gate)
else:
return nat.exact_average_fidelity_unitaryVSunitary(
self.get_current_gate(), self.target_gate
)
def fidelity(self, return_mean=True):
"""Return theano graph for fidelity given training states.
In the output theano expression `fidelities`, the tensors
`output_states` and `target_states` are left "hanging", and will
be replaced during the training through the `givens` parameter
of `theano.function`.
"""
# `output_states` are the obtained output states, while
# `self.outputs` are the output states we want (the training ones).
states = TheanoQstates(self.inputs)
states.evolve_all_kets(self.compute_evolution_matrix())
num_ancillae = self.num_qubits - self.num_system_qubits
fidelities = states.fidelities(self.outputs, num_ancillae)
if return_mean:
return T.mean(fidelities)
else:
return fidelities
def generate_training_states(self, num_states):
"""Create training states for the training.
This function generates every time it is called a set of
input and corresponding target output states, to be used during
training. These values will be used during the computation
through the `givens` parameter of `theano.function`.
Returns
-------
A tuple with two elements: training vectors and labels.
NOTE: The training and target vectors have different lengths!
The former span the whole space while the latter only the
system one.
training_states: an array of vectors.
Each vector represents a state in the full system+ancilla space,
in big real form. These states span the whole space simply
out of convenience, but are obtained as tensor product of
the target states over the system qubits with the initial
states of the ancillary qubits.
target_states: an array of vectors.
Each vector represents a state spanning only the system qubits,
in big real form. Every such state is generated by evolving
the corresponding `training_state` through the matrix
`target_unitary`.
This generation method is highly non-optimal. However, it takes
about ~250ms to generate a (standard) training set of 100 states,
which amounts to ~5 minutes over 1000 epochs with a training dataset
size of 100, making this factor not particularly important.
"""
if self.target_gate is None:
raise TargetGateNotGivenError('Target gate not set yet.')
# 1) Generate random input states OVER SYSTEM QUBITS
training_inputs = _random_input_states(num_states,
self.num_system_qubits)
# 2) Compute corresponding output states
target_outputs = self._target_outputs_from_inputs(training_inputs)
# 3) Tensor product of training input states with ancillae
for idx, ket in enumerate(training_inputs):
if self.num_system_qubits < self.num_qubits:
ket = qutip.tensor(ket, self.ancillae_state)
training_inputs[idx] = complex2bigreal(ket)
training_inputs = np.asarray(training_inputs)
# 4) Convert target outputs in big real form.
# NOTE: the target states are kets if the target gate is unitary,
# and density matrices for target open maps.
target_outputs = np.asarray(
[complex2bigreal(st) for st in target_outputs])
# return results as matrices
_, len_inputs, _ = training_inputs.shape
_, len_outputs, _ = target_outputs.shape
training_inputs = training_inputs.reshape((num_states, len_inputs))
target_outputs = target_outputs.reshape((num_states, len_outputs))
return training_inputs, target_outputs
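# Hypothetical usage sketch (constructor arguments are illustrative, not
# taken from this file):
#
#     model = QubitNetworkGateModel(num_qubits=3, num_system_qubits=2,
#                                   interactions='all', target_gate=gate)
#     inputs, targets = model.generate_training_states(100)
#     # inputs:  shape (100, 2 * 2**3), big real kets over system + ancillae
#     # targets: shape (100, 2 * 2**2), big real kets over system qubits only
#     #          (for a unitary target; open maps yield density matrices)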
class QubitNetworkDecisionProblemModel(QubitNetworkModel):
"""Model to be used to train network to solve decision problems.
Example of target_function:
```
def decision_fn(input1, input2):
if qutip.fidelity(input1, input2) > 0.8:
return qutip.basis(2, 1)
else:
return qutip.basis(2, 0)
```
"""
# pylint: disable=W0221
def __init__(self,
num_qubits=None,
num_qubits_per_input=None,
num_qubits_answer=None,
target_function=None,
interactions=None,
sympy_expr=None,
free_parameters_order=None,
initial_values=None):
super().__init__(
num_qubits=num_qubits,
interactions=interactions,
sympy_expr=sympy_expr,
free_parameters_order=free_parameters_order,
initial_values=initial_values)
# define new attributes
self.ancillae_state = None # initial values for ancillae
self.num_inputs = None # the number of inputs of the problem
self.num_qubits_per_input = None # number of qubits used for inputs
self.num_qubits_answer = None # number of qubits used for answer
self.num_qubits_ancillae = None # total number of qubits to trace out
self.num_qubits_processor = None # num of non-answer non-input qubits
self.target_function = None # the function to compute answers
self.processor_state = None # initial state of processor qubits
self.outputs_size = None # size of output state ket vectors
# ---- assign values and define defaults ----
if num_qubits_per_input is None:
raise ValueError('The number of qubits to be used as inputs'
' have to be specified.')
if isinstance(num_qubits_per_input, numbers.Number):
self.num_qubits_per_input = [num_qubits_per_input]
elif isinstance(num_qubits_per_input, (list, tuple)):
self.num_qubits_per_input = num_qubits_per_input
else:
raise ValueError('The value of num_qubits_per_input should'
' be an integer of list of positive integers.')
self.num_inputs = len(self.num_qubits_per_input)
if num_qubits_answer is None:
num_qubits_answer = 1
self.num_qubits_answer = num_qubits_answer
self.num_qubits_ancillae = self.num_qubits - self.num_qubits_answer
# check that the specified numbers of qubits make sense
self.num_qubits_processor = | |
procs = []
for mi, atoms1 in enumerate(g) :
ress1 = atoms1[0].residue
ressN = atoms1[-1].residue
print " - %d/%d, %d-%d" % (mi+1, numProc, ress1.id.position, ressN.id.position)
procAtomsPath = os.path.join ( tempPath, "%d_atoms.txt" % mi )
fout = open ( procAtomsPath, "w" )
for at in atoms1 :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
p = at.coord()
#fout.write ( "%d.%s.%s.%s %.3f %.3f %.3f\n" % (r.id.position,r.id.chainId,at.name,altLoc,p.x,p.y,p.z) )
fout.write ( "%d.%s.%s.%s\n" % (r.id.position,r.id.chainId,at.name,altLoc) )
fout.close()
nmap_path = os.path.join ( tempPath, "%d_map.mrc" % mi )
if 1 :
nmap = MaskMapResize ( atoms1, 6, dmap, nmap_path )
else :
import shutil
shutil.copyfile ( dmap.data.path, nmap_path )
#args = [chimeraPath, '--nogui', '--silent', '--nostatus', mol.openedAs[0], nmap_path, mapQPPath]
#args = [chimeraPath, '--nogui', '--silent', '--nostatus', nmap_path, dmap.data.path, mapQPPath]
args = [chimeraPath, '--nogui', '--silent', '--nostatus', nmap_path, mapQPPath]
if mi == 0 :
print "running proc:",
for arg in args :
print arg,
print ""
fout = open ( os.path.join(tempPath, "%d.log" % mi), "w" )
foute = open ( os.path.join(tempPath, "%d_err.log" % mi), "w" )
p = subprocess.Popen(args, stdout=fout, stderr=foute, cwd=inDir)
procs.append ( [mi, p, fout, foute] )
print ""
print "Waiting...",
for mi, p, fout, foute in procs :
p.wait()
fout.close()
foute.close()
print "%d" % mi,
print ""
atids = {}
for r in mol.residues :
for at in r.atoms :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
atids["%d.%s.%s.%s" % (r.id.position,r.id.chainId,at.name,altLoc)] = at
print ""
print "Getting...",
for mi, p, fout, foute in procs :
fin = os.path.join(tempPath, "%d_out.txt" % mi)
#print " - getting from: ", fin
fp = open ( fin )
for l in fp :
#print " - ", l
try :
atId, Q = l.split()
except :
print " - err line: ", l
at = atids[atId.strip()]
#at = r.atomsMap[atName][0]
at.Q = float(Q)
#at.CC = float(cc)
at.bfactor = at.Q
fp.close()
if mi == 0 :
print ""
print ""
print "__StdOut for process %d__" % mi
foute = open ( os.path.join(tempPath, "%d.log" % mi), "r" )
for l in foute :
print l,
print ""
foute.close()
print "__StdErr file for process %d__" % mi
foute = open ( os.path.join(tempPath, "%d_err.log" % mi), "r" )
for l in foute :
print l,
print ""
foute.close()
if 1 :
for mi, p, fout, foute in procs :
print "Removing temp files",
os.remove ( os.path.join(tempPath, "%d_out.txt" % mi) )
try :
os.remove ( os.path.join(tempPath, "%d_stat.txt" % mi) )
except :
print " - did not find _stat file"
pass
os.remove ( os.path.join(tempPath, "%d_atoms.txt" % mi) )
os.remove ( os.path.join(tempPath, "%d_map.mrc" % mi) )
os.remove ( os.path.join(tempPath, "%d.log" % mi) )
os.remove ( os.path.join(tempPath, "%d_err.log" % mi) )
print "%d" % mi,
print ""
os.remove ( os.path.join(tempPath, "all_atoms.txt") )
os.rmdir ( tempPath )
end = time.time()
print ""
print " - done, time: %f" % ( end-start )
totSec = end - start
totMin = numpy.floor ( totSec / 60.0 )
totSec = totSec - totMin * 60.0
print " - done, time: %.0f min, %.1f sec" % ( totMin, totSec )
SaveQFile ( mol, cid, dmap, sigma )
Qavg = QStats1 ( mol, cid )
return Qavg
def QStats1 ( mol, chainId='All', doCalcResQ=True ) :
totQ, totN = 0.0, 0.0
#QT, QN = { "Protein":0.0, "Nucleic":0.0, "Other":0.0 }, { "Protein":0.0, "Nucleic":0.0, "Other":0.0}
QT, QN = {}, {}
QT_, QN_ = {}, {}
QH, QL = {}, {}
if chainId == None :
chainId = "All"
print "Q for %d res, chain %s" % ( len(mol.residues), chainId )
for r in mol.residues :
if r.id.chainId == chainId or chainId == "All" :
if doCalcResQ :
CalcResQ (r, None, None, useOld=True )
for at in r.atoms :
if at.element.name == "H" :
continue
if hasattr ( at, "Q") :
totQ += at.Q
totN += 1.0
tp = "Other"
if at.residue.isProt : tp = "Protein"
elif at.residue.isNA : tp = "Nucleic"
else : tp = at.residue.type
if tp in QT :
QT[tp] += at.Q; QN[tp] += 1.0;
QH[tp] = max(QH[tp], at.Q); QL[tp] = min(QL[tp], at.Q)
else :
QT[tp] = at.Q; QN[tp] = 1.0
QH[tp] = at.Q; QL[tp] = at.Q
tps = r.id.chainId + ":" + tp
if tps in QT_ :
QT_[tps] += at.Q; QN_[tps] += 1.0
else :
QT_[tps] = at.Q; QN_[tps] = 1.0
#for tp in ["Other", "Protein", "Nucleic"] :
print ""
print "Chain\tAvg.Q-score\tEst.Res.(A)"
tpk = QT_.keys()
tpk.sort()
for tp in tpk :
if QN_[tp] > 0 :
avgQ = QT_[tp]/QN_[tp]
avgR = 0
if "nucleic" in tp.lower() :
avgR = (avgQ-1.0673)/-0.1574
else :
avgR = (avgQ-1.1244)/-0.1794
print " %s\t%.3f\t%.2f" % (tp, avgQ, avgR )
else :
print " %s\tn/a" % (tp)
Q__ = { " protein":0, " nucleic":0, " water":0, " ion":0 }
#for tp in ["Other", "Protein", "Nucleic"] :
print ""
print "Type\tAvg.Q-score\tEst.Res.(A)"
for tp in QT.keys() :
if QN[tp] > 0 :
avgQ = QT[tp]/QN[tp]
avgR = 0
if "nucleic" in tp.lower() :
avgR = (avgQ-1.0673)/-0.1574
Q__[" nucleic"] = avgQ
elif "protein" in tp.lower() :
avgR = (avgQ-1.1244)/-0.1794
Q__[" protein"] = avgQ
elif "hoh" in tp.lower() :
avgR = (avgQ-1.1244)/-0.1794
Q__[" water"] = avgQ
elif tp.upper() in chargedIons :
avgR = (avgQ-1.1244)/-0.1794
Q__[" ion"] = avgQ
else :
avgR = (avgQ-1.1244)/-0.1794
Q__[tp] = avgQ
print " %s\t%.3f\t%.2f" % (tp, avgQ, avgR )
else :
print " %s\tn/a" % (tp)
print ""
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%s" % tp,
print ""
print "Avg.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
avgQ = QT[tp]/QN[tp]
print "\t%.3f" % avgQ,
print ""
print "Max.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%.3f" % QH[tp],
print ""
print "Min.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%.3f" % QL[tp],
print ""
print ""
#return Q__
return totQ/totN
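# Hedged helper capturing the linear fits used above to estimate resolution
# from an average Q-score (coefficients copied from QStats1; the function
# name is ours, not part of the original plugin):
def EstResFromQ ( avgQ, isNucleic=False ) :
    if isNucleic :
        return (avgQ-1.0673)/-0.1574
    return (avgQ-1.1244)/-0.1794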
def QStatsProt ( mol, dmap, chainId ) :
SetBBAts ( mol )
ress = []
for r in mol.residues :
if r.id.chainId == chainId and r.isProt :
ress.append ( r )
if len(ress) == 0 :
print "QstatsProt - no protein residues in chain %s" % chainId
return
sByType = {}
rByType = {}
def addType (tp, r, score) :
if not tp in sByType :
rByType[tp] = []
sByType[tp] = []
rByType[tp].append ( [score, r] )
sByType[tp].append ( [score] )
for r in ress :
if r.isProt and r.type == "LEU" :
avg = (r.atomsMap["CD1"][0].Q + r.atomsMap["CD2"][0].Q)/2.0
addType ( "LEU(CD)", r, avg )
if r.isProt and r.type == "LEU" and r.id.position==114 :
avg = (r.atomsMap["CD1"][0].Q + r.atomsMap["CD2"][0].Q)/2.0
addType ( "LEU_114(CD)", r, avg )
if r.isProt and r.type == "VAL" :
avg = (r.atomsMap["CG1"][0].Q + r.atomsMap["CG2"][0].Q)/2.0
addType ( "VAL(CG)", r, avg )
if r.isProt and r.type == "VAL" and r.id.position==33 :
avg = (r.atomsMap["CG1"][0].Q + r.atomsMap["CG2"][0].Q)/2.0
addType ( "VAL_33(CG)", r, avg )
if r.isProt and r.type == "ARG" :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG(NH)", r, avg )
if r.isProt and r.type == "ARG" and r.id.position==76 :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG_76(NH)", r, avg )
if r.isProt and r.type == "ARG" and r.id.position==9 :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG_9(NH)", r, avg )
if r.isProt and r.type == "LYS" :
avg = r.atomsMap["NZ"][0].Q
addType ( "LYS(NZ)", r, avg )
if r.isProt and r.type == "ASP" :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==42 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_42(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==131 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_131(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==171 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_171(OD)", r, avg )
if r.isProt and r.type == "GLU" :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( | |
from pyflamegpu import *
import sys, random, math
"""
FLAME GPU 2 implementation of the Boids model, using spatial3D messaging.
This is based on the FLAME GPU 1 implementation, but with dynamic generation of agents.
Agents are also clamped to be within the environment bounds, rather than wrapped as in FLAME GPU 1.
@todo - Should the agent's velocity change when it is clamped to the environment?
"""
"""
Get the length of a vector
@param x x component of the vector
@param y y component of the vector
@param z z component of the vector
@return the length of the vector
"""
def vec3Length(x, y, z):
return math.sqrt(x * x + y * y + z * z);
"""
Add a scalar to a vector in-place.
Note: Python floats are passed by value, so this host-side helper (and the
ones below) cannot actually mutate the caller's variables; the working
in-place versions are the device functions embedded in the RTC strings below.
@param x x component of the vector
@param y y component of the vector
@param z z component of the vector
@param value scalar value to add
"""
def vec3Add(x, y, z, value):
x += value;
y += value;
z += value;
"""
Subtract a scalar from a vector in-place
@param x x component of the vector
@param y y component of the vector
@param z z component of the vector
@param value scalar value to subtract
"""
def vec3Sub(x, y, z, value):
x -= value;
y -= value;
z -= value;
"""
Multiply a vector by a scalar value in-place
@param x x component of the vector
@param y y component of the vector
@param z z component of the vector
@param multiplier scalar value to multiply by
"""
def vec3Mult(x, y, z, multiplier):
x *= multiplier;
y *= multiplier;
z *= multiplier;
"""
Divide a vector by a scalar value in-place
@param x x component of the vector
@param y y component of the vector
@param z z component of the vector
@param divisor scalar value to divide by
"""
def vec3Div(x, y, z, divisor):
x /= divisor;
y /= divisor;
z /= divisor;
"""
Normalize a 3 component vector in-place
@param x x component of the vector
@param y y component of the vector
@param z z component of the vector
"""
def vec3Normalize(x, y, z):
# Get the length
length = vec3Length(x, y, z);
vec3Div(x, y, z, length);
"""
Clamp each component of a 3-part position to lie within a minimum and maximum value.
Performs the operation in place
Unlike the FLAME GPU 1 example, this is a clamping operation, rather than wrapping.
@param x x component of the vector
@param y y component of the vector
@param z z component of the vector
@param MIN_POSITION the minimum value for each component
@param MAX_POSITION the maximum value for each component
"""
def clampPosition(x, y, z, MIN_POSITION, MAX_POSITION):
x = MIN_POSITION if (x < MIN_POSITION) else x;
x = MAX_POSITION if (x > MAX_POSITION) else x;
y = MIN_POSITION if (y < MIN_POSITION) else y;
y = MAX_POSITION if (y > MAX_POSITION) else y;
z = MIN_POSITION if (z < MIN_POSITION) else z;
z = MAX_POSITION if (z > MAX_POSITION) else z;
# Change to false if pyflamegpu has not been built with visualisation support
VISUALISATION = True;
"""
outputdata agent function for Boid agents, which outputs publicly visible properties to a message list
"""
outputdata = r"""
FLAMEGPU_AGENT_FUNCTION(outputdata, flamegpu::MessageNone, flamegpu::MessageSpatial3D) {
// Output each agents publicly visible properties.
FLAMEGPU->message_out.setVariable<flamegpu::id_t>("id", FLAMEGPU->getID());
FLAMEGPU->message_out.setVariable<float>("x", FLAMEGPU->getVariable<float>("x"));
FLAMEGPU->message_out.setVariable<float>("y", FLAMEGPU->getVariable<float>("y"));
FLAMEGPU->message_out.setVariable<float>("z", FLAMEGPU->getVariable<float>("z"));
FLAMEGPU->message_out.setVariable<float>("fx", FLAMEGPU->getVariable<float>("fx"));
FLAMEGPU->message_out.setVariable<float>("fy", FLAMEGPU->getVariable<float>("fy"));
FLAMEGPU->message_out.setVariable<float>("fz", FLAMEGPU->getVariable<float>("fz"));
return flamegpu::ALIVE;
}
"""
"""
inputdata agent function for Boid agents, which reads data from neighbouring Boid agents, to perform the boid flocking model.
"""
inputdata = r"""
// Vector utility functions, see top of file for versions with commentary
FLAMEGPU_HOST_DEVICE_FUNCTION float vec3Length(const float x, const float y, const float z) {
return sqrtf(x * x + y * y + z * z);
}
FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Add(float &x, float &y, float &z, const float value) {
x += value;
y += value;
z += value;
}
FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Sub(float &x, float &y, float &z, const float value) {
x -= value;
y -= value;
z -= value;
}
FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Mult(float &x, float &y, float &z, const float multiplier) {
x *= multiplier;
y *= multiplier;
z *= multiplier;
}
FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Div(float &x, float &y, float &z, const float divisor) {
x /= divisor;
y /= divisor;
z /= divisor;
}
FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Normalize(float &x, float &y, float &z) {
// Get the length
float length = vec3Length(x, y, z);
vec3Div(x, y, z, length);
}
FLAMEGPU_HOST_DEVICE_FUNCTION void clampPosition(float &x, float &y, float &z, const float MIN_POSITION, const float MAX_POSITION) {
x = (x < MIN_POSITION)? MIN_POSITION: x;
x = (x > MAX_POSITION)? MAX_POSITION: x;
y = (y < MIN_POSITION)? MIN_POSITION: y;
y = (y > MAX_POSITION)? MAX_POSITION: y;
z = (z < MIN_POSITION)? MIN_POSITION: z;
z = (z > MAX_POSITION)? MAX_POSITION: z;
}
// Agent function
FLAMEGPU_AGENT_FUNCTION(inputdata, flamegpu::MessageSpatial3D, flamegpu::MessageNone) {
// Agent properties in local register
const flamegpu::id_t id = FLAMEGPU->getID();
// Agent position
float agent_x = FLAMEGPU->getVariable<float>("x");
float agent_y = FLAMEGPU->getVariable<float>("y");
float agent_z = FLAMEGPU->getVariable<float>("z");
// Agent velocity
float agent_fx = FLAMEGPU->getVariable<float>("fx");
float agent_fy = FLAMEGPU->getVariable<float>("fy");
float agent_fz = FLAMEGPU->getVariable<float>("fz");
// Boids perceived centre
float perceived_centre_x = 0.0f;
float perceived_centre_y = 0.0f;
float perceived_centre_z = 0.0f;
int perceived_count = 0;
// Boids global velocity matching
float global_velocity_x = 0.0f;
float global_velocity_y = 0.0f;
float global_velocity_z = 0.0f;
// Boids short range avoidance centre
float collision_centre_x = 0.0f;
float collision_centre_y = 0.0f;
float collision_centre_z = 0.0f;
int collision_count = 0;
const float INTERACTION_RADIUS = FLAMEGPU->environment.getProperty<float>("INTERACTION_RADIUS");
const float SEPARATION_RADIUS = FLAMEGPU->environment.getProperty<float>("SEPARATION_RADIUS");
// Iterate location messages, accumulating relevant data and counts.
for (const auto &message : FLAMEGPU->message_in(agent_x, agent_y, agent_z)) {
// Ignore self messages.
if (message.getVariable<flamegpu::id_t>("id") != id) {
// Get the message location and velocity.
const float message_x = message.getVariable<float>("x");
const float message_y = message.getVariable<float>("y");
const float message_z = message.getVariable<float>("z");
// Check interaction radius
float separation = vec3Length(agent_x - message_x, agent_y - message_y, agent_z - message_z);
if (separation < (INTERACTION_RADIUS)) {
// Update the perceived centre
perceived_centre_x += message_x;
perceived_centre_y += message_y;
perceived_centre_z += message_z;
perceived_count++;
// Update perceived velocity matching
const float message_fx = message.getVariable<float>("fx");
const float message_fy = message.getVariable<float>("fy");
const float message_fz = message.getVariable<float>("fz");
global_velocity_x += message_fx;
global_velocity_y += message_fy;
global_velocity_z += message_fz;
// Update collision centre
if (separation < (SEPARATION_RADIUS)) { // dependant on model size
collision_centre_x += message_x;
collision_centre_y += message_y;
collision_centre_z += message_z;
collision_count += 1;
}
}
}
}
// Divide positions/velocities by relevant counts.
// NOTE: global_velocity is divided by perceived_count and then again by
// collision_count, mirroring the original example; this may double-scale
// the velocity-matching term, and either divisor can be zero when no
// neighbours were found.
vec3Div(perceived_centre_x, perceived_centre_y, perceived_centre_z, perceived_count);
vec3Div(global_velocity_x, global_velocity_y, global_velocity_z, perceived_count);
vec3Div(global_velocity_x, global_velocity_y, global_velocity_z, collision_count);
// Total change in velocity
float velocity_change_x = 0.f;
float velocity_change_y = 0.f;
float velocity_change_z = 0.f;
// Rule 1) Steer towards perceived centre of flock (Cohesion)
float steer_velocity_x = 0.f;
float steer_velocity_y = 0.f;
float steer_velocity_z = 0.f;
if (perceived_count > 0) {
const float STEER_SCALE = FLAMEGPU->environment.getProperty<float>("STEER_SCALE");
steer_velocity_x = (perceived_centre_x - agent_x) * STEER_SCALE;
steer_velocity_y = (perceived_centre_y - agent_y) * STEER_SCALE;
steer_velocity_z = (perceived_centre_z - agent_z) * STEER_SCALE;
}
velocity_change_x += steer_velocity_x;
velocity_change_y += steer_velocity_y;
velocity_change_z += steer_velocity_z;
// Rule 2) Match neighbours speeds (Alignment)
float match_velocity_x = 0.f;
float match_velocity_y = 0.f;
float match_velocity_z = 0.f;
if (collision_count > 0) {
const float MATCH_SCALE = FLAMEGPU->environment.getProperty<float>("MATCH_SCALE");
match_velocity_x = global_velocity_x * MATCH_SCALE;
match_velocity_y = global_velocity_y * MATCH_SCALE;
match_velocity_z = global_velocity_z * MATCH_SCALE;
}
velocity_change_x += match_velocity_x;
velocity_change_y += match_velocity_y;
velocity_change_z += match_velocity_z;
// Rule 3) Avoid close range neighbours (Separation)
float avoid_velocity_x = 0.0f;
float avoid_velocity_y = 0.0f;
float avoid_velocity_z = 0.0f;
if (collision_count > 0) {
const float COLLISION_SCALE = FLAMEGPU->environment.getProperty<float>("COLLISION_SCALE");
avoid_velocity_x = (agent_x - collision_centre_x) * COLLISION_SCALE;
avoid_velocity_y = (agent_y - collision_centre_y) * COLLISION_SCALE;
avoid_velocity_z = (agent_z - collision_centre_z) * COLLISION_SCALE;
}
velocity_change_x += avoid_velocity_x;
velocity_change_y += avoid_velocity_y;
velocity_change_z += avoid_velocity_z;
// Global scale of velocity change
vec3Mult(velocity_change_x, velocity_change_y, velocity_change_z, FLAMEGPU->environment.getProperty<float>("GLOBAL_SCALE"));
// Update agent velocity
agent_fx += velocity_change_x;
agent_fy += velocity_change_y;
agent_fz += velocity_change_z;
// Bound velocity
float agent_fscale = vec3Length(agent_fx, agent_fy, agent_fz);
if (agent_fscale > 1) {
vec3Div(agent_fx, agent_fy, agent_fz, agent_fscale);
}
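// Note: clamping the speed to at most 1 means the position update below
// advances an agent by at most TIME_SCALE per step.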
// Apply the velocity
const float TIME_SCALE = FLAMEGPU->environment.getProperty<float>("TIME_SCALE");
agent_x += agent_fx * TIME_SCALE;
agent_y += agent_fy * TIME_SCALE;
agent_z += agent_fz * TIME_SCALE;
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_manage
from nova.api.openstack.compute import flavors
from nova.api.openstack.compute import flavors_extraspecs
from nova.api.openstack.compute import servers
from nova.compute import vm_states
from nova import objects
from nova.policies import flavor_extra_specs as policies
from nova.policies import flavor_manage as fm_policies
from nova.policies import servers as s_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
"""Test Flavor Extra Specs APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(FlavorExtraSpecsPolicyTest, self).setUp()
self.controller = flavors_extraspecs.FlavorExtraSpecsController()
self.flavor_ctrl = flavors.FlavorsController()
self.fm_ctrl = flavor_manage.FlavorManageController()
self.server_ctrl = servers.ServersController()
self.req = fakes.HTTPRequest.blank('')
self.server_ctrl._view_builder._add_security_grps = mock.MagicMock()
self.server_ctrl._view_builder._get_metadata = mock.MagicMock()
self.server_ctrl._view_builder._get_addresses = mock.MagicMock()
self.server_ctrl._view_builder._get_host_id = mock.MagicMock()
self.server_ctrl._view_builder._get_fault = mock.MagicMock()
self.server_ctrl._view_builder._add_host_status = mock.MagicMock()
self.instance = fake_instance.fake_instance_obj(
self.project_member_context,
id=1, uuid=uuids.fake_id, project_id=self.project_id,
vm_state=vm_states.ACTIVE)
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
self.mock_get.return_value = self.instance
fakes.stub_out_secgroup_api(
self, security_groups=[{'name': 'default'}])
self.mock_get_all = self.useFixture(fixtures.MockPatchObject(
self.server_ctrl.compute_api, 'get_all')).mock
self.mock_get_all.return_value = objects.InstanceList(
objects=[self.instance])
def get_flavor_extra_specs(context, flavor_id):
return fake_flavor.fake_flavor_obj(
self.project_member_context,
id=1, uuid=uuids.fake_id, project_id=self.project_id,
is_public=False, extra_specs={'hw:cpu_policy': 'shared'},
expected_attrs='extra_specs')
self.stub_out('nova.api.openstack.common.get_flavor',
get_flavor_extra_specs)
# Check that all are able to get flavor extra specs.
self.all_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
self.other_project_member_context,
self.other_project_reader_context
]
self.all_unauthorized_contexts = []
# Check that all contexts are able to get flavor extra specs in the
# flavor create/update responses.
self.all_system_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
self.other_project_member_context,
self.other_project_reader_context
]
self.all_system_unauthorized_contexts = []
# Check that admin is able to create, update and delete flavor
# extra specs.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to create, update and
# delete flavor extra specs.
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context
]
@mock.patch('nova.objects.Flavor.save')
def test_create_flavor_extra_specs_policy(self, mock_save):
body = {'extra_specs': {'hw:numa_nodes': '1'}}
rule_name = policies.POLICY_ROOT % 'create'
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name,
self.controller.create,
self.req, '1234',
body=body)
@mock.patch('nova.objects.Flavor._flavor_extra_specs_del')
@mock.patch('nova.objects.Flavor.save')
def test_delete_flavor_extra_specs_policy(self, mock_save, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name,
self.controller.delete,
self.req, '1234', 'hw:cpu_policy')
@mock.patch('nova.objects.Flavor.save')
def test_update_flavor_extra_specs_policy(self, mock_save):
body = {'hw:cpu_policy': 'shared'}
rule_name = policies.POLICY_ROOT % 'update'
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name,
self.controller.update,
self.req, '1234', 'hw:cpu_policy',
body=body)
def test_show_flavor_extra_specs_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
self.common_policy_check(self.all_authorized_contexts,
self.all_unauthorized_contexts,
rule_name,
self.controller.show,
self.req, '1234',
'hw:cpu_policy')
def test_index_flavor_extra_specs_policy(self):
rule_name = policies.POLICY_ROOT % 'index'
self.common_policy_check(self.all_authorized_contexts,
self.all_unauthorized_contexts,
rule_name,
self.controller.index,
self.req, '1234')
def test_flavor_detail_with_extra_specs_policy(self):
fakes.stub_out_flavor_get_all(self)
rule_name = policies.POLICY_ROOT % 'index'
req = fakes.HTTPRequest.blank('', version='2.61')
authorize_res, unauthorize_res = self.common_policy_check(
self.all_authorized_contexts, self.all_unauthorized_contexts,
rule_name, self.flavor_ctrl.detail, req,
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['flavors'][0])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['flavors'][0])
def test_flavor_show_with_extra_specs_policy(self):
fakes.stub_out_flavor_get_by_flavor_id(self)
rule_name = policies.POLICY_ROOT % 'index'
req = fakes.HTTPRequest.blank('', version='2.61')
authorize_res, unauthorize_res = self.common_policy_check(
self.all_authorized_contexts, self.all_unauthorized_contexts,
rule_name, self.flavor_ctrl.show, req, '1',
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['flavor'])
def test_flavor_create_with_extra_specs_policy(self):
rule_name = policies.POLICY_ROOT % 'index'
# 'create' policy is checked before flavor extra specs 'index' policy
# so we have to allow it for everyone otherwise it will fail first
# for unauthorized contexts.
rule = fm_policies.POLICY_ROOT % 'create'
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.61')
def fake_create(newflavor):
newflavor['flavorid'] = uuids.fake_id
newflavor["name"] = 'test'
newflavor["memory_mb"] = 512
newflavor["vcpus"] = 2
newflavor["root_gb"] = 1
newflavor["ephemeral_gb"] = 1
newflavor["swap"] = 512
newflavor["rxtx_factor"] = 1.0
newflavor["is_public"] = True
newflavor["disabled"] = False
newflavor["extra_specs"] = {}
self.stub_out("nova.objects.Flavor.create", fake_create)
body = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
}
}
authorize_res, unauthorize_res = self.common_policy_check(
self.all_system_authorized_contexts,
self.all_system_unauthorized_contexts,
rule_name, self.fm_ctrl._create, req, body=body,
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['flavor'])
@mock.patch('nova.objects.Flavor.save')
def test_flavor_update_with_extra_specs_policy(self, mock_save):
fakes.stub_out_flavor_get_by_flavor_id(self)
rule_name = policies.POLICY_ROOT % 'index'
# 'update' policy is checked before flavor extra specs 'index' policy
# so we have to allow it for everyone otherwise it will fail first
# for unauthorized contexts.
rule = fm_policies.POLICY_ROOT % 'update'
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.61')
authorize_res, unauthorize_res = self.common_policy_check(
self.all_system_authorized_contexts,
self.all_system_unauthorized_contexts,
rule_name, self.fm_ctrl._update, req, '1',
body={'flavor': {'description': None}},
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['flavor'])
def test_server_detail_with_extra_specs_policy(self):
rule = s_policies.SERVERS % 'detail'
# server 'detail' policy is checked before flavor extra specs 'index'
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.POLICY_ROOT % 'index'
authorize_res, unauthorize_res = self.common_policy_check(
self.all_authorized_contexts, self.all_unauthorized_contexts,
rule_name, self.server_ctrl.detail, req,
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['servers'][0]['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['servers'][0]['flavor'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_show_with_extra_specs_policy(self, mock_get, mock_block):
rule = s_policies.SERVERS % 'show'
# server 'show' policy is checked before flavor extra specs 'index'
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.POLICY_ROOT % 'index'
authorize_res, unauthorize_res = self.common_policy_check(
self.all_authorized_contexts,
self.all_unauthorized_contexts,
rule_name, self.server_ctrl.show, req, 'fake',
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['server']['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['server']['flavor'])
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch('nova.compute.api.API.get_instance_host_status')
@mock.patch('nova.compute.api.API.rebuild')
def test_server_rebuild_with_extra_specs_policy(self, mock_rebuild,
mock_get, mock_bdm):
rule = s_policies.SERVERS % 'rebuild'
# server 'rebuild' policy is checked before flavor extra specs 'index'
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.POLICY_ROOT % 'index'
authorize_res, unauthorize_res = self.common_policy_check(
self.all_authorized_contexts,
self.all_unauthorized_contexts,
rule_name, self.server_ctrl._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp.obj['server']['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp.obj['server']['flavor'])
@mock.patch('nova.compute.api.API.update_instance')
def test_server_update_with_extra_specs_policy(self, mock_update):
rule = s_policies.SERVERS % 'update'
# server 'update' policy is checked before flavor extra specs 'index'
# policy so we have to allow it for everyone otherwise it will fail
# first for unauthorized contexts.
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.47')
rule_name = policies.POLICY_ROOT % 'index'
authorize_res, unauthorize_res = self.common_policy_check(
self.all_authorized_contexts,
self.all_unauthorized_contexts,
rule_name, self.server_ctrl.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
fatal=False)
for resp in authorize_res:
self.assertIn('extra_specs', resp['server']['flavor'])
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['server']['flavor'])
class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
"""Test Flavor Extra Specs APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(FlavorExtraSpecsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Check that all system scoped are able to get flavor extra specs.
self.all_system_authorized_contexts = [
self.system_admin_context, self.system_member_context,
self.system_reader_context, self.system_foo_context
]
self.all_system_unauthorized_contexts = [
self.legacy_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context
]
# Check that system admin is able to create, update and delete flavor
# extra specs.
self.admin_authorized_contexts = [
self.system_admin_context]
# Check that non-system admin is not able to create, update and
# delete flavor extra specs.
self.admin_unauthorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context
]
class FlavorExtraSpecsNoLegacyPolicyTest(FlavorExtraSpecsScopeTypePolicyTest):
"""Test Flavor Extra Specs APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system_admin_or_owner APIs.
"""
without_deprecated_rules = True
def setUp(self):
super(FlavorExtraSpecsNoLegacyPolicyTest, self).setUp()
# Check that system or project reader are able to get flavor
# extra specs.
self.all_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.system_member_context,
self.system_reader_context, self.other_project_member_context,
self.other_project_reader_context
]
self.all_unauthorized_contexts = [
self.project_foo_context, self.system_foo_context
]
# Check that all system scoped reader are able to get flavor
# extra specs.
self.all_system_authorized_contexts = [
self.system_admin_context, self.system_member_context,
self.system_reader_context
]
self.all_system_unauthorized_contexts
# Copyright (c) 2021, <NAME>
# May 2021. Started on May 17 for Localized Conflict Modeling.
# First, read GPW population count data as GeoTIFF.
# Write-up and theoretical results in late May.
#-------------------------------------------------------------------
#
# conda activate tf36
# python
# from topoflow.utils import conflict
# pop_grid = conflict.read_geotiff()
#
#-------------------------------------------------------------------
#
# test1()
# test2()
#
# class conflict()
# initialize()
# initialize_U()
# initialize_C1()
# initialize_C2()
#-----------------------
# update()
# update_U()
# update_C1()
# update_C2()
#-----------------------
# update_p()
# update_S()
# update_S1()
# update_S2() # (obsolete soon?)
# update_S_old() # (obsolete soon?)
# update_time()
#--------------------------------------
# get_neighbor_cols_and_rows()
# get_neighbor_values()
# spread_conflicts1()
# spread_conflicts2()
# finalize()
# run_model()
#
# get_raster_cellsize()
# get_raster_bounds()
# bounds_disjoint()
# read_geotiff() # can also create RTG and RTI files
# regrid_geotiff()
#
# read_acled_data()
#
#-------------------------------------------------------------------
import numpy as np
import numpy.random as rn
#### import random as rn
import pandas as pd
import time
# For read_geotiff(), etc.
try:
from osgeo import gdal
except ImportError:
import gdal
import glob, sys
import os, os.path
from . import rti_files
from . import rtg_files
#-------------------------------------------------------------------
def test1():
cfg_file = 'conflict.cfg'
c = conflict()
c.run_model( cfg_file )
# test1()
#-------------------------------------------------------------------
def test2():
pop_grid = read_geotiff()
# test2()
#-------------------------------------------------------------------
def test3( SUBSAMPLE=False ):
#--------------------------------
# Use Horn of Africa as a test.
#--------------------------------
in_dir = '/Users/peckhams/Conflict/Data/GPW-v4/'
in_file = 'gpw_v4_population_count_rev11_2020_30_sec.tif'
in_file = in_dir + in_file
# Bounds = [ minlon, minlat, maxlon, maxlat ]
out_bounds = [ 25.0, -5.0, 55.0, 25.0]
if not(SUBSAMPLE):
#--------------------------------------
# 3600 cols x 3600 rows, 30 arcseconds
#--------------------------------------
out_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_30sec.tif'
out_file = in_dir + out_file
out_xres_sec = None # will default to in_xres_sec
out_yres_sec = None # will default to in_yres_sec
print('Reading & clipping GeoTIFF file...')
else:
#--------------------------------------
# 360 cols x 360 rows, 300 arcseconds
#--------------------------------------
out_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_450sec.tif'
out_file = in_dir + out_file
out_xres_sec = 450.0 # (15 times lower resolution)
out_yres_sec = 450.0 # (15 times lower resolution)
print('Reading, clipping & subsampling GeoTIFF file...')
#--------------------------------------
# 360 cols x 360 rows, 300 arcseconds
#--------------------------------------
# out_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_300sec.tif'
# out_file = in_dir + out_file
# out_xres_sec = 300.0 # (10 times lower resolution)
# out_yres_sec = 300.0 # (10 times lower resolution)
# print('Reading, clipping & subsampling GeoTIFF file...')
regrid_geotiff(in_file=in_file, out_file=out_file,
out_bounds=out_bounds,
out_xres_sec=out_xres_sec,
out_yres_sec=out_yres_sec,
RESAMPLE_ALGO='bilinear', REPORT=True)
# test3()
#-------------------------------------------------------------------
class conflict():
#---------------------------------------------------------------
def initialize( self, cfg_file=None ):
home_dir = os.path.expanduser('~') + os.sep
if (cfg_file is None):
self.in_dir = home_dir + 'Conflict/Data/GPW-v4/'
self.out_dir = home_dir + 'Conflict/Output/'
cfg_file = self.in_dir + 'conflict.cfg'
self.out_file = self.out_dir + 'conflicts.rts'
self.IDs_file = self.out_dir + 'conflict_IDs.rts'
self.C1_file = ''
self.C2_file = ''
#---------------------------
# Was good for pop count U
#---------------------------
# self.U_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_300sec.tif'
# self.nx = 360
# self.ny = 360
# self.c_emerge = 0.01 # (must be in (0,1])
# self.c_spread = 0.1
# ## self.c_spread = 0.03
# ## self.c_spread = 0.05
# ## self.p_geom = 0.2
# self.p_geom = 0.4
#--------------------------
# Is good for pop count U
#--------------------------
# self.U_file = 'Horn_of_Africa_GPW-v4_pop_count_2020_450sec.tif'
# self.nx = 240
# self.ny = 240
# self.c_emerge = 0.5 # (must be in (0,1])
# ## self.c_emerge = 0.1 # (must be in (0,1])
# ## self.c_emerge = 0.01 # (must be in (0,1])
# self.c_spread = 0.5
# ## self.c_spread = 0.1
# self.p_resolve = 0.4
# self.p_geom = 0.4 # (not used now)
#-------------------------
# Was good for uniform U
#-------------------------
self.U_file = '' # (To use uniform U)
self.nx = 240
self.ny = 240
self.c_emerge = 0.001 ####
## self.c_emerge = 0.2
## self.c_emerge = 0.001 # (must be in (0,1])
## self.c_spread = 0.1 ####
self.c_spread = 0.4 ####
## self.c_spread = 0.03
## self.c_spread = 0.05
## self.p_geom = 0.2
self.p_resolve = 0.4
self.p_geom = 0.4
self.spread_method = 1
#--------------------------
self.time_lag = 1 # (not used yet)
self.n_steps = 100
self.REPORT = True
else:
#-----------------------------------
# Read params from the config file
#-----------------------------------
dum = 0
self.cfg_file = cfg_file
self.time_index = 0
self.n_conflict_cells = 0
self.grid_shape = (self.ny, self.nx)
## self.start_time = time.time()
self.start_ID = 1
self.start_index = 0
#----------------------------
# Change to input directory
#----------------------------
os.chdir( self.in_dir )
#-----------------------------
# Open output files to write
#-----------------------------
self.out_unit = open( self.out_file, 'wb')
self.IDs_unit = open( self.IDs_file, 'wb')
#--------------------------------------
# Make grids with col and row numbers
#--------------------------------------
cols = np.arange( self.nx )
rows = np.arange( self.ny )
cg, rg = np.meshgrid( cols, rows )
self.col_grid = cg
self.row_grid = rg
self.initialize_U()
self.initialize_C1()
self.initialize_C2()
#------------------------------------------------------
# Initialize to no conflicts
# S will later contain 1s in grid cells with conflict
# Initialize durations to zero also.
# IDs will contain a unique ID for each conflict.
# Using 'float32' for IDs now for viewing the RTS.
#------------------------------------------------------
self.S = np.zeros( self.grid_shape, dtype='uint8' )
self.durs = np.zeros( self.grid_shape, dtype='uint32')
self.IDs = np.zeros( self.grid_shape, dtype='float32')
#----------------------------------------------------------
# Create a set of random integer IDs, without replacement
# so when we colorize, it will look better.
#----------------------------------------------------------
self.ran_IDs = rn.choice( 10000000, 10000000, replace=False)
# This next method used the built-in random module and caused problems.
### self.ran_IDs = rn.sample( range(1000000), 500000)
self.start_time = time.time()
# initialize()
#---------------------------------------------------------------
def initialize_U( self ):
#-----------------------------------
# Start with U = a population grid
#-----------------------------------
if (self.U_file != ''):
self.U = read_geotiff(in_file=self.U_file,
REPORT=True)
# In case of negative nodata value
np.maximum(self.U, 0.0, self.U) # (in place)
else:
#---------------------
# Use a grid of ones
#---------------------
self.U = np.ones( self.grid_shape, dtype='float32' )
#-----------------------------------
# Disallow conflict on the 4 edges
#-----------------------------------
self.U[0,:] = 0.0
self.U[self.ny - 1,:] = 0.0
self.U[:,0] = 0.0
self.U[:,self.nx - 1] = 0.0
# initialize_U()
#---------------------------------------------------------------
def initialize_C1( self ):
if (self.C1_file != ''):
self.C1 = read_geotiff(in_file=self.C1_file,
REPORT=True)
else:
#---------------------
# Use a grid of ones
#---------------------
self.C1 = np.ones( self.grid_shape, dtype='float32' )
# initialize_C1()
#---------------------------------------------------------------
def initialize_C2( self ):
if (self.C2_file != ''):
self.C2 = read_geotiff(in_file=self.C2_file,
REPORT=True)
else:
#---------------------
# Use a grid of ones
#---------------------
self.C2 = np.ones( self.grid_shape, dtype='float32' )
# initialize_C2()
#---------------------------------------------------------------
def update( self ):
self.update_U()
self.update_C1()
self.update_C2()
#-------------------
self.update_p()
self.update_S() # also updates IDs
## self.update_S1() # same speed as update_S()
## self.update_S2()
self.update_time()
# update()
#---------------------------------------------------------------
def update_U( self ):
pass
# update_U()
#---------------------------------------------------------------
def update_C1( self ):
pass
# update_C1()
#---------------------------------------------------------------
def update_C2( self ):
pass
# update_C2()
#---------------------------------------------------------------
def update_p( self ):
#----------------------------------------------------------
# Note: p is the probability that a conflict emerges in
# a grid cell, and is a function of the unrest, U.
# In order for p to be a probability, in (0,1],
# we need 0 < c_emerge <= 1.
#----------------------------------------------------------
self.p_emerge = (self.c_emerge / self.U.max()) * self.U
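#----------------------------------------------------------
# Example: with the uniform-U defaults above (c_emerge =
# 0.001, nx = ny = 240, edges zeroed), p_emerge is 0.001 in
# every interior cell, so roughly 0.001 * 238 * 238 = 56.6
# new conflicts emerge per step while most cells are still
# conflict-free.
#----------------------------------------------------------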
# update_p()
#---------------------------------------------------------------
def update_S( self ):
#-----------------------------------------------------------
# Note: The previous version of this method generated
# Geometric random variables to model conflict
# durations. This new version does not track
# durations explicitly, but should produce exactly
# the same result. Here, any conflict ends in the
# kth time interval with fixed probability, p.
# This is modeled with a Bernoulli random variable.
#-----------------------------------------------------------
# Note: A Bernoulli random variable takes the value 1
# with probability p and 0 with probability (1-p).
# It is a special case of a Binomial r.v. (n=1).
# np.random.binomial() allows p to be an array.
#-----------------------------------------------------------
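#-----------------------------------------------------------
# Illustrative check (hypothetical, not part of the model):
# ending each active conflict via Bernoulli(p) draws yields
# Geometric(p) durations, e.g.
#   B = np.random.binomial(1, 0.4, size=(100000, 64))
#   d = 1 + np.argmax(B, axis=1)  # step of first success
#   d.mean()  # approx 1/0.4 = 2.5
#-----------------------------------------------------------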
#--------------------------------------------------
# Initiate new conflicts in cells with no conflict
#-----------------------------------------------------
# Generate Bernoulli random variables with parameter
# p_emerge, and initiate conflicts where B1=1.
#-----------------------------------------------------
# Convert b from dtype='int64' to dtype='uint8' ?
# This requires 8 times less memory.
#-----------------------------------------------------
B1 = np.random.binomial(1, self.p_emerge)
w2 = np.logical_and( self.S == 0, B1 == 1 )
n2 = w2.sum()
#------------------------------------------------
# Resolve some conflicts in cells with conflict
#-----------------------------------------------------
# Generate Bernoulli random variables with parameter
# p_resolve and terminate conflicts that get B2=1.
# Conflict durations will then turn out to be
# Geometric random variables, same parameter.
#-----------------------------------------------------
B2 = np.random.binomial(1, self.p_resolve, size=self.grid_shape)
w3 = np.logical_and( self.S == 1, B2 == 1 )
n3 = w3.sum()
#------------------------------------
# Perform the required updates to S
#------------------------------------
i = self.start_index
self.S[ w2 ] = B1[ w2 ]
self.IDs[ w2 ] = self.ran_IDs[ i:(i + n2) ]
# print('dist_reps.shape=', dist_reps.shape) [30, 33, 30]
# print("self.opt['use_biw2v']=", self.opt['use_biw2v']) 0
word_feats = []
if not self.opt['use_biw2v']:
word_embeds = self.get_xlmr_reps(combined_task_inputs) # [batch size, seq len, xlmr dim]
word_embeds = self.dropout(word_embeds)
else:
word_embeds = self.biw2v_embedding(biw2v_ids)
# print('word_embeds.shape=', word_embeds.shape) [30, 33, 768]
word_feats.append(word_embeds)
word_feats.append(upos_reps)
word_feats.append(dist_reps)
word_deprel_reps = self.deprel_embedding(deprel_ids)
adj = get_full_adj(head_ids, retrieve_ids, self.opt['device'])
word_deprel_reps, _ = self.gcn_layer(word_deprel_reps, adj)
trigger_deprel_reps = get_trigger_reps(word_deprel_reps, triggers).unsqueeze(1).repeat(1, seq_len, 1) # [batch size, seq len, gcn dim]
# print('word_deprel_reps.shape=', word_deprel_reps.shape) [30, 33, 200]
# print('adj.shape=', adj.shape) [30, 33, 33]
# print('word_deprel_reps.shape=', word_deprel_reps.shape) [30, 33, 200]
# print('trigger_deprel_reps.shape=', trigger_deprel_reps.shape) [30, 33, 200]
word_feats.append(word_deprel_reps)
word_feats.append(trigger_deprel_reps)
word_feats.append(torch.abs(trigger_deprel_reps - word_deprel_reps))
word_feats.append(trigger_deprel_reps * word_deprel_reps)
# print("self.opt['use_ner']=", self.opt['use_ner']) 0
if self.opt['use_ner']:
ner_reps = self.ner_embedding(ner_ids)
word_feats.append(ner_reps)
word_reps = torch.cat(word_feats, dim=2)
# print('word_reps.shape=', word_reps.shape) [30, 33, 1628]
trigger_reps = get_trigger_reps(word_reps, triggers).unsqueeze(1).repeat(1, seq_len, 1) # [batch size, seq len, feat dim]
# print('trigger_reps.shape=', trigger_reps.shape) [30, 33, 1628]
word_reps = trigger_reps * word_reps
# print('word_reps.shape=', word_reps.shape) [30, 33, 1628]
""" When call self.self_att() below
input_masks.shape= torch.Size([30, 33])
slf_attn_mask.shape= torch.Size([30, 33, 33])
non_pad_mask.shape= torch.Size([30, 33, 1])
enc_output.shape= torch.Size([30, 33, 1628])
position_embed_for_satt= 1
position_ids.shape= torch.Size([30, 33])
enc_output.shape= torch.Size([30, 33, 1628])
"""
word_reps, _ = self.self_att(word_reps, pad_masks)
# print('word_reps.shape=', word_reps.shape) [30, 33, 1628]
word_reps = torch.cat(
[word_reps, word_embeds],
dim=2
)
# print('word_reps.shape=', word_reps.shape) [30, 33, 2396]
raw_scores = self.fc_argument(word_reps) # [batch size, seq len, num tags]
# print('raw_scores.shape=', raw_scores.shape) [30, 33, 12]
token_masks = pad_masks.eq(0).float()
entity_preds = torch.argmax(raw_scores, dim=2).long() * token_masks.long()
# print('token_masks.shape=', token_masks.shape) [30, 33]
# print('entity_preds.shape=', entity_preds.shape) [30, 33]
probs = torch.softmax(raw_scores, dim=2) # [batch size, seq len, num classes]
# print('probs.shape=', probs.shape) [30, 33, 12]
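# Note: softmax is monotonic, so argmax over raw_scores (entity_preds
# above) and argmax over probs give identical predictions; probs is
# presumably kept for downstream scoring.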
"""
entity_preds= tensor([[ 3, 3, 4, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
3, 3, 3, 3, 3, 3, 3, 6, 7, 7, 7, 3, 3, 3, 3],
[ 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 7, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 6, 7, 7, 7, 7, 7, 3, 0, 0],
[ 3, 3, 3, 3, 3, 6, 7, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0],
[ 3, 4, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 6,
7, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 4, 3, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 4, 5, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 7, 3, 3, 3, 3, 3,
3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
6, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 6, 7, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 4, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 4, 5, 5, 5, 3, 3, 3, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 8, 11, 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 8, 11, 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 4, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 6, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 3, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 6,
7, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 4, 3, 3, 3, 3, 3, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
device='cuda:0')
probs= tensor([[[1.1168e-12, 2.5829e-12, 6.1225e-12, ..., 5.6271e-09,
7.6096e-08, 8.8167e-08],
[8.9221e-13, 2.2523e-12, 5.5693e-12, ..., 5.1459e-09,
5.5401e-08, 5.5402e-08],
[4.9063e-09, 4.8340e-09, 1.9968e-09, ..., 1.6185e-05,
| |
None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
plot_stack_1 = plot_stack_1.drop(plot_stack_1.columns[2], axis=1)
plot_stack_1.columns = ['Installment_grade_grp', 'Charged Off', 'Fully Paid']
return plot_stack_1
#=============
# Function 178
def cleaning_func_42(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
return installment_grade
#=============
# Function 179
def cleaning_func_43(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
return plot_stack_1
#=============
# Function 180
def cleaning_func_44(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
return plot_stack_3
#=============
# Function 181
def cleaning_func_45(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
return Fully_Paid_home_status
#=============
# Function 182
def cleaning_func_46(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
plot_stack_3 = plot_stack_3.drop(plot_stack_3.columns[2], axis=1)
plot_stack_3.columns = ['Home Status', 'Charged Off', 'Fully Paid']
return plot_stack_3
#=============
# Function 183
def cleaning_func_47(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
return Charged_off_home_status
#=============
# Function 184
def cleaning_func_48(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
# use_cases/2020_12/run/dispatchers/dereg.py
import os
import sys
from functools import partial
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverStatus, TerminationCondition
# 43 kWh/kgH2 = 43e-3 MWh/kgH2 = 43e-6 GWh/kgH2
e_per_h2 = 43e-6 # GWh / kgH2
def dispatch(info):
"""
Dispatches the components based on user-defined algorithms.
The expected return object is a dict of components, mapped to a dict of
resources that component uses, mapped to the amount consumed/produced
as a numpy array.
Note:
- Negative values mean the component consumes that resource
- Positive values mean the component produces that resource
- The activity doesn't necessarily have to be as long as "time", but it usually should be
@ In, info, dict, information about the state of the system
@ Out, activity, dict, activity of components as described above
"""
heron = info['HERON']
case = info['HERON']['Case']
components = dict((c.name, c) for c in heron['Components'])
sources = heron['Sources']
func = [x for x in sources if x.is_type('Function')][0]._module
# load
load = heron['RAVEN_vars']['TOTALLOAD']
# case
# labels = heron['Case'].get_labels()
# H2 price
price_h2_kg = func.H2_price({}, info)[0]['reference_price'] # $/kg
# effective $/GW for H2 generation
price_h2_GWh = price_h2_kg / e_per_h2 # $/GW = $/kg * kg/GWh
# In this deregulated approach, the NPP gets to submit its bid
# at whatever level it wants, at the risk of not being dispatched.
# The bid is the price ($/GW) and the capacity at that price (GW)
# After that, the ISO decides when to call on it, and the NPP is committed
# for whatever it bid (price and capacity).
# NPP bid is NPP marginal cost + H2 opp cost + "adder"
all_data = func.ds
data = func._load_case(info, all_data)
data = data.where(data.capacity > 1e-6, drop=True)
    # since only one nuclear exists (property of THIS dataset), figure out which one it is
try:
npp = next((val for idx, val in np.ndenumerate(data.component.values) if val.startswith('nucl')))
except StopIteration:
npp = None
# npp marginal cost, just take whichever one is nonzero (cheating)
    if npp is None:
        NPP_bid = 1e9
        H2_opp = 0.0  # no NPP in this dataset; keep defined so the failure-path debug print cannot raise
else:
NPP_mc = float(data['marginal_cost'].loc[npp])
        # hydrogen opportunity cost ($/GWh) = H2 price converted to $/GWh at the HTSE, less the NPP marginal cost
        ## (HTSE variable O&M is functionally zero since it's on hot standby (Konor))
H2_opp = price_h2_GWh - NPP_mc
NPP_bid = NPP_mc + H2_opp + info['HERON']['RAVEN_vars']['NPP_bid_adjust']
data['marginal_cost'].loc[npp] = float(NPP_bid) # float cuz np 0d arrays
# rebuild the stack, prices based on this nuclear bid
stack, prices, comp_order = func._build_stack(data)
    # NOTE this cedes the NPP's control over its future.
## Once it submits bid prices, the ISO takes over and calls on it to
## be dispatched whenever it sees fit. The NPP doesn't get to know the prices
## or load a priori, it just gets to bid in.
# Now we act as the ISO, for actual dispatch requests each hour.
## Note for this dereg case the ISO doesn't care about the hydrogen side of things.
# set up results storage
market_cap = components['H2_market'].get_capacity(info)[0]['H2'] # negative
if npp is None:
npp_cap = 0
else:
npp_cap = components['NPP'].get_capacity(info)[0]['electricity'] # positive
htse_cap_h2 = components['HTSE'].get_capacity(info)[0]['H2'] # positive
store_cap = components['H2_storage'].get_capacity(info)[0]['H2'] # positive
store_initial = components['H2_storage'].get_interaction().get_initial_level(info)
T = len(load)
activity = {
'NPP': {'electricity': np.ones(T) * npp_cap},
'HTSE': {'electricity': None, 'H2': None},
'H2_storage': {'H2': None},
'grid': {'electricity': np.zeros(T)},
'H2_market': {'H2': np.ones(T) * market_cap},
'Secondary': {'electricity': None},
'E_Penalty': {'electricity': None},
}
# dispatch NPP over the 24 hours given
stack_usage_indices = stack.searchsorted(load)
if npp is None:
# we have no nuclear, so grid doesn't use any npp electricity
activity['grid']['electricity'][:] = 0.0
else:
npp_index = next((idx for idx, val in np.ndenumerate(stack) if comp_order[idx] == npp))[0]
# whenever npp is fully committed, we make all the e-
e_mask = stack_usage_indices > npp_index
activity['grid']['electricity'][e_mask] = -1 * npp_cap
# when partially committed, commit what we need to
e_partial = stack_usage_indices == npp_index
activity['grid']['electricity'][e_partial] = -1 * (load[e_partial] - stack[npp_index - 1])
# when not dispatched, we already zero
# to do the rest, we build a little pyomo optimization
# model
m = pyo.ConcreteModel()
# indices
Ts = np.arange(0, T, dtype=int)
m.T = pyo.Set(initialize=Ts)
# optimization variables
## using bounds "as intended"
# htse_bounds = lambda m, i: (0, htse_cap_h2)
# secm_bounds = lambda m, i: (0, 1e4)
# stor_bounds = lambda m, i: (-store_cap/3600, store_cap/3600)
# m.HTSE = pyo.Var(m.T, within=pyo.NonNegativeReals, bounds=htse_bounds, initialize=0) # units kgH2/s
# m.SecM = pyo.Var(m.T, within=pyo.NonNegativeReals, bounds=secm_bounds, initialize=0) # units GW
# m.Stor = pyo.Var(m.T, within=pyo.Reals, bounds=stor_bounds, initialize=0) # units kgH2/s, NOT current amount stored
## manually set constraint bounds
m.HTSE = pyo.Var(m.T, within=pyo.NonNegativeReals, initialize=0) # units kgH2/s
m.SecM = pyo.Var(m.T, within=pyo.NonNegativeReals, initialize=0) # units GW
m.Stor = pyo.Var(m.T, within=pyo.Reals, initialize=0) # units kgH2/s, NOT current amount stored
m.E_dump = pyo.Var(m.T, within=pyo.NonNegativeReals, initialize=0)
m.HTSE_cap = pyo.Constraint(m.T, rule=lambda m, t: m.HTSE[t] <= htse_cap_h2) # kg/s
m.SecM_cap = pyo.Constraint(m.T, rule=lambda m, t: m.SecM[t] <= 1e4) # GW
m.Stor_low = pyo.Constraint(m.T, rule=lambda m, t: m.Stor[t] >= -store_cap/3600) # kg/s
m.Stor_high = pyo.Constraint(m.T, rule=lambda m, t: m.Stor[t] <= store_cap/3600) # kg/s
m.E_dump_cap = pyo.Constraint(m.T, rule=lambda m, t: m.E_dump[t] <= 1e2)
# optimization objective
rule = partial(opt_obj, stack, prices, load, activity['NPP']['electricity'], activity['grid']['electricity'])
m.objective = pyo.Objective(rule=rule, sense=pyo.minimize)
# constraints
## storage bounds, 0 < STORE[t-1] + 3600 * (HTSE[t] - H2_MARKET_CAP) < STORE_CAP
rule = partial(opt_store, store_initial, store_cap)
m.StoreCap = pyo.Constraint(m.T, rule=rule)
## H2 market is satisfied (HTSE_H2 + STORE_H2/3600 == H2_MARKET_CAP)
rule = lambda m, t: m.HTSE[t] + m.Stor[t] + market_cap == 0
m.H2_consv = pyo.Constraint(m.T, rule=rule)
## consv. e-, sinks == sources: E_dump(t) + HTSE_H2(t) * e_per_h2 = NPP_to_h(t) + SecMark(t)
rule = lambda m, t: m.E_dump[t] + m.HTSE[t] * e_per_h2 * 3600 == (npp_cap + activity['grid']['electricity'][t]) + m.SecM[t]
# rule = lambda m, t: m.E_dump[t] + m.HTSE[t] * e_per_h2 * 3600 - activity['grid']['electricity'][t]== npp_cap + m.SecM[t]
m.E_consv = pyo.Constraint(m.T, rule=rule)
    #debug_setup_print(activity, m, stack, prices, comp_order, load,
    #                  htse_cap_h2, npp_cap, store_cap, store_initial, market_cap,
    #                  price_h2_kg, price_h2_GWh, NPP_bid, H2_opp, info['HERON']['RAVEN_vars']['NPP_bid_adjust'])
soln = pyo.SolverFactory('cbc').solve(m)
if soln.solver.status == SolverStatus.ok and soln.solver.termination_condition == TerminationCondition.optimal:
print('Successful hydrogen optimization solve.')
#debug_pyomo_soln(m)
else:
print('Storage optimization FAILURE!')
print(' ... status:', soln.solver.status)
print(' ... termination:', soln.solver.termination_condition)
m.pprint()
        debug_setup_print(activity, m, stack, prices, comp_order, load,
                          htse_cap_h2, npp_cap, store_cap, store_initial, market_cap,
                          price_h2_kg, price_h2_GWh, NPP_bid, H2_opp, info['HERON']['RAVEN_vars']['NPP_bid_adjust'])
raise RuntimeError('Failed hydrogen solve!')
# store activity
# NOTE for SecM and E_dump we directly include prices here rather than recalculate them in TEAL,
# since we have the load data now and won't have it (as accessible) then.
for var in m.component_objects(pyo.Var):
data = np.asarray([var[t].value for t in Ts])
if var.name == 'HTSE':
activity['HTSE']['H2'] = data
activity['HTSE']['electricity'] = - data * e_per_h2 * 3600
elif var.name == 'SecM':
new_load = load + data
indices = stack.searchsorted(new_load)
clearing_prices = prices[indices]
activity['Secondary']['electricity'] = - data * clearing_prices # want a cost in the end
elif var.name == 'E_dump':
activity['E_Penalty']['electricity'] = - data * 17000 # $/GW
elif var.name == 'Stor':
levels = store_initial - np.cumsum(data) * 3600
activity['H2_storage']['H2'] = levels
return activity
def opt_obj(stack, prices, loads, npp, grid, m):
""" objective function for pyomo """
# optimization variables -> use of storage, HTSE, secondary market
## costs:
## -> secondary market for HTSE fixups -> SecMark * clearing_price[t]
cost = 0
for t in range(len(npp)):
addl_e = m.SecM[t]
# what clearing price?
## if we didn't change the clearing price, we can buy at price
load = loads[t]
new_load = load # FIXME + addl_e, except searchsorted fails for that!
price_index = np.searchsorted(stack, new_load)
new_price = prices[price_index]
cost += addl_e * new_price * 1.1 # 10% markup for using secondary market
## -> unused NPP -> 17 $/MW = 17000 $/GW for any unused electricity
cost += m.E_dump[t] * 17000
return cost
def opt_store(initial, store_cap, m, t):
# 0 < stored < capacity
lower = 0
    # m.Stor[t] > 0 means producing (discharging), so we subtract the activity * dt
var = initial - sum(m.Stor[i] for i in range(t+1))*3600
high = store_cap
return pyo.inequality(lower, var, high)
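# Illustrative expansion of the opt_store constraint above (assumption: t = 2):
#   0 <= initial - 3600 * (Stor[0] + Stor[1] + Stor[2]) <= store_cap
# i.e. the running hydrogen inventory must stay within the tank capacity.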
    # default object name is the file name without path
if not name:
if not hasattr(file, 'name'):
raise click.UsageError('The --file must have a name attribute, or --name must be specified')
name = os.path.basename(file.name)
client = build_client('object_storage', ctx)
total_size = os.fstat(file.fileno()).st_size
if total_size > 0:
upload_manager_kwargs = {}
if disable_parallel_uploads:
upload_manager_kwargs['allow_parallel_uploads'] = False
if parallel_upload_count is not None:
upload_manager_kwargs['parallel_process_count'] = parallel_upload_count
upload_manager = UploadManager(client, **upload_manager_kwargs)
with ProgressBar(total_size, 'Uploading object') as bar:
kwargs['progress_callback'] = bar.update
response = upload_manager.resume_upload_file(namespace, bucket_name, name, file.name, upload_id, **kwargs)
display_headers = filter_object_headers(response.headers, OBJECT_PUT_DISPLAY_HEADERS)
render(None, display_headers, ctx, display_all_headers=True)
@cli_util.copy_params_from_generated_command(objectstorage_cli.restore_objects, params_to_exclude=['namespace_name', 'bucket_name', 'object_name'])
@objectstorage_cli.object_group.command(name='restore', help=objectstorage_cli.restore_objects.help)
@cli_util.option('-ns', '--namespace', '--namespace-name', 'namespace', required=True, help="""The top-level namespace used for the request.""")
@cli_util.option('-bn', '--bucket', '--bucket-name', 'bucket_name', required=True, help="""The name of the bucket. Avoid entering confidential information. Example: `my-new-bucket1`""")
@cli_util.option('--name', required=True, help="""An object which was in an archived state and needs to be restored.""")
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@wrap_exceptions
def restore_objects(ctx, **kwargs):
details = {}
namespace = kwargs['namespace']
bucket = kwargs['bucket_name']
name = kwargs['name']
details['objectName'] = name
if kwargs['hours'] is not None:
details['hours'] = kwargs['hours']
client = build_client('object_storage', ctx)
kwargs = {'opc_client_request_id': ctx.obj['request_id']}
result = client.restore_objects(
namespace_name=namespace,
bucket_name=bucket,
restore_objects_details=details,
**kwargs
)
if result.status == 200:
click.echo("This object will be available for download in about 4 hours. Use 'oci os object restore-status -ns {ns} -bn {bn} --name {name}' command to check the status.".format(ns=namespace, bn=bucket, name=name), file=sys.stderr)
else:
render_response(result, ctx)
@objectstorage_cli.object_group.command(name='restore-status')
@cli_util.option('-ns', '--namespace', '--namespace-name', 'namespace', required=True, help='The top-level namespace used for the request.')
@cli_util.option('-bn', '--bucket-name', required=True, help='The name of the bucket.')
@cli_util.option('--name', required=True, help='The name of the object.')
@json_skeleton_utils.get_cli_json_input_option({})
@help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@wrap_exceptions
def restore_status(ctx, from_json, namespace, bucket_name, name):
"""
Gets the restore status for an object.
Example:
oci os object restore-status -ns mynamespace -bn mybucket --name myfile.txt
"""
client = build_client('object_storage', ctx)
response = client.head_object(
namespace,
bucket_name,
name,
opc_client_request_id=ctx.obj['request_id'])
archival_state = response.headers.get('archival-state', None)
if archival_state is None:
msg = "Available, this object is available for download."
elif archival_state.lower() == 'archived':
msg = "Archived, this object is not available for download. Use 'oci os object restore -ns {ns} -bn {bn} --name {name}' command to start restoring the object.".format(ns=namespace, bn=bucket_name, name=name)
elif archival_state.lower() == 'restoring':
msg = "Restoring, this object is being restored and will be available for download in about 4 hours from the time you issued the restore command."
elif archival_state.lower() == 'restored':
try:
# expected format: Literal Z at the end for UTC with milliseconds
time_of_archival = response.headers['time-of-archival']
time_of_archival_dt = arrow.get(time_of_archival, 'YYYY-MM-DDTHH:mm:ss.SSS[Z]')
diff = time_of_archival_dt - arrow.utcnow()
time_left = time_delta(diff.days, diff.seconds)
msg = "Restored. You have {} to download the restored object before it is once again archived.".format(time_left)
except arrow.parser.ParserError:
msg = "Restored. The object will be re-archived at {}.".format(time_of_archival)
else:
msg = "Unknown"
click.echo(msg, file=sys.stderr)
def time_delta(days, remaining_secs_in_day):
    hours, seconds = divmod(remaining_secs_in_day, 3600)
minutes, seconds = divmod(seconds, 60)
if days == 0 and hours == 0 and minutes == 0:
return 'less than 1 minute'
days_str = "1 day" if days == 1 else "{} days".format(days)
hours_str = "1 hour" if hours == 1 else "{} hours".format(hours)
minutes_str = "1 min" if minutes == 1 else "{} mins".format(minutes)
return ' '.join([days_str, hours_str, minutes_str])
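# For example (illustrative): time_delta(1, 3723) returns '1 day 1 hour 2 mins',
# while time_delta(0, 30) returns 'less than 1 minute'.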
@click.command(name='multipart', cls=CommandGroupWithAlias)
@help_option_group
def multipart():
pass
@click.command(name='abort')
@cli_util.option('-ns', '--namespace', '--namespace-name', 'namespace', required=True, help='The top-level namespace used for the request.')
@cli_util.option('-bn', '--bucket-name', required=True, help='The name of the bucket.')
@cli_util.option('-on', '--object-name', required=True, help='The name of the object.')
@cli_util.option('--upload-id', required=True, help='Upload ID to abort.')
@cli_util.option('--force', is_flag=True, help='Abort the existing multipart upload without a confirmation prompt.')
@json_skeleton_utils.get_cli_json_input_option({})
@help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@wrap_exceptions
def multipart_abort(ctx, from_json, namespace, bucket_name, object_name, upload_id, force):
"""
Aborts an uncommitted multipart upload
Example:
oci os multipart abort -ns mynamespace -bn mybucket --object-name myfile.txt --upload-id my-upload-id
"""
client = build_client('object_storage', ctx)
if not force:
try:
response = client.list_multipart_upload_parts(namespace, bucket_name, object_name, upload_id, limit=1)
render_response(response, ctx)
if response.status == 200:
if not click.confirm("WARNING: Are you sure you want to permanently remove this incomplete upload?"):
ctx.abort()
except exceptions.ServiceError:
raise
render_response(client.abort_multipart_upload(namespace, bucket_name, object_name, upload_id), ctx)
@cli_util.copy_params_from_generated_command(objectstorage_cli.copy_object, params_to_exclude=['destination_object_name', 'destination_region', 'destination_namespace'])
@objectstorage_cli.object_group.command(name='copy', help=objectstorage_cli.copy_object.help)
@cli_util.option('--destination-region', help="""The destination region object will be copied to.""")
@cli_util.option('--destination-namespace', help="""The destination namespace object will be copied to.""")
@cli_util.option('--destination-object-name', help="""The destination name for the copy object.""")
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler({'destination-object-metadata': {'module': 'object_storage', 'class': 'dict(str, string)'}})
@cli_util.wrap_exceptions
def copy_object(ctx, **kwargs):
if 'source_object_name' in kwargs and ('destination_object_name' not in kwargs or kwargs['destination_object_name'] is None):
kwargs['destination_object_name'] = kwargs['source_object_name']
if 'destination_namespace' not in kwargs or kwargs['destination_namespace'] is None:
client = build_client('object_storage', ctx)
kwargs['destination_namespace'] = client.get_namespace().data
if 'destination_region' not in kwargs or kwargs['destination_region'] is None:
kwargs['destination_region'] = ctx.obj['config']['region']
ctx.invoke(objectstorage_cli.copy_object, **kwargs)
objectstorage_cli.os_root_group.add_command(multipart)
objectstorage_cli.list_multipart_uploads.name = 'list'
get_param(objectstorage_cli.list_multipart_uploads, 'bucket_name').opts.extend(['-bn'])
get_param(objectstorage_cli.list_multipart_uploads, 'namespace_name').opts.extend(['--namespace', '-ns'])
multipart.add_command(objectstorage_cli.list_multipart_uploads)
multipart.add_command(multipart_abort)
# Retrieves a single page of objects, retrying the call if we received a retryable exception. This will return the
# raw response and it is up to the caller to handle pagination etc
def retrying_list_objects_single_page(client, request_id, namespace, bucket_name, prefix, start, end, limit, delimiter, fields):
args = {
'fields': fields,
'opc_client_request_id': request_id,
'limit': limit
}
if delimiter is not None:
args['delimiter'] = delimiter
if prefix is not None:
args['prefix'] = prefix
if start:
args['start'] = start
if end is not None:
args['end'] = end
return _make_retrying_list_call(client, namespace, bucket_name, **args)
# Retrieves multiple pages of objects, retrying each list page call if we received a retryable exception. This will return a list of
# the raw responses we received in the order we received them
#
# This method can retrieve all matching objects or only up to a given limit. The default is only to retrieve up to the given limit
def retrying_list_objects(client, request_id, namespace, bucket_name, prefix, start, end, limit, delimiter, fields, retrieve_all=False):
all_responses = list()
if retrieve_all:
response = retrying_list_objects_single_page(client, request_id, namespace, bucket_name, prefix, start, end, limit, delimiter, fields)
all_responses.append(response)
next_start = response.data.next_start_with
while next_start:
response = retrying_list_objects_single_page(client, request_id, namespace, bucket_name, prefix, next_start, end, limit, delimiter, fields)
all_responses.append(response)
next_start = response.data.next_start_with
else:
next_start = start
while limit > 0:
response = retrying_list_objects_single_page(client, request_id, namespace, bucket_name, prefix, next_start, end, limit, delimiter, fields)
all_responses.append(response)
next_start = response.data.next_start_with
if next_start:
limit -= len(response.data.objects)
else:
limit = 0
return all_responses
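# Illustrative call (hypothetical values): fetch every object under a prefix, 1000 per
# page, letting the helper follow the next_start_with pagination marker:
#   responses = retrying_list_objects(client, request_id, 'my-namespace', 'my-bucket',
#                                     'photos/', None, None, 1000, None, 'name,size',
#                                     retrieve_all=True)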
# Normalizes the object name path of an object we're going to upload to object storage (e.g. a/b/c/object.txt) so that
# it uses the object storage delimiter character (/)
#
# On Unix filesystems this should be a no-op because the path separator is already the slash but on Windows systems this will replace
# the Windows path separator (\) with /
def normalize_object_name_path_for_object_storage(object_name_path, path_separator=os.sep):
return object_name_path.replace(path_separator, '/')
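# For example (illustrative): on Windows,
#   normalize_object_name_path_for_object_storage('photos\\2020\\img.png', '\\')
# returns 'photos/2020/img.png'; a name that already uses '/' is returned unchanged.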
# Calls list_object with retries:
#
# - Max of 3 attempts
# - Exponential back off of (2 ^ retries) seconds
# - Random jitter between retries of 0-2 seconds
# - Retry on timeouts, connection errors, internal server errors and throttles
@retry(stop_max_attempt_number=3, wait_exponential_multiplier=1000, wait_exponential_max=10000, wait_jitter_max=2000,
retry_on_exception=retry_utils.retry_on_timeouts_connection_internal_server_and_throttles)
def _make_retrying_list_call(client, namespace, bucket_name, **kwargs):
return client.list_objects(
namespace,
bucket_name,
**kwargs
)
# HEADs an object to retrieve its metadata. This has the following retry conditions:
#
# - Max of 3 attempts
# - Exponential back off of (2 ^ retries) seconds
# - Random jitter between retries of 0-2 seconds
# - Retry on timeouts, connection errors, internal server errors and throttles
#
# 404s are not retried but they will also not result in the exception bubbling up. Instead, they will result in None
# being returned
@retry(stop_max_attempt_number=3, wait_exponential_multiplier=1000, wait_exponential_max=10000, wait_jitter_max=2000,
retry_on_exception=retry_utils.retry_on_timeouts_connection_internal_server_and_throttles)
def _make_retrying_head_object_call(client, namespace, bucket_name, name, client_request_id):
kwargs = {'opc_client_request_id': client_request_id}
try:
return client.head_object(namespace, bucket_name, name, **kwargs)
except exceptions.ServiceError as e:
if e.status == 404:
return None
else:
raise
def _get_progress_bar_label(original_label, object_name, prefix='Processing'):
if original_label:
formatted_progress_bar_label = original_label
else:
# If the names are too long then we can end up with multiple progress bars since we overflow a single line. To prevent
# this, make sure that the label won't consume more than half the terminal width
terminal_width = click.termui.get_terminal_size()[0] / 2
remaining_width = terminal_width - (len(prefix) + 1)
if len(object_name) > remaining_width:
object_name_to_use = object_name[(object_name.rfind('/') + 1):]
object_name_length = len(object_name_to_use)
if object_name_length > remaining_width or object_name_length == 0:
object_name_to_use = 'item'
else:
object_name_to_use = object_name
formatted_progress_bar_label = '{} {}'.format(prefix, object_name_to_use)
return formatted_progress_bar_label
def _success_upload_callback_add_item_to_dict(**kwargs):
kwargs['target_dict'].update({kwargs['target_dict_key']: filter_object_headers(kwargs.get('work_pool_task_result').headers, OBJECT_PUT_DISPLAY_HEADERS)})
def _error_callback_add_item_to_dict(**kwargs):
kwargs['target_dict'].update({kwargs['target_dict_key']: str(kwargs.get('callback_exception'))})
def _print_to_console(**kwargs):
click.echo(kwargs['message'], file=sys.stderr)
def _get_file_filter_collection(base_directory, include, exclude, object_prefix):
file_filter_collection = None
if include:
file_filter_collection = SingleTypeFileFilterCollection(base_directory, BaseFileFilterCollection.INCLUDE)
for i in include:
# If objects have a prefix with a path separator, we're going to transform that into part of the path
# so a caller's --include and --exclude filters may not take that into account. Instead, try and do that
<reponame>armingeiser/KratosSalomePlugin<gh_stars>1-10
# _ __ _ ___ _ ___ _ _
# | |/ /_ _ __ _| |_ ___ __/ __| __ _| |___ _ __ ___| _ \ |_ _ __ _(_)_ _
# | ' <| '_/ _` | _/ _ (_-<__ \/ _` | / _ \ ' \/ -_) _/ | || / _` | | ' \
# |_|\_\_| \__,_|\__\___/__/___/\__,_|_\___/_|_|_\___|_| |_|\_,_\__, |_|_||_|
# |___/
# License: BSD License ; see LICENSE
#
# Main authors: <NAME> (https://github.com/philbucher)
#
# this file contains helpers used in the tests
# set up testing environment (before anything else)
import initialize_testing_environment
# python imports
from pathlib import Path
import unittest
import os
from sys import version_info as py_version_info
from shutil import rmtree
# plugin imports
from kratos_salome_plugin import IsExecutedInSalome
from kratos_salome_plugin.salome_study_utilities import ResetStudy, GetNumberOfObjectsInStudy
# salome imports
import salome
# importing important modules
# not sure if the order is important, but this is how it is done in the dumped studies
import GEOM
from salome.geom import geomBuilder
import SMESH
from salome.smesh import smeshBuilder
def GetTestsPath() -> Path:
"""path to the "tests" folder"""
return Path(__file__).parent.absolute()
def GetTestsDir():
""" !!! DEPRECATED !!! """
return os.path.dirname(os.path.realpath(__file__))
def CheckIfKratosAvailable():
if "KRATOS_AVAILABLE" in os.environ:
# this is intended to be used in the CI
# there "try-except" might lead to an undiscovered failure
return (os.environ["KRATOS_AVAILABLE"] == "1")
else:
try:
import KratosMultiphysics
return True
        except Exception:
return False
def CheckIfApplicationsAvailable(*application_names):
raise Exception("This function is untested!")
if not CheckIfKratosAvailable():
return False
from KratosMultiphysics.kratos_utilities import CheckIfApplicationsAvailable
return CheckIfApplicationsAvailable(application_names)
def DeleteFileIfExisting(file_path: Path) -> None:
"""Delete a file if it exists"""
if file_path.is_file():
os.remove(str(file_path))
def DeleteDirectoryIfExisting(directory_path: Path) -> None:
"""Delete a directory if it exists"""
if directory_path.is_dir():
rmtree(directory_path)
def skipUnlessPythonVersionIsAtLeast(min_python_version, reason):
'''Skips the test if the test requires a newer version of Python
Note that this should only be used for functionalities that are used inside
of Salome, otherwise the minimum python version of the plugin is increased
'''
reason_for_skip = 'This test requires at least Python version {}, the current version is: ({},{},{}). Reason: {}'.format(min_python_version, py_version_info[0], py_version_info[1], py_version_info[2], reason)
return unittest.skipIf(min_python_version > py_version_info, reason_for_skip)
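# Illustrative usage (hypothetical test method, not part of the original helpers):
#   @skipUnlessPythonVersionIsAtLeast((3, 6), "uses f-strings")
#   def test_something(self): ...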
def CreateHDFStudyFile(file_name: str, *ignored_args) -> bool:
"""aux function for mocking salome.myStudy.SaveAs
it ignores arguments for multifile and mode (ascii or binary)
TODO do a type check on the "file_name"? => salome seems to only work with "str"
"""
if not file_name.endswith(".hdf"):
file_name+=".hdf"
with open(file_name, "w") as hdf_file:
hdf_file.write("This is a mocked hdf study file created during testing\n")
hdf_file.write("It should be deleted after testing\n")
return True
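# Minimal usage sketch (assumption: unittest.mock is available; not part of the
# original helpers) for swapping the real save call with the mock above:
#   from unittest import mock
#   with mock.patch.object(salome.myStudy, 'SaveAs', CreateHDFStudyFile):
#       salome.myStudy.SaveAs('my_study', False, False)  # writes "my_study.hdf"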
@unittest.skipUnless(initialize_testing_environment.PYQT_AVAILABLE, "Qt is not available")
class QtTestCase(unittest.TestCase): pass
@unittest.skipUnless(IsExecutedInSalome(), "This test can only be executed in Salome")
class SalomeTestCase(unittest.TestCase):
def setUp(self):
# initializing salome also creates a study.
# clearing the study in order to have a clean study for each test.
# This is much faster than re-launching salome for each test
self.study = salome.myStudy
ResetStudy()
self.assertEqual(GetNumberOfObjectsInStudy(), 0, msg="Resetting the study failed!")
self.geompy = geomBuilder.New()
self.smesh = smeshBuilder.New()
class SalomeTestCaseWithBox(SalomeTestCase):
# a test case that has a simple box with a tetra and hexa mesh as setup
def setUp(self):
super().setUp()
# creating geometry
O = self.geompy.MakeVertex(0, 0, 0)
OX = self.geompy.MakeVectorDXDYDZ(1, 0, 0)
OY = self.geompy.MakeVectorDXDYDZ(0, 1, 0)
OZ = self.geompy.MakeVectorDXDYDZ(0, 0, 1)
self.box = self.geompy.MakeBoxDXDYDZ(200, 200, 200)
[self.face_1, self.face_2] = self.geompy.SubShapes(self.box, [13, 23])
[self.edge_1, self.edge_2] = self.geompy.SubShapes(self.box, [18, 26])
self.group_faces = self.geompy.CreateGroup(self.box, self.geompy.ShapeType["FACE"])
self.geompy.UnionIDs(self.group_faces, [33, 31])
self.group_edges = self.geompy.CreateGroup(self.box, self.geompy.ShapeType["EDGE"])
self.geompy.UnionIDs(self.group_edges, [25, 12, 29, 22])
self.name_main_box = 'main_box'
self.geompy.addToStudy(self.box, self.name_main_box)
# creating mesh
self.mesh_tetra = self.smesh.Mesh(self.box)
Regular_1D = self.mesh_tetra.Segment()
Max_Size_1 = Regular_1D.MaxSize(60)
MEFISTO_2D = self.mesh_tetra.Triangle(algo=smeshBuilder.MEFISTO)
NETGEN_3D = self.mesh_tetra.Tetrahedron()
isDone = self.mesh_tetra.Compute()
self.assertTrue(isDone, msg="Tetra mesh could not be computed!")
self.name_main_mesh_tetra = 'main_mesh_tetra'
self.smesh.SetName(self.mesh_tetra.GetMesh(), self.name_main_mesh_tetra)
self.mesh_hexa = self.smesh.Mesh(self.box)
Regular_1D_1 = self.mesh_hexa.Segment()
Number_of_Segments_1 = Regular_1D_1.NumberOfSegments(8)
Quadrangle_2D = self.mesh_hexa.Quadrangle(algo=smeshBuilder.QUADRANGLE)
Hexa_3D = self.mesh_hexa.Hexahedron(algo=smeshBuilder.Hexa)
isDone = self.mesh_hexa.Compute()
self.assertTrue(isDone, msg="Hexa mesh could not be computed!")
self.name_main_mesh_hexa = 'main_mesh_hexa'
self.smesh.SetName(self.mesh_hexa.GetMesh(), self.name_main_mesh_hexa)
# adding 0D Elements
for i in range(10):
self.mesh_tetra.Add0DElement( i+1 )
self.group_tetra_0D_elements = self.mesh_tetra.CreateEmptyGroup(SMESH.ELEM0D, "subset_0D_elements") # type "SMESH._objref_SMESH_Group"
self.group_tetra_0D_elements.AddFrom(self.mesh_tetra.GetMesh())
for i in range(4):
self.mesh_tetra.Add0DElement( i+15 ) # those are only in the main-mesh
# adding Ball Elements
for i in range(6):
self.mesh_hexa.AddBall(i+1, i*6+1)
self.group_hexa_ball_elements = self.mesh_hexa.CreateEmptyGroup(SMESH.BALL, "subset_ball_elements") # type "SMESH._objref_SMESH_Group"
self.group_hexa_ball_elements.AddFrom(self.mesh_hexa.GetMesh())
for i in range(11):
self.mesh_hexa.AddBall(i+15, i+2) # those are only in the main-mesh
# creating more mesh groups
self.group_tetra_f1_nodes = self.mesh_tetra.GroupOnGeom(self.face_1,'face_1_nodes',SMESH.NODE) # type "SMESH._objref_SMESH_GroupOnGeom"
self.group_tetra_f1_faces = self.mesh_tetra.GroupOnGeom(self.face_1,'face_1_faces',SMESH.FACE) # type "SMESH._objref_SMESH_GroupOnGeom"
criteria = [self.smesh.GetCriterion(SMESH.EDGE, SMESH.FT_Length, SMESH.FT_LessThan, 150)]
filter_1 = self.smesh.GetFilterFromCriteria(criteria)
filter_1.SetMesh(self.mesh_hexa.GetMesh())
self.group_hexa_edges = self.mesh_hexa.GroupOnFilter( SMESH.EDGE, 'group_edges', filter_1) # type "SMESH._objref_SMESH_GroupOnFilter"
# using random names since they are not used so far
self.sub_mesh_tetra_f_1 = self.mesh_tetra.GetSubMesh( self.face_1, 'Sub-mesh_1' )
self.sub_mesh_tetra_f_2 = self.mesh_tetra.GetSubMesh( self.face_2, 'Sub-mesh_2' )
self.sub_mesh_tetra_e_1 = self.mesh_tetra.GetSubMesh( self.edge_1, 'Sub-mesh_3' )
self.sub_mesh_tetra_e_2 = self.mesh_tetra.GetSubMesh( self.edge_2, 'Sub-mesh_4' )
self.sub_mesh_tetra_g_1 = self.mesh_tetra.GetSubMesh( self.group_faces, 'Sub-mesh_5' )
self.sub_mesh_tetra_g_2 = self.mesh_tetra.GetSubMesh( self.group_edges, 'Sub-mesh_6' )
self.sub_mesh_hexa_f_1 = self.mesh_hexa.GetSubMesh( self.face_1, 'Sub-mesh_7' )
self.sub_mesh_hexa_f_2 = self.mesh_hexa.GetSubMesh( self.face_2, 'Sub-mesh_8' )
self.sub_mesh_hexa_e_1 = self.mesh_hexa.GetSubMesh( self.edge_1, 'Sub-mesh_9' )
self.sub_mesh_hexa_e_2 = self.mesh_hexa.GetSubMesh( self.edge_2, 'Sub-mesh_10' )
self.sub_mesh_hexa_g_1 = self.mesh_hexa.GetSubMesh( self.group_faces, 'Sub-mesh_11' )
self.name_mesh_group = "name_mesh_group"
self.sub_mesh_hexa_g_2 = self.mesh_hexa.GetSubMesh( self.group_edges, self.name_mesh_group )
class SalomeTestCaseCantilever2D(SalomeTestCase):
# a test case that has a simple 2D cantilever
def setUp(self):
super().setUp()
debug = False
# creating geometry
self.O = self.geompy.MakeVertex(0, 0, 0)
self.OX = self.geompy.MakeVectorDXDYDZ(1, 0, 0)
self.OY = self.geompy.MakeVectorDXDYDZ(0, 1, 0)
self.OZ = self.geompy.MakeVectorDXDYDZ(0, 0, 1)
self.Vertex_1 = self.geompy.MakeVertex(0, 0, 0)
self.Vertex_2 = self.geompy.MakeVertex(5, 0, 0)
self.Vertex_3 = self.geompy.MakeVertex(5, 1, 0)
self.Vertex_4 = self.geompy.MakeVertex(0, 1, 0)
self.Line_1 = self.geompy.MakeLineTwoPnt(self.Vertex_1, self.Vertex_2)
self.Line_2 = self.geompy.MakeLineTwoPnt(self.Vertex_2, self.Vertex_3)
self.Line_3 = self.geompy.MakeLineTwoPnt(self.Vertex_3, self.Vertex_4)
self.Line_4 = self.geompy.MakeLineTwoPnt(self.Vertex_4, self.Vertex_1)
self.Face_1 = self.geompy.MakeFaceWires([self.Line_1, self.Line_2, self.Line_3, self.Line_4], 1)
[self.Neumann,self.Dirichlet] = self.geompy.SubShapes(self.Face_1, [6, 10])
# publish geometry ( only in debug)
if debug:
self.geompy.addToStudy( self.O, 'O' )
self.geompy.addToStudy( self.OX, 'OX' )
self.geompy.addToStudy( self.OY, 'OY' )
self.geompy.addToStudy( self.OZ, 'OZ' )
self.geompy.addToStudy( self.Vertex_1, 'Vertex_1' )
self.geompy.addToStudy( self.Vertex_2, 'Vertex_2' )
self.geompy.addToStudy( self.Vertex_3, 'Vertex_3' )
self.geompy.addToStudy( self.Vertex_4, 'Vertex_4' )
self.geompy.addToStudy( self.Line_1, 'Line_1' )
self.geompy.addToStudy( self.Line_2, 'Line_2' )
self.geompy.addToStudy( self.Line_3, 'Line_3' )
self.geompy.addToStudy( self.Line_4, 'Line_4' )
self.geompy.addToStudy( self.Face_1, 'domain' )
self.geompy.addToStudyInFather( self.Face_1, self.Neumann, 'Neumann' )
self.geompy.addToStudyInFather( self.Face_1, self.Dirichlet, 'Dirichlet' )
# creating mesh
self.smeshObj_1 = self.smesh.CreateHypothesis('MaxLength')
self.smeshObj_2 = self.smesh.CreateHypothesis('NumberOfSegments')
self.domain_mesh = self.smesh.Mesh(self.Face_1)
self.Regular_1D = self.domain_mesh.Segment()
self.Local_Length_1 = self.Regular_1D.LocalLength(1,None,1e-07)
self.Quadrangle_2D = self.domain_mesh.Quadrangle(algo=smeshBuilder.QUADRANGLE)
self.Local_Length_1.SetLength( 0.2 )
self.Local_Length_1.SetPrecision( 1e-07 )
isDone = self.domain_mesh.Compute()
self.assertTrue(isDone, msg="Mesh could not be computed!")
self.neumann_mesh = self.domain_mesh.GetSubMesh( self.Neumann, 'neumann' )
self.dirichlet_mesh = self.domain_mesh.GetSubMesh( self.Dirichlet, 'dirichlet' )
if debug:
self.smesh.SetName(self.Regular_1D.GetAlgorithm(), 'Regular_1D')
self.smesh.SetName(self.Quadrangle_2D.GetAlgorithm(), 'Quadrangle_2D')
self.smesh.SetName(self.Local_Length_1, 'Local Length_1')
self.smesh.SetName(self.domain_mesh.GetMesh(), 'domain_mesh')
self.smesh.SetName(self.dirichlet_mesh, 'dirichlet')
self.smesh.SetName(self.neumann_mesh, 'neumann')
            salome.myStudy.SaveAs("SalomeTestCaseCantilever2D.hdf", False, False) # args: use_multifile, use_ascii
def CompareMdpaWithReferenceFile(mdpa_file_name, test_case):
"""This function compares two mdpa files"""
def GetFileLines(ref_mdpa_file, other_mdpa_file):
"""This function reads the reference and the output file
It returns the lines read from both files and also compares
        if they contain the same number of lines
"""
# check if files are valid
err_msg = 'The specified reference file name "'
err_msg += ref_mdpa_file
err_msg += '" is not valid!'
test_case.assertTrue(os.path.isfile(ref_mdpa_file), msg=err_msg)
err_msg = 'The specified output file name "'
err_msg += other_mdpa_file
err_msg += '" is not valid!'
test_case.assertTrue(os.path.isfile(other_mdpa_file), msg=err_msg)
# "readlines" adds a newline at the end of the line,
# which will be removed with rstrip afterwards
with open(ref_mdpa_file,'r') as ref_file:
lines_ref = ref_file.readlines()
with open(other_mdpa_file,'r') as out_file:
lines_out = out_file.readlines()
        # removing trailing newline AND whitespaces (beginning & end) that can mess with the comparison
# furthermore convert tabs to spaces
lines_ref = [line.rstrip().lstrip().replace("\t", " ") for line in lines_ref]
lines_out = [line.rstrip().lstrip().replace("\t", " ") for line in lines_out]
num_lines_ref = len(lines_ref)
num_lines_out = len(lines_out)
err_msg = "Files have different number of lines!"
err_msg += "\nNum Lines Reference File: " + str(num_lines_ref)
err_msg += "\nNum Lines Other File: " + str(num_lines_out)
test_case.assertEqual(num_lines_ref, num_lines_out, msg=err_msg)
return lines_ref, lines_out
def CompareNodes(lines_ref, lines_out, line_index):
line_index += 1 # skip the "Begin" line
while not lines_ref[line_index].split(" ")[0] == "End":
line_ref_splitted = lines_ref[line_index].split(" ")
line_out_splitted = lines_out[line_index].split(" ")
test_case.assertEqual(len(line_ref_splitted), len(line_out_splitted), msg="Line {}: Node format is not correct!".format(line_index+1))
# compare node Id
test_case.assertEqual(int(line_ref_splitted[0]), int(line_out_splitted[0]), msg="Line {}: Node Ids do not match!".format(line_index+1))
# compare node coordinates
for i in range(1,4):
ref_coord = float(line_ref_splitted[i])
out_coord = float(line_out_splitted[i])
test_case.assertAlmostEqual(ref_coord, out_coord, msg="Line {}: Node Coordinates do not match!".format(line_index+1))
line_index += 1
return line_index+1
def CompareGeometricalObjects(lines_ref, lines_out, line_index):
# compare entity types (Elements or Conditions)
Returns
-------
list
List of envs for the given context.
"""
# First, get the context in data
contextData = self._data.get(context, None)
if contextData is None:
return []
# Then, get the env in the precedently queried context dict.
envData = contextData.get('envs', None)
if envData is None:
return []
        return list(envData.keys())
def getBlueprints(self, context, env, forceReload=False):
"""Get the blueprint object for the given context and env.
        Try to retrieve the blueprints for the specified env in the specified context. If blueprints
        already exist, they are not re-instantiated unless forceReload is `True`.
Parameters
----------
context: str
Context from which retrieve the blueprint in the given env.
env: str
Env from which get the blueprint object.
forceReload: bool
Define if the function should reload its blueprints or not.
Returns
-------
dict
Dict containing all blueprint objects for the given context env.
"""
        assert context in self._contexts, '"{0}" is not registered yet in this Register'.format(context)
self._blueprints = []
self._context = context
# Get the dict for the specified context in self._data
contextData = self._data.get(context, None)
if contextData is None:
return {}
# Get the dict for all envs in self._data[context]
envsData = contextData.get('envs', None)
if envsData is None:
return {}
# Get the dict for the specified env in self._data[context]['envs']
envData = envsData.get(env, None)
if envData is None:
return {}
self._env = env
# Get the blueprint in self._data[context]['envs'][env]. If one is found, return it. #TODO: It seems there is an error
blueprints = envData.get('blueprints', None)
if blueprints is not None and not forceReload: # If not forceReload, return the existing blueprints. #FIXME: self._blueprints is empty outside dev
return blueprints['objects']
# Get the env module to retrieve the blueprint from.
envModule = envData.get('module', None)
if envModule is None:
# Get the string path to the env package in self._data[context]['envs'][env]['import']
envStr = envData.get('import', None)
if envStr is None:
return {}
# Load the env module from the string path stored.
            envModule = AtUtils.importFromStr('{}.{}'.format(envStr, env), verbose=self.verbose)
if envModule is None:
return {}
envData['module'] = envModule
# If force reload are enabled, this will reload the env module.
if forceReload:
reload(envModule)
        # Try to access the `register` variable in the env module
blueprints = getattr(envModule, 'register', {})
ID.flush()
# Generate a blueprint object for each process retrieved in the `blueprint` variable of the env module.
self._blueprints = blueprintObjects = []
for i in range(len(blueprints)):
blueprintObjects.append(Blueprint(blueprint=blueprints[i], verbose=self.verbose))
# Default resolve for blueprints if available in batch, call the `resolveLinks` method from blueprints to change the targets functions.
batchLinkResolveBlueprints = [blueprintObject if blueprintObject._inBatch else None for blueprintObject in blueprintObjects]
for blueprint in blueprintObjects:
blueprint.resolveLinks(batchLinkResolveBlueprints, check=Link.CHECK, fix=Link.FIX, tool=Link.TOOL)
# Finally store blueprints in the env dict in data.
envData['blueprints'] = {
'data': blueprints,
'objects': blueprintObjects,
}
return self._blueprints
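    # Illustrative call (hypothetical context/env names, not part of the original class):
    #   blueprints = register.getBlueprints('Maya', 'Modeling')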
def reloadBlueprintsModules(self):
"""Reload the Blueprints's source modules to reload the Processes in it
Should better be called in dev mode to simplify devellopment and test of a new Process.
Returns
-------
list(module, ...)
Lis of all reloaded modules.
"""
modules = list(set([blueprint._module for blueprint in self._blueprints]))
for module in modules:
reload(module)
return modules
def getData(self, data):
"""Get a specific data in the register current context and env.
Parameters
----------
data: str
The key of the data to get in the register at [self._context]['envs'][self._env]
Returns
-------
type or NoneType
Data queried if exist, else NoneType.
"""
if not self._context or not self._env:
return None
return self._data[self._context]['envs'][self._env].get(data, None)
def setData(self, key, data):
"""Set the current data at the given key of the register's current env dict.
Parameters
----------
key: type (immutable)
The key for which to add the data in the register current context and env dict.
data: type
The data to store in the register's current env dict of the current context.
Returns
-------
Register
Return the instance of the object to make object fluent.
"""
self._data[self._context]['envs'][self._env][key] = data
return self
def setVerbose(self, value):
"""Set the Verbose state.
Parameters
----------
value: bool
True or False to enable or disable the verbose
Returns
-------
Register
Return the instance of the object to make object fluent.
"""
self.verbose = bool(value)
return self
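    # The fluent return value allows chaining (illustrative, hypothetical names):
    #   register.setVerbose(True).setData('lastRun', runData)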
def getContextIcon(self, context):
"""Get the icon for the given context
Returns
-------
str
Return the icon of the queried context.
"""
return self._packages.get(context, {}).get('icon', None)
def getEnvIcon(self, context, env):
"""Get the icon for the given env of the given context
Returns
-------
str
Return the icon of the queried env of the given context.
"""
return self._data[context]['envs'][env].get('icon', None)
class Blueprint(object):
"""This object will manage a single process instance to be used through an ui.
The blueprint will init all informations it need to wrap a process like the methods that have been overrided,
if it can run a check, a fix, if it has a ui, its name, docstring and a lot more.
"""
def __init__(self, blueprint, verbose=False):
"""Get the software and setup data.
Parameters
-----------
blueprint: dict
Dict containing the process string and the object (optional).
verbose: bool
Define if the function should log informations about its process. (default: False)
"""
self.verbose = verbose
self.blueprint = blueprint
self.processStr = blueprint.get('process', None)
self.category = blueprint.get('category', 'Other')
initArgs, initKwargs = self.getArguments('__init__')
self._module = None
self._process = self.getProcess(initArgs, initKwargs)
self._links = {AtConstants.CHECK: [], AtConstants.FIX: [], AtConstants.TOOL: []}
self._options = blueprint.get('options', {})
self._name = AtUtils.camelCaseSplit(self._process._name)
self._docstring = self.createDocstring()
self._check = None
self._fix = None
self._tool = None
self._isEnabled = True
self._isCheckable = False
self._isFixable = False
self._hasTool = False
self._inUi = True
self._inBatch = True
self._isNonBlocking = False
# setupCore will automatically retrieve the method needed to execute the process.
# And also the base variable necessary to define if theses methods are available.
self.setupCore()
self.setupTags()
def __repr__(self):
"""Return the representation of the object."""
return "<{0} '{1}' object at {2}'>".format(self.__class__.__name__, self._process.__class__.__name__, hex(id(self)))
@property
def options(self):
"""Get the Blueprint's options"""
return self._options
@property
def name(self):
"""Get the Blueprint's name"""
return self._name
@property
def docstring(self):
"""Get the Blueprint's docstring"""
return self._docstring
@property
def isEnabled(self):
"""Get the Blueprint's enabled state"""
return self._isEnabled
@property
def isCheckable(self):
"""Get the Blueprint's checkable state"""
return self._isCheckable
@property
    def isFixable(self):
        """Get the Blueprint's fixable state"""
        return self._isFixable
@property
def hasTool(self):
"""Get if the Blueprint's have a tool"""
return self._hasTool
@property
def inUi(self):
"""Get if the Blueprint should be run in ui"""
return self._inUi
@property
def inBatch(self):
"""Get if the Blueprint should be run in batch"""
return self._inBatch
@property
def isNonBlocking(self):
"""Get the Blueprint's non blocking state"""
return self._isNonBlocking
def check(self, links=True):
"""This is a wrapper for the process check that will automatically execute it with the right parameters.
Parameters
----------
links: bool
Should the wrapper launch the connected links or not.
Returns
-------
type
The check feedback.
bool
True if the check have any feedback, False otherwise.
"""
if self._check is None:
return None, None
args, kwargs = self.getArguments(AtConstants.CHECK)
returnValue = self._check(*args, **kwargs) #TODO: Not used !!
result = self.filterResult(self._process._feedback)
if links:
self.runLinks(AtConstants.CHECK)
return result, bool(result)
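    # Illustrative caller-side flow (hypothetical, not part of the original class):
    #   feedback, hasFeedback = blueprint.check()
    #   if hasFeedback:
    #       blueprint.fix()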
def fix(self, links=True):
"""This is a wrapper for the process fix that will automatically execute it with the right parameters.
Parameters
----------
links: bool
Should the wrapper launch the connected links or not.
Returns
-------
type
The value returned by the fix.
"""
if self._fix is None:
return None
args, kwargs = self.getArguments(AtConstants.FIX)
returnValue = self._fix(*args, **kwargs)
if links:
self.runLinks(AtConstants.FIX)
return returnValue
def tool(self, links=True):
"""This is a wrapper for the process tool that will automatically execute it with the right parameters.
Parameters
----------
links: bool
Should the wrapper launch the connected links or not.
Returns
-------
type
The value returned by the tool method.
"""
if self._tool is None:
return
args, kwargs = self.getArguments(AtConstants.TOOL)
result = self._tool(*args, **kwargs)
if links:
self.runLinks(AtConstants.TOOL)
return result
    def runLinks(self, which):
        """Run all the links registered for the given method name (check, fix or tool)."""
        links = self._links[which]
for link in links:
link()
def getArguments(self, method):
"""Retrieve arguments for the given method of | |
""""
STRIP Scanning Strategy Tools test module.
"""
import unittest
import healpy as hp
import numpy as np
from ScanningTools import ScanningTools as st
from astropy.time import Time
from astropy.coordinates import SkyCoord, AltAz
from ScanningTools.Quaternions import Quaternion as q
angles = np.array([[-10, 45, 59],
[30, 35, 15],
[-180, 25, 20],
[3, 4, 5]])
hours = np.array([[23, 59, 16],
[7, 56, 59]])
t = np.array([1.546585, -0.56, 0.3333333333333333333, -1.001])
### INSTRUMENT CHARACTERISTICS ###
pointing_accuracy = np.array([0, 0, 25]) #deg (arcsec)
###
### LOCATION INFORMATION ###
LAT = np.array([28, 16, 24]) #deg
LONG = np.array([-16, 38, 32]) #deg
Height = 2400 #m
loc = st.get_location(LAT, LONG, Height)
###
### TIME INFORMATION ###
LCT_start = (0, 0, 0) #h, m, s
LCD_start = (1, 1, 2015) #g, m, y
UTC, DST = (0, 0) #h
###
### ENGINE ROTATION INFORMATION ###
zenith_distance = 31 #deg
polarization_angle = 60 #deg
###
class TestScanningTools(unittest.TestCase):
def test_period2sec(self):
one_sidereal_year = st.period2sec(years=1, days=0, hours=0, min=0, sec=0, sidereal=True)
one_solar_year = st.period2sec(years=1, days=0, hours=0, min=0, sec=0)
one_sidereal_day = st.period2sec(years=0, days=1, hours=0, min=0, sec=0, sidereal=True)
one_solar_day = st.period2sec(years=0, days=1, hours=0, min=0, sec=0)
period_0 = st.period2sec(years=1, days=1, hours=0, min=0, sec=0, sidereal=True)
period_1 = st.period2sec(years=5, days=30, hours=0, min=0, sec=0, sidereal=True)
period_2 = st.period2sec(years=2, days=17, hours=0, min=0, sec=0, sidereal=True)
period_3 = st.period2sec(years=10, days=21, hours=15, min=3, sec=25, sidereal=True)
self.assertEqual(one_sidereal_year, 31558145)
self.assertEqual(one_solar_year, 31536000)
self.assertEqual(one_sidereal_day, 86164)
self.assertEqual(one_solar_day, 86400)
self.assertEqual(period_0, 31644309)
self.assertEqual(period_1, 160375649)
self.assertEqual(period_2, 64581080)
self.assertEqual(period_3, 317445103)
def test_sex2dec(self):
ang0 = st.sex2dec(angles)
ang1 = st.sex2dec(angles[0], radians=True)
self.assertTrue(np.allclose(ang0, np.array([-10.76638889, 30.587500, -180.422222,
3.06805556])))
self.assertEqual(ang1, np.radians(ang0[0]))
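        # Arithmetic behind the first expected value (illustrative):
        #   -(10 + 45/60 + 59/3600) = -10.76638889 degrees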
def test_dec2sex(self):
t0 = st.dec2sex(t)
t00 = st.dec2sex(t[0])
self.assertTrue(np.allclose(t0, np.array([[1, 32, 47.706], [-0, 33, 36], [0, 20, 0],
[-1, 0, 3.6]])))
self.assertTrue(np.allclose(t00, np.array([1, 32, 47.706])))
def test_degrees2hours(self):
ang0 = st.degrees2hours(angles)
ang1 = st.degrees2hours(angles[2], decimal=True)
self.assertTrue(np.allclose(ang0, st.dec2sex(st.sex2dec(angles) / 15)))
self.assertTrue(np.allclose(ang1, st.sex2dec(angles)[2] / 15))
def test_hours2degrees(self):
ang0 = st.hours2degrees(hours[1])
ang1 = st.hours2degrees(hours, decimal=True)
self.assertTrue(np.allclose(ang0, st.dec2sex(st.sex2dec(hours[1]) * 15)))
self.assertTrue(np.allclose(ang1, st.sex2dec(hours) * 15))
def test_LocalCivilTime2JulianDay(self):
"Integrated Test: it includes also the LCT2GCD and GCD2JD function conversion"
Jul_1_2013 = st.LocalCivilTime2JulianDay((3, 37, 0), (1, 7, 2013), UTC=4, DST=1)
Jun_19_2009 = st.LocalCivilTime2JulianDay((18, 0, 0), (19, 6, 2009), UTC=0, DST=0)
self.assertTrue(np.allclose(Jul_1_2013, 2456474.442))
self.assertTrue(np.allclose(Jun_19_2009, 2455002.25))
t = Time(['2015-1-1 00:00:10', '2018-1-3 5:15:24.3', '1980-4-22 19:30:2']).jd
T = np.array([st.LocalCivilTime2JulianDay((0, 0, 10), (1, 1, 2015), UTC=0, DST=0),
st.LocalCivilTime2JulianDay((5, 15, 24.3), (3, 1, 2018), UTC=0, DST=0),
st.LocalCivilTime2JulianDay((19, 30, 2), (22, 4, 1980), UTC=0, DST=0)])
self.assertTrue(np.allclose(t, T))
def test_LocalCivilTime2LocalSiderealTime(self):
LONG = st.dec2sex(0.1)
Jun_19_2009 = st.LocalCivilTime2LocalSiderealTime((18, 0, 0),
(19, 6, 2009),
LONG, UTC=0, DST=0)
self.assertTrue(np.allclose(Jun_19_2009, np.array([11, 52, 46.843])))
def test_get_nside_eff(self):
fwhm_beam0 = np.array([0, 5, 0]) #deg (arcmin)
fwhm_beam1 = np.array([0, 21, 0]) #deg (arcmin)
fwhm_beam2 = np.array([0, 32, 0]) #deg (arcmin)
self.assertEqual(st.get_nside_eff(fwhm_beam0), 1024)
self.assertEqual(st.get_nside_eff(fwhm_beam1), 256)
self.assertEqual(st.get_nside_eff(fwhm_beam2), 128)
def test_get_full_fp(self):
def general_test(x_fp, i, j):
self.assertTrue(np.allclose(x_fp[i, 0], x_fp[j, 0]))
self.assertTrue(np.allclose(x_fp[i, 1], -x_fp[j, 1]))
self.assertTrue(np.allclose(x_fp[i, 2], x_fp[j, 2]))
x_fp, n_horns = st.get_full_fp('./ScanningTools/fp_data/fp_theta.txt',
'./ScanningTools/fp_data/fp_phi.txt')
self.assertTrue(np.allclose(np.sum(x_fp**2, axis=1), 1))
self.assertEqual(n_horns, 49)
general_test(x_fp, 7, 42)
general_test(x_fp, 8, 47)
general_test(x_fp, 9, 46)
general_test(x_fp, 10, 45)
general_test(x_fp, 11, 44)
general_test(x_fp, 12, 43)
general_test(x_fp, 13, 48)
general_test(x_fp, 14, 35)
general_test(x_fp, 15, 40)
general_test(x_fp, 16, 39)
general_test(x_fp, 17, 38)
general_test(x_fp, 18, 37)
general_test(x_fp, 19, 36)
general_test(x_fp, 20, 41)
general_test(x_fp, 21, 28)
general_test(x_fp, 22, 33)
general_test(x_fp, 23, 32)
general_test(x_fp, 24, 31)
general_test(x_fp, 25, 30)
general_test(x_fp, 26, 29)
general_test(x_fp, 27, 34)
    def test_get_full_fp_polarization_angles(self):
def general_test(x_fp, i, j):
self.assertTrue(np.allclose(x_fp[i, 0], x_fp[j, 0]))
self.assertTrue(np.allclose(x_fp[i, 1], -x_fp[j, 1]))
self.assertTrue(np.allclose(x_fp[i, 2], x_fp[j, 2]))
full_psi, polarization_versor = st.get_full_fp_polarization_angles(
'./ScanningTools/fp_data/fp_psi.txt')
self.assertTrue(np.allclose(np.sum(polarization_versor**2, axis=1), 1))
self.assertEqual(len(full_psi), 49)
self.assertEqual(len(polarization_versor), 49)
general_test(polarization_versor, 7, 42)
general_test(polarization_versor, 8, 47)
general_test(polarization_versor, 9, 46)
general_test(polarization_versor, 10, 45)
general_test(polarization_versor, 11, 44)
general_test(polarization_versor, 12, 43)
general_test(polarization_versor, 13, 48)
general_test(polarization_versor, 14, 35)
general_test(polarization_versor, 15, 40)
general_test(polarization_versor, 16, 39)
general_test(polarization_versor, 17, 38)
general_test(polarization_versor, 18, 37)
general_test(polarization_versor, 19, 36)
general_test(polarization_versor, 20, 41)
general_test(polarization_versor, 21, 28)
general_test(polarization_versor, 22, 33)
general_test(polarization_versor, 23, 32)
general_test(polarization_versor, 24, 31)
general_test(polarization_versor, 25, 30)
general_test(polarization_versor, 26, 29)
general_test(polarization_versor, 27, 34)
def test_get_timeJD(self):
def general_tests(time, sampling_rate, JD, JD_step, t0, t1):
self.assertTrue(np.allclose(time[1:] - time[0:-1], 1 / sampling_rate))
self.assertEqual(len(JD), len(time))
self.assertEqual(np.sum(np.diff(JD_step)), 0)
self.assertTrue(np.allclose((t1-t0).sec, 1 / sampling_rate, rtol=1e-3))
def tests_1h(obs_t, time, sampling_rate, JD, JD_step, t0, t1):
self.assertEqual(obs_t, 3600)
self.assertEqual(len(time), obs_t * sampling_rate)
general_tests(time, sampling_rate, JD, JD_step, t0, t1)
def tests_1d(LCT_start, LCD_start, obs_t, time, sampling_rate, JD, JD_step, t0, t1, UTC=UTC,
DST=DST):
general_tests(time, sampling_rate, JD, JD_step, t0, t1)
obs_t0, time0, JD0 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time,
UTC=UTC, DST=DST, day=None)
self.assertEqual(obs_t, 86400)
self.assertEqual(obs_t, obs_t0)
self.assertEqual(len(time), obs_t * sampling_rate)
self.assertTrue(len(time), len(time0))
self.assertTrue(len(JD), len(JD0))
def tests_1y(LCT_start, LCD_start, obs_t, time, sampling_rate, JD, JD_step, t0, t1, UTC=UTC,
DST=DST, day=None):
general_tests(time, sampling_rate, JD, JD_step, t0, t1)
self.assertEqual(obs_t, 86400 * 365)
if day:
self.assertEqual(len(time), 86400 * sampling_rate)
if day > 1:
self.assertTrue(time[0] != 0)
else:
self.assertTrue(time[0] == 0)
else:
self.assertEqual(len(time), obs_t * sampling_rate)
sampling_rate = 50 #Hz
obs_time = (0, 0, 1, 0, 0) #y, d, h, m, s
day = None
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
JD_step = JD[1:] - JD[:-1]
t0 = Time(JD[0], format='jd', location=loc)
t1 = Time(JD[0] + JD_step[0], format='jd', location=loc)
tests_1h(obs_t, time, sampling_rate, JD, JD_step, t0, t1)
sampling_rate = 5 #Hz
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
JD_step = JD[1:] - JD[:-1]
t0 = Time(JD[0], format='jd', location=loc)
t1 = Time(JD[0] + JD_step[0], format='jd', location=loc)
tests_1h(obs_t, time, sampling_rate, JD, JD_step, t0, t1)
sampling_rate = 3 #Hz
obs_time = (0, 1, 0, 0, 0) #y, d, h, m, s
day = 1
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
JD_step = JD[1:] - JD[:-1]
t0 = Time(JD[0], format='jd', location=loc)
t1 = Time(JD[0] + JD_step[0], format='jd', location=loc)
tests_1d(LCT_start, LCD_start, obs_t, time, sampling_rate, JD, JD_step, t0, t1, UTC=UTC,
DST=DST)
sampling_rate = 1 #Hz
obs_time = (1, 0, 0, 0, 0) #y, d, h, m, s
day0, day1, day2, day3, day4 = (1, 5, 364, None, None)
obs_t0, time0, JD0 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day0)
JD_step0 = JD0[1:] - JD0[:-1]
t00 = Time(JD0[0], format='jd', location=loc)
t10 = Time(JD0[0] + JD_step0[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t0, time0, sampling_rate, JD0, JD_step0, t00, t10,
UTC=UTC, DST=DST, day=day0)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day1)
JD_step1 = JD1[1:] - JD1[:-1]
t01 = Time(JD1[0], format='jd', location=loc)
t11 = Time(JD1[0] + JD_step1[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t1, time1, sampling_rate, JD1, JD_step1, t01, t11,
UTC=UTC, DST=DST, day=day1)
obs_t2, time2, JD2 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day2)
JD_step2 = JD2[1:] - JD2[:-1]
t02 = Time(JD2[0], format='jd', location=loc)
t12 = Time(JD2[0] + JD_step2[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t2, time2, sampling_rate, JD2, JD_step2, t02, t12,
UTC=UTC, DST=DST, day=day2)
obs_t3, time3, JD3 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day3)
JD_step3 = JD3[1:] - JD3[:-1]
t03 = Time(JD3[0], format='jd', location=loc)
t13 = Time(JD3[0] + JD_step3[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t3, time3, sampling_rate, JD3, JD_step3, t03, t13,
UTC=UTC, DST=DST, day=day3)
LCT_start4 = (12, 0, 0)
obs_t4, time4, JD4 = st.get_timeJD(LCT_start4, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day4)
JD_step4 = JD4[1:] - JD4[:-1]
t04 = Time(JD4[0], format='jd', location=loc)
t14 = Time(JD4[0] + JD_step4[0], format='jd', location=loc)
tests_1y(LCT_start4, LCD_start, obs_t4, time4, sampling_rate, JD4, JD_step4, t04, t14,
UTC=UTC, DST=DST, day=day4)
def test_spin_generator(self):
def general_spin_tests(phi, obs_time, time, sampling_rate, rpm, day=None):
if day:
self.assertEqual(len(phi), 86400 * sampling_rate)
else:
self.assertEqual(len(phi), obs_time * sampling_rate)
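# The next check counts the strict local maxima of the phase sawtooth: phi
# rises towards 2*pi once per revolution, so the number of peaks should equal
# rpm * (len(phi) / sampling_rate / 60), i.e. rpm times the duration in minutes.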
self.assertEqual(
np.sum(np.r_[True, phi[1:] > phi[:-1]] & np.r_[phi[:-1] > phi[1:], True]),
rpm * len(phi) / sampling_rate / 60)
self.assertEqual(phi.min(), 0)
self.assertTrue(phi.max() < 2 * np.pi)
obs_time1, obs_time2, obs_time3 = ((0, 30, 0, 0, 0), (0, 1, 0, 0, 0), (0, 0, 1, 0, 0))
sampling_rate1, sampling_rate2, sampling_rate3 = (1, 3, 50)
rpm1, rpm2, rpm3 = (13, 1, 5)
day1, day2, day3 = (2, None, None)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate1, obs_time1, UTC=UTC,
DST=DST, day=day1)
phi1 = st.spin_generator(time1, rpm1)
general_spin_tests(phi1, obs_t1, time1, sampling_rate1, rpm1, day=day1)
obs_t2, time2, JD2 = st.get_timeJD(LCT_start, LCD_start, sampling_rate2, obs_time2, UTC=UTC,
DST=DST, day=day2)
phi2 = st.spin_generator(time2, rpm2)
general_spin_tests(phi2, obs_t2, time2, sampling_rate2, rpm2, day=day2)
obs_t3, time3, JD3 = st.get_timeJD(LCT_start, LCD_start, sampling_rate3, obs_time3, UTC=UTC,
DST=DST, day=day3)
phi3 = st.spin_generator(time3, rpm3)
general_spin_tests(phi3, obs_t3, time3, sampling_rate3, rpm3, day=day3)
import os, sys
sys.path.append(os.getcwd())
import time
import numpy as np
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.plot
import tflib.flow_handler as fh
import tflib.SINTELdata as sintel
MODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp
DIM = 64 # Model dimensionality; larger values overfit substantially, so 64 (or possibly 128) is preferred
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 64 # Batch size
ITERS = 100000 # How many generator iterations to train for # 200000 takes too long
IM_DIM = 32 # number of pixels along x and y (square assumed)
SQUARE_IM_DIM = IM_DIM*IM_DIM # 32*32 = 1024
OUTPUT_DIM = IM_DIM*IM_DIM*3 # Number of pixels (3*32*32) - rgb color
OUTPUT_DIM_FLOW = IM_DIM*IM_DIM*2 # Number of pixels (2*32*32) - uv direction
CONTINUE = False # Default False, set True if restoring from checkpoint
START_ITER = 0 # Default 0, set accordingly if restoring from checkpoint (100, 200, ...)
CURRENT_PATH = "sintel/flowcganuv5"
restore_path = "/home/linkermann/opticalFlow/opticalFlowGAN/results/" + CURRENT_PATH + "/model.ckpt"
lib.print_model_settings(locals().copy())
if(CONTINUE):
tf.reset_default_graph()
def LeakyReLU(x, alpha=0.2):
return tf.maximum(alpha*x, x)
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return tf.nn.relu(output)
def LeakyReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(name+'.Linear', n_in, n_out, inputs)
return LeakyReLU(output)
def Generator(n_samples, conditions, noise=None): # input conds additional to noise
if noise is None:
noise = tf.random_normal([n_samples, SQUARE_IM_DIM])
noise = tf.reshape(noise, [n_samples, 1, IM_DIM, IM_DIM])
# new conditional input: last frames
conds = tf.reshape(conditions, [n_samples, 6, IM_DIM, IM_DIM]) # conditions: (64,2*3072) TO conds: (64,6,32,32)
# for now just concat the inputs: noise as seventh dim of cond image
output = tf.concat([noise, conds], 1) # to: (BATCH_SIZE,7,32,32)
output = tf.reshape(output, [n_samples, SQUARE_IM_DIM*7]) # 32*32*7 = 7168; to: (BATCH_SIZE, 7168)
output = lib.ops.linear.Linear('Generator.Input', SQUARE_IM_DIM*7, 4*4*4*DIM, output) # 4*4*4*DIM = 64*64 = 4096
output = lib.ops.batchnorm.Batchnorm('Generator.BN1', [0], output)
output = tf.nn.relu(output)
output = tf.reshape(output, [-1, 4*DIM, 4, 4])
output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator.BN2', [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output)
output = lib.ops.batchnorm.Batchnorm('Generator.BN3', [0,2,3], output)
output = tf.nn.relu(output)
output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 2, 5, output) # output flow in color --> dim is 2
output = tf.tanh(output)
return tf.reshape(output, [-1, OUTPUT_DIM_FLOW]) # output flow --> dim is 2
def Discriminator(inputs, conditions): # input conds as well
inputs = tf.reshape(inputs, [-1, 2, IM_DIM, IM_DIM]) # input flow --> dim is 2
conds = tf.reshape(conditions, [-1, 6, IM_DIM, IM_DIM]) # new conditional input: last frames
# for now just concat the inputs
ins = tf.concat([inputs, conds], 1) #to: (BATCH_SIZE, 8, 32, 32)
output = lib.ops.conv2d.Conv2D('Discriminator.1', 8, DIM, 5, ins, stride=2) # first dim is different: 8 now
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN2', [0,2,3], output)
output = LeakyReLU(output)
output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2)
if MODE != 'wgan-gp':
output = lib.ops.batchnorm.Batchnorm('Discriminator.BN3', [0,2,3], output)
output = LeakyReLU(output)
#output = lib.ops.conv2d.Conv2D('Discriminator.4', 4*DIM, 8*DIM, 5, output, stride=2)
# if MODE != 'wgan-gp':
# output = lib.ops.batchnorm.Batchnorm('Discriminator.BN4', [0,2,3], output)
# output = LeakyReLU(output)
output = tf.reshape(output, [-1, 4*4*4*DIM]) # flatten the 4x4 spatial map with 4*DIM channels (Discriminator.4 is commented out above)
output = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DIM, 1, output)
return tf.reshape(output, [-1])
cond_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, 2*OUTPUT_DIM]) # cond input for G and D, 2 frames!
cond_data = 2*((tf.cast(cond_data_int, tf.float32)/255.)-.5) #normalized [-1,1]!
#real_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM_FLOW]) # real data is flow, dim 2!
real_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, OUTPUT_DIM_FLOW]) #already float, normalized [-1,1]!
fake_data = Generator(BATCH_SIZE, cond_data)
disc_real = Discriminator(real_data, cond_data)
disc_fake = Discriminator(fake_data, cond_data)
gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')
if MODE == 'wgan':
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=disc_params)
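# Weight clipping: the original WGAN enforces the critic's Lipschitz constraint
# by clamping every critic weight into [-0.01, 0.01]; clip_disc_weights below is
# intended to be run after each critic update.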
clip_ops = []
for var in disc_params:
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
clip_disc_weights = tf.group(*clip_ops)
elif MODE == 'wgan-gp':
# Standard WGAN loss
gen_cost = -tf.reduce_mean(disc_fake)
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
# Gradient penalty
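# WGAN-GP replaces weight clipping with a penalty on the critic's gradient norm,
# evaluated at random interpolates between real and generated flows (one uniform
# alpha per sample, broadcast across the flattened flow vector). The penalty
# mean((||grad D(x_hat)|| - 1)^2) is scaled by LAMBDA and added to the critic cost.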
alpha = tf.random_uniform(
shape=[BATCH_SIZE,1],
minval=0.,
maxval=1.
)
differences = fake_data - real_data
interpolates = real_data + (alpha*differences)
gradients = tf.gradients(Discriminator(interpolates, cond_data), [interpolates])[0] #added cond here
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1.)**2)
disc_cost += LAMBDA*gradient_penalty
gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=gen_params)
disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=disc_params)
elif MODE == 'dcgan':
gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.ones_like(disc_fake)))
disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.zeros_like(disc_fake)))
disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=tf.ones_like(disc_real)))
disc_cost /= 2.
gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost,
var_list=lib.params_with_name('Generator'))
disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(disc_cost,
var_list=lib.params_with_name('Discriminator.'))
# Dataset iterators
gen = sintel.load_train_gen(BATCH_SIZE, (IM_DIM,IM_DIM,3), (IM_DIM,IM_DIM,2)) # batch size, im size, im size flow
dev_gen = sintel.load_test_gen(BATCH_SIZE, (IM_DIM,IM_DIM,3), (IM_DIM,IM_DIM,2))
# For generating samples: define fixed noise and conditional input
fixed_cond_samples, fixed_flow_samples = next(gen) # shapes: cond (64, 3*3072), flow (64, 2*2048)
fixed_cond_data_int = fixed_cond_samples[:,0:2*OUTPUT_DIM] # earlier frames as condition, cond samples shape (64,3*3072)
fixed_real_data = fixed_flow_samples[:,OUTPUT_DIM_FLOW:] # later flow for discr: second half of the (64, 2*2048) flow samples
fixed_real_data_norm01 = tf.cast(fixed_real_data+1.0, tf.float32)/2.0 # [0,1]
fixed_cond_data_normalized = 2*((tf.cast(fixed_cond_data_int, tf.float32)/255.)-.5) #normalized [-1,1]!
fixed_viz_data_int = fixed_cond_samples[:,OUTPUT_DIM:2*OUTPUT_DIM] # each later frame for viz
if(CONTINUE):
fixed_noise = tf.get_variable("noise", shape=[BATCH_SIZE, SQUARE_IM_DIM]) # take same noise like saved model
else:
fixed_noise = tf.Variable(tf.random_normal(shape=[BATCH_SIZE, SQUARE_IM_DIM], dtype=tf.float32), name='noise') #variable: saved, for additional channel
fixed_noise_samples = Generator(BATCH_SIZE, fixed_cond_data_normalized, noise=fixed_noise) # Generator(n_samples,conds, noise):
def generate_image(frame, true_dist): # generates 64 (batch-size) samples next to each other in one image!
print("Iteration %d : \n" % frame)
samples = session.run(fixed_noise_samples, feed_dict={real_data: fixed_real_data, cond_data_int: fixed_cond_data_int}) # output range (-1.0,1.0), size=(BATCH_SIZE, OUT_DIM)
#samples_255 = ((samples+1.)*(255./2)).astype('int32') #(-1,1) to [0,255] for displaying
samples_01 = ((samples+1.)/2.).astype('float32') # [0,1] is a np.ndarray shape (64, 2048)
# print(fixed_real_data_norm01.eval()) # shape (64, 2048) # bigger areas with (almost) same flow
images2show = fixed_viz_data_int.reshape(BATCH_SIZE,3,IM_DIM,IM_DIM)
sample_flowimages, real_flowimages = [], []
for i in range(0, BATCH_SIZE):
real_flowimg, flowimg = [],[] # reset to be sure
flowimg = fh.computeFlowImg(samples[i,:].reshape((IM_DIM,IM_DIM,2))) # (32, 32, 3) # now color img!! :)
flowimg_T = np.transpose(flowimg, [2,0,1]) # (3, 32, 32)
# flowimage = flowimage_T.reshape((OUTPUT_DIM,)) # instead of flatten?
sample_flowimages.append(flowimg_T)
real_uvflow = fixed_real_data[i,:]
real_uvflow = real_uvflow.reshape((IM_DIM,IM_DIM,2))
real_flowimg = fh.computeFlowImg(real_uvflow) # (32, 32, 3) color img!
real_flowimg = real_flowimg.reshape(IM_DIM,IM_DIM,3).astype('int32') # (32, 32, 3)
real_flowimg_T = np.transpose(real_flowimg, [2,0,1]) # (3, 32, 32)
real_flowimages.append(real_flowimg_T) # or which one? # also save as .flo?
images2show = np.insert(images2show, i*2+1, flowimg_T, axis=0)
#samples_255[2*i+1,:] = flowimage # sample flow color image
# images2show.shape: (128, 3, 32, 32) = (2*BATCH_SIZE, 3, IM_DIM, IM_DIM)
# images.reshape((2*BATCH_SIZE, 3, IM_DIM, IM_DIM))
lib.save_images.save_images(images2show, 'samples_{}.jpg'.format(frame))
sample_flowims_np = np.asarray(sample_flowimages, np.int32)
real_flowims_np = np.asarray(real_flowimages, np.int32)
sample_flowims = tf.convert_to_tensor(sample_flowims_np, np.int32)
real_flowims = tf.convert_to_tensor(real_flowims_np, np.int32) # turn into tensor to reshape later
# tensor = tf.constant(np_array) # another way to create a tensor
# compare generated flow to real one # float..?
# u-v-component wise
real = tf.reshape(fixed_real_data_norm01, [BATCH_SIZE,IM_DIM,IM_DIM,2]) # use tf.reshape! Tensor! batch!
real_u = tf.slice(real, [0,0,0,0], [real.get_shape()[0],real.get_shape()[1],real.get_shape()[2], 1])
real_v = tf.slice(real, [0,0,0,1], [real.get_shape()[0],real.get_shape()[1],real.get_shape()[2], 1])
pred = tf.reshape(samples_01,[BATCH_SIZE,IM_DIM,IM_DIM,2]) # use tf reshape!
pred_u = tf.slice(pred, [0,0,0,0], [pred.get_shape()[0],pred.get_shape()[1],pred.get_shape()[2], 1])
pred_v = tf.slice(pred, [0,0,0,1], [pred.get_shape()[0],pred.get_shape()[1],pred.get_shape()[2], 1]) # shape (64, 32, 32) all of them
# mse & ssim on components
mseval_per_entry_u = tf.keras.metrics.mse(real_u, pred_u) # on gray, on [0,1], (64,32,32), small vals (^-1,-2,-3)
mseval_u = tf.reduce_mean(mseval_per_entry_u, [1,2]) # shape (64,) # diff numbers
mseval_per_entry_v = tf.keras.metrics.mse(real_v, pred_v) # on gray, on [0,1], (64,32,32), small vals (^-1,-2,-3)
mseval_v = tf.reduce_mean(mseval_per_entry_v, [1,2]) # shape (64,) # diff than per u entry
#ssimval_u = tf.image.ssim(real_u, pred_u, max_val=1.0) # in: tensor 64-batch, out: tensor ssimvals (64,)
#ssimval_v = tf.image.ssim(real_v, pred_v, max_val=1.0) # in: tensor 64-batch, out: tensor ssimvals (64,) # also minus vals, around 0, u and v differ
# avg: add and divide by 2
mseval_uv = tf.add(mseval_u, mseval_v) # tf.cast necessary?
tensor2 = tf.constant(2.0, shape=[64])
#ssimval_uv = tf.add(ssimval_u, ssimval_v) # (64,)
mseval_uv = tf.div(mseval_uv, tensor2)
#ssimval_uv = tf.div(ssimval_uv, tensor2) # (64,), small around 0, up to 0.3 after first 100 iter
#ssimval_list_uv = ssimval_uv.eval() # to numpy array # (64,)
mseval_list_uv = mseval_uv.eval() # (64,)
print("mseval uv")
print(mseval_list_uv)
#print("ssimval uv")
#print(ssimval_list_uv)
# flow color ims to gray
real_flowims = tf.cast(real_flowims, tf.float32)/255. # to [0,1]
real_color = tf.reshape(real_flowims, [BATCH_SIZE,IM_DIM,IM_DIM,3])
real_gray = tf.image.rgb_to_grayscale(real_color) # tensor batch to gray; returns original dtype = float [0,1]
# print("real gray") # (64, 32, 32, 1)
sample_flowims = tf.cast(sample_flowims, tf.float32)/255. # to [0,1], mirroring real_flowims above
<filename>tests/test_cookies.py
import pytest
from flask import Flask
from flask import jsonify
from flask import request
from flask_jwt_extended import create_access_token
from flask_jwt_extended import create_refresh_token
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
from flask_jwt_extended import set_access_cookies
from flask_jwt_extended import set_refresh_cookies
from flask_jwt_extended import unset_access_cookies
from flask_jwt_extended import unset_jwt_cookies
from flask_jwt_extended import unset_refresh_cookies
def _get_cookie_from_response(response, cookie_name):
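# Parses a Set-Cookie header into a dict keyed by lowercased attribute name,
# e.g. "access_token_cookie=abc; HttpOnly; Path=/" yields
# {"access_token_cookie": "abc", "httponly": True, "path": "/"}.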
cookie_headers = response.headers.getlist("Set-Cookie")
for header in cookie_headers:
attributes = header.split(";")
if cookie_name in attributes[0]:
cookie = {}
for attr in attributes:
split = attr.split("=")
cookie[split[0].strip().lower()] = split[1] if len(split) > 1 else True
return cookie
return None
@pytest.fixture(scope="function")
def app():
app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "foobarbaz"
app.config["JWT_TOKEN_LOCATION"] = ["cookies"]
JWTManager(app)
@app.route("/access_token", methods=["GET"])
def access_token():
domain = request.args.get("domain")
resp = jsonify(login=True)
access_token = create_access_token("username")
set_access_cookies(resp, access_token, domain=domain)
return resp
@app.route("/refresh_token", methods=["GET"])
def refresh_token():
domain = request.args.get("domain")
resp = jsonify(login=True)
refresh_token = create_refresh_token("username")
set_refresh_cookies(resp, refresh_token, domain=domain)
return resp
@app.route("/delete_tokens", methods=["GET"])
def delete_tokens():
domain = request.args.get("domain")
resp = jsonify(logout=True)
unset_jwt_cookies(resp, domain=domain)
return resp
@app.route("/delete_access_tokens", methods=["GET"])
def delete_access_tokens():
domain = request.args.get("domain")
resp = jsonify(access_revoked=True)
unset_access_cookies(resp, domain=domain)
return resp
@app.route("/delete_refresh_tokens", methods=["GET"])
def delete_refresh_tokens():
domain = request.args.get("domain")
resp = jsonify(refresh_revoked=True)
unset_refresh_cookies(resp, domain=domain)
return resp
@app.route("/protected", methods=["GET"])
@jwt_required()
def protected():
return jsonify(foo="bar")
@app.route("/post_protected", methods=["POST"])
@jwt_required()
def post_protected():
return jsonify(foo="bar")
@app.route("/refresh_protected", methods=["GET"])
@jwt_required(refresh=True)
def refresh_protected():
return jsonify(foo="bar")
@app.route("/post_refresh_protected", methods=["POST"])
@jwt_required(refresh=True)
def post_refresh_protected():
return jsonify(foo="bar")
@app.route("/optional_post_protected", methods=["POST"])
@jwt_required(optional=True)
def optional_post_protected():
return jsonify(foo="bar")
return app
@pytest.mark.parametrize(
"options",
[
(
"/refresh_token",
"refresh_token_cookie",
"/refresh_protected",
"/delete_refresh_tokens",
), # nopep8
("/access_token", "access_token_cookie", "/protected", "/delete_access_tokens"),
],
)
def test_jwt_refresh_required_with_cookies(app, options):
test_client = app.test_client()
auth_url, cookie_name, protected_url, delete_url = options
# Test without cookies
response = test_client.get(protected_url)
assert response.status_code == 401
assert response.get_json() == {"msg": 'Missing cookie "{}"'.format(cookie_name)}
# Test after receiving cookies
test_client.get(auth_url)
response = test_client.get(protected_url)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
# Test after issuing a 'logout' to delete the cookies
test_client.get(delete_url)
response = test_client.get(protected_url)
assert response.status_code == 401
assert response.get_json() == {"msg": 'Missing cookie "{}"'.format(cookie_name)}
# log back in once more to test that clearing all tokens works
test_client.get(auth_url)
response = test_client.get(protected_url)
assert response.status_code == 200
test_client.get("/delete_tokens")
response = test_client.get(protected_url)
assert response.status_code == 401
assert response.get_json() == {"msg": 'Missing cookie "{}"'.format(cookie_name)}
@pytest.mark.parametrize(
"options",
[
("/refresh_token", "csrf_refresh_token", "/post_refresh_protected"),
("/access_token", "csrf_access_token", "/post_protected"),
],
)
def test_default_access_csrf_protection(app, options):
test_client = app.test_client()
auth_url, csrf_cookie_name, post_url = options
# Get the jwt cookies and csrf double submit tokens
response = test_client.get(auth_url)
csrf_token = _get_cookie_from_response(response, csrf_cookie_name)[csrf_cookie_name]
# Test you cannot post without the additional csrf protection
response = test_client.post(post_url)
assert response.status_code == 401
assert response.get_json() == {"msg": "Missing CSRF token"}
# Test that you can post with the csrf double submit value
csrf_headers = {"X-CSRF-TOKEN": csrf_token}
response = test_client.post(post_url, headers=csrf_headers)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
@pytest.mark.parametrize(
"options",
[
("/refresh_token", "/post_refresh_protected"),
("/access_token", "/post_protected"),
],
)
def test_non_matching_csrf_token(app, options):
test_client = app.test_client()
auth_url, post_url = options
# Get the jwt cookies and csrf double submit tokens
test_client.get(auth_url)
csrf_headers = {"X-CSRF-TOKEN": "<PASSWORD>"}
response = test_client.post(post_url, headers=csrf_headers)
assert response.status_code == 401
assert response.get_json() == {"msg": "CSRF double submit tokens do not match"}
@pytest.mark.parametrize(
"options",
[
("/refresh_token", "/post_refresh_protected"),
("/access_token", "/post_protected"),
],
)
def test_csrf_disabled(app, options):
app.config["JWT_COOKIE_CSRF_PROTECT"] = False
test_client = app.test_client()
auth_url, post_url = options
# Get the jwt cookies and csrf double submit tokens
test_client.get(auth_url)
response = test_client.post(post_url)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
@pytest.mark.parametrize(
"options",
[
("/refresh_token", "csrf_refresh_token", "/post_refresh_protected"),
("/access_token", "csrf_access_token", "/post_protected"),
],
)
def test_csrf_with_custom_header_names(app, options):
app.config["JWT_ACCESS_CSRF_HEADER_NAME"] = "FOO"
app.config["JWT_REFRESH_CSRF_HEADER_NAME"] = "FOO"
test_client = app.test_client()
auth_url, csrf_cookie_name, post_url = options
# Get the jwt cookies and csrf double submit tokens
response = test_client.get(auth_url)
csrf_token = _get_cookie_from_response(response, csrf_cookie_name)[csrf_cookie_name]
# Test that you can post with the csrf double submit value
csrf_headers = {"FOO": csrf_token}
response = test_client.post(post_url, headers=csrf_headers)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
@pytest.mark.parametrize(
"options",
[
("/refresh_token", "csrf_refresh_token", "/post_refresh_protected"),
("/access_token", "csrf_access_token", "/post_protected"),
],
)
def test_csrf_with_default_form_field(app, options):
app.config["JWT_CSRF_CHECK_FORM"] = True
test_client = app.test_client()
auth_url, csrf_cookie_name, post_url = options
# Get the jwt cookies and csrf double submit tokens
response = test_client.get(auth_url)
csrf_token = _get_cookie_from_response(response, csrf_cookie_name)[csrf_cookie_name]
# Test that you can post with the csrf double submit value
csrf_data = {"csrf_token": csrf_token}
response = test_client.post(post_url, data=csrf_data)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
@pytest.mark.parametrize(
"options",
[
("/refresh_token", "csrf_refresh_token", "/post_refresh_protected"),
("/access_token", "csrf_access_token", "/post_protected"),
],
)
def test_csrf_with_custom_form_field(app, options):
app.config["JWT_CSRF_CHECK_FORM"] = True
app.config["JWT_ACCESS_CSRF_FIELD_NAME"] = "FOO"
app.config["JWT_REFRESH_CSRF_FIELD_NAME"] = "FOO"
test_client = app.test_client()
auth_url, csrf_cookie_name, post_url = options
# Get the jwt cookies and csrf double submit tokens
response = test_client.get(auth_url)
csrf_token = _get_cookie_from_response(response, csrf_cookie_name)[csrf_cookie_name]
# Test that you can post with the csrf double submit value
csrf_data = {"FOO": csrf_token}
response = test_client.post(post_url, data=csrf_data)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
@pytest.mark.parametrize(
"options",
[
(
"/refresh_token",
"csrf_refresh_token",
"/refresh_protected",
"/post_refresh_protected",
), # nopep8
("/access_token", "csrf_access_token", "/protected", "/post_protected"),
],
)
def test_custom_csrf_methods(app, options):
app.config["JWT_CSRF_METHODS"] = ["GET"]
test_client = app.test_client()
auth_url, csrf_cookie_name, get_url, post_url = options
# Get the jwt cookies and csrf double submit tokens
response = test_client.get(auth_url)
csrf_token = _get_cookie_from_response(response, csrf_cookie_name)[csrf_cookie_name]
# Ensure we can now do posts without csrf
response = test_client.post(post_url)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
# Ensure GET requests now fail without csrf
response = test_client.get(get_url)
assert response.status_code == 401
assert response.get_json() == {"msg": "Missing CSRF token"}
# Ensure GET requests now succeed with csrf
csrf_headers = {"X-CSRF-TOKEN": csrf_token}
response = test_client.get(get_url, headers=csrf_headers)
assert response.status_code == 200
assert response.get_json() == {"foo": "bar"}
def test_default_cookie_options(app):
test_client = app.test_client()
# Test the default access cookies
response = test_client.get("/access_token")
cookies = response.headers.getlist("Set-Cookie")
assert len(cookies) == 2 # JWT and CSRF value
access_cookie = _get_cookie_from_response(response, "access_token_cookie")
assert access_cookie is not None
assert access_cookie["path"] == "/"
assert access_cookie["httponly"] is True
assert "samesite" not in access_cookie
access_csrf_cookie = _get_cookie_from_response(response, "csrf_access_token")
assert access_csrf_cookie is not None
assert access_csrf_cookie["path"] == "/"
assert "httponly" not in access_csrf_cookie
assert "samesite" not in access_csrf_cookie
# Test the default refresh cookies
response = test_client.get("/refresh_token")
cookies = response.headers.getlist("Set-Cookie")
assert len(cookies) == 2 # JWT and CSRF value
refresh_cookie = _get_cookie_from_response(response, "refresh_token_cookie")
assert refresh_cookie is not None
assert refresh_cookie["path"] == "/"
assert refresh_cookie["httponly"] is True
assert "samesite" not in refresh_cookie
refresh_csrf_cookie = _get_cookie_from_response(response, "csrf_refresh_token")
assert refresh_csrf_cookie is not None
assert refresh_csrf_cookie["path"] == "/"
assert "httponly" not in refresh_csrf_cookie
assert "samesite" not in refresh_csrf_cookie
def test_custom_cookie_options(app):
test_client = app.test_client()
app.config["JWT_COOKIE_SECURE"] = True
app.config["JWT_COOKIE_DOMAIN"] = "test.com"
app.config["JWT_SESSION_COOKIE"] = False
app.config["JWT_COOKIE_SAMESITE"] = "Strict"
# Test access cookies with changed options
response = test_client.get("/access_token")
cookies = response.headers.getlist("Set-Cookie")
assert len(cookies) == 2 # JWT and CSRF value
access_cookie = _get_cookie_from_response(response, "access_token_cookie")
assert access_cookie is not None
assert access_cookie["domain"] == "test.com"
assert access_cookie["path"] == "/"
assert access_cookie["expires"] != ""
assert access_cookie["httponly"] is True
assert access_cookie["secure"] is True
assert access_cookie["samesite"] == "Strict"
access_csrf_cookie = _get_cookie_from_response(response, "csrf_access_token")
assert access_csrf_cookie is not None
assert access_csrf_cookie["path"] == "/"
assert access_csrf_cookie["secure"] is True
assert access_csrf_cookie["domain"] == "test.com"
assert access_csrf_cookie["expires"] != ""
assert access_csrf_cookie["samesite"] == "Strict"
# Test refresh cookies with changed options
response = test_client.get("/refresh_token")
cookies = response.headers.getlist("Set-Cookie")
assert len(cookies) == 2 # JWT and CSRF value
refresh_cookie = _get_cookie_from_response(response, "refresh_token_cookie")
assert refresh_cookie is not None
assert refresh_cookie["domain"] == "test.com"
assert refresh_cookie["path"] == "/"
assert refresh_cookie["httponly"] is True
assert refresh_cookie["secure"] is True
assert refresh_cookie["expires"] != ""
assert refresh_cookie["samesite"] == "Strict"
refresh_csrf_cookie = _get_cookie_from_response(response, "csrf_refresh_token")
assert refresh_csrf_cookie is not None
assert refresh_csrf_cookie["path"] == "/"
assert refresh_csrf_cookie["secure"] is True
assert refresh_csrf_cookie["domain"] == "test.com"
assert refresh_csrf_cookie["expires"] != ""
assert refresh_csrf_cookie["samesite"] == "Strict"
def test_custom_cookie_names_and_paths(app):
test_client = app.test_client()
app.config["JWT_ACCESS_CSRF_COOKIE_NAME"] = "access_foo_csrf"
app.config["JWT_REFRESH_CSRF_COOKIE_NAME"] = "refresh_foo_csrf"
app.config["JWT_ACCESS_CSRF_COOKIE_PATH"] = "/protected"
app.config["JWT_REFRESH_CSRF_COOKIE_PATH"] = "/refresh_protected"
app.config["JWT_ACCESS_COOKIE_NAME"] = "access_foo"
app.config["JWT_REFRESH_COOKIE_NAME"] = "refresh_foo"
app.config["JWT_ACCESS_COOKIE_PATH"] = "/protected"
app.config["JWT_REFRESH_COOKIE_PATH"] = "/refresh_protected"
# Test the default access cookies
response = test_client.get("/access_token")
cookies = response.headers.getlist("Set-Cookie")
assert len(cookies) == 2 # JWT and CSRF value
access_cookie = _get_cookie_from_response(response, "access_foo")
access_csrf_cookie = _get_cookie_from_response(response, "access_foo_csrf")
assert access_cookie is not None
assert access_csrf_cookie is not None
assert access_cookie["path"] == "/protected"
assert access_csrf_cookie["path"] == "/protected"
# Test the default refresh cookies
response = test_client.get("/refresh_token")
cookies = response.headers.getlist("Set-Cookie")
assert len(cookies) == 2 # JWT and CSRF value
refresh_cookie = _get_cookie_from_response(response, "refresh_foo")
refresh_csrf_cookie = _get_cookie_from_response(response, "refresh_foo_csrf")
assert refresh_cookie is not None
assert refresh_csrf_cookie is not None
assert refresh_cookie["path"] == "/refresh_protected"
assert refresh_csrf_cookie["path"] == "/refresh_protected"
def test_csrf_token_not_in_cookie(app):
test_client = app.test_client()
<filename>src/models/new_allen_nlp/Mortality/MortalityReader.py
import tempfile
from typing import Dict, Iterable, List, Tuple
from overrides import overrides
import torch
import allennlp
from allennlp.data import DataLoader, DatasetReader, Instance, Vocabulary
from allennlp.data.fields import LabelField, TextField, MetadataField, MultiLabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder, CnnEncoder
'''transformer stuff'''
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
# from allennlp.modules.text_field_embedders import
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.modules.seq2vec_encoders import BertPooler
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, Auc
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.trainer import Trainer, GradientDescentTrainer
from allennlp.training.util import evaluate
# import the regularization
from allennlp.nn.regularizers import L2Regularizer, RegularizerApplicator
import pandas as pd
import os
import gc
from tqdm.auto import tqdm
import sys
sys.path.append("/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/")
from src.preprocessing.text_preprocessing import preprocess_mimic
import torch
import matplotlib.pyplot as plt
from CONST import LOGGER_NAME
'''
get the logger, if it is available
'''
import logging
import numpy as np
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(LOGGER_NAME)
logger.debug("hello")
@DatasetReader.register("MortalityReader")
class MortalityReader(DatasetReader):
def __init__(self,
lazy: bool = True,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = 768*4,
train_listfile: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/in-hospital-mortality/train/listfile.csv",
test_listfile: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/in-hospital-mortality/test/listfile.csv",
notes_dir: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes",
skip_patients_file: str ="/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes/null_patients.txt",
stats_write_dir: str="/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes/",
all_stays: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/root/all_stays.csv",
limit_examples: int = None,
use_preprocessing: bool = False,
num_classes: int=2,
mode: str='train',
data_type: str="MORTALITY",
args=None,
hadm2eps_path: str="/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes/hadm2episode.dict"
):
super().__init__(lazy)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self.max_tokens = max_tokens
self.train_listfile = train_listfile
self.test_listfile = test_listfile
self.notes_dir = notes_dir
self.use_preprocessing = use_preprocessing
logger.critical(f"we are getting the max tokens {self.max_tokens} "
f"and use_preproc is {self.use_preprocessing}")
self.null_patients = []
with open(skip_patients_file, "r") as file:
for line in file:
self.null_patients.append(line.strip())
self.stats_write_dir = stats_write_dir
self.all_stays_path = all_stays
self.all_stays_df = self.get_all_stays()
self.limit_examples = limit_examples
self.cur_examples = 0
self.lengths = []
self.num_classes = num_classes
self.mode = mode
self.sampled_idx = {}
self.data_type = data_type
self.args = args
self.get_idx() #realistically, only the train_idx will be set, and we simply need to compare against
# self.null_patients
self.vocab = None
self.hadm2eps_path = hadm2eps_path
self.listfile_df = pd.read_csv(train_listfile)
if self.data_type == "PHENOTYPING" or self.data_type == "DECOMPENSATION":
self.labels_start_idx = 2
elif self.data_type == "MORTALITY":
self.labels_start_idx = 1
self.labels = list(self.listfile_df.columns[self.labels_start_idx:])
# def set_mode(self, mode: str):
# if mode == "train":
# self.limit_examples = None
# else:
#
# pass
def get_idx(self):
train_sampler = self.get_sampler(self.train_listfile)
self.sampled_idx["train"] = list(train_sampler)
self.train_sampler = train_sampler
test_sampler = self.get_sampler(self.test_listfile)
self.sampled_idx["valid"] = list(test_sampler)
self.test_sampler = test_sampler
if self.limit_examples:
self.sampled_idx["train"] = self.sampled_idx["train"][:self.limit_examples]
self.sampled_idx["valid"] = self.sampled_idx["valid"][:self.limit_examples]
def get_label_stats(self, file_path: str):
'''
Gets label (mortality) stats
'''
# get stats on the dataset listed at _path_
from collections import defaultdict
self.stats = defaultdict(int)
with open(file_path, "r") as file:
file.readline() # could also pandas readcsv and ignore first line
for line in file:
info_filename, label = line.split(",")
self.stats[int(label)] +=1
return self.stats
'''
Parses the line, according to the mode. Returns a dict with the proper keys set
Could also have implemented this with a DF instead
'''
def parse_line(self, line):
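# e.g. a MORTALITY listfile row like "123_episode1_timeseries.csv,1"
# (illustrative filename) parses to
# {"filename": "123_episode1_timeseries.csv", "label": 1}.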
info_dict = {}
mapping_dict = {}
if self.data_type == "MORTALITY":
headers = ["filename", "label"]
elif self.data_type == "DECOMPENSATION":
headers = ["filename", "time", "label"]
elif self.data_type == "PHENOTYPING":
headers = ["filename", "time", "label"]
else:
headers = ["filename", "time", "label"]
for i,header in enumerate(headers):
mapping_dict[header] = i #can also use a dict comprehension here
info_array = line.split(",")
for key in mapping_dict:
if key == "label":
info_dict[key] = int(info_array[mapping_dict[key]])
elif key == "time":
info_dict[key] = float(info_array[mapping_dict[key]])
else:
info_dict[key] = info_array[mapping_dict[key]]
return info_dict
'''
Reads in all the labels, and forms a sampler, according to a balanced approach.
'''
def get_sampler(self, listfile: str = ""):
self.labels = []
sampling_num_classes = None
if self.data_type == "DECOMPENSATION" or self.data_type == "MORTALITY":
sampling_num_classes = 2
else:
sampling_num_classes = 25
self.class_counts = np.zeros(sampling_num_classes) # fix sampling for phenotypes
with open(listfile, "r") as file:
file.readline()
for line in file:
info_dict = self.parse_line(line)
self.labels.append([info_dict["label"]])
self.class_counts[int(info_dict["label"])] += 1
# now, we assign the weights to ALL the class labels
self.class_weights = 1/self.class_counts
# essentially, assign the weights as the ratios, from the self.stats stuff
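# e.g. with class counts [900, 100] the weights are [1/900, 1/100], so the
# weighted sampler below draws minority-class examples nine times as often.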
all_label_weights = self.class_weights[self.labels].squeeze() #produce an array of size labels, but looking up the value in class weights each time
num_samples = self.limit_examples if self.limit_examples else len(all_label_weights)
num_samples = min(num_samples, len(all_label_weights))
if self.args.sampler_type == "balanced":
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights=all_label_weights,
num_samples=num_samples,
replacement = False)
elif self.args.sampler_type == "random":
sampler = torch.utils.data.sampler.SubsetRandomSampler(indices=[i for i in range(len(all_label_weights))])
# sampler = list(sampler)[:num_samples]
else:
logger.critical("Weird sampler specified \n")
sampler = None
return sampler
def get_sampler_from_dataset(self, dataset):
self.labels = []
self.class_counts = np.zeros(2)
for data in dataset: # could replace this with an arbitrary data source, and we just yield from it
info_dict = data.fields
label = int(info_dict["label"].label)
self.labels.append(label)
self.class_counts[label] += 1
# now, we assign the weights to ALL the class labels
self.class_weights = 1/self.class_counts
# essentially, assign the weights as the ratios, from the self.stats stuff
all_label_weights = self.class_weights[self.labels] #produce an array of size labels, but looking up the value in class weights each time
num_samples = self.limit_examples if self.limit_examples else len(all_label_weights)
num_samples = min(num_samples, len(all_label_weights))
if self.args.sampler_type == "balanced":
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights=all_label_weights,
num_samples=num_samples,
replacement = False)
else:
sampler = torch.utils.data.sampler.SubsetRandomSampler(indices=[i for i in range(len(all_label_weights))])
# sampler = list(sampler)[:num_samples]
return sampler #now that we have a sampler, we can do things: pass it into the dataloader
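# Illustrative (hypothetical) call site: the returned sampler plugs straight into
# a torch DataLoader, e.g. DataLoader(dataset, batch_size=64, sampler=sampler).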
'''
Creates and saves a histogram of the note lengths
'''
def make_lengths_histogram(self):
pass
'''
Gets stats for the data listed at the datapath
'''
def get_note_stats(self, file_path, name="train"):
print(f"in note stats, the logger is {logger} and we have {__name__}")
print(logger.getEffectiveLevel())
from collections import defaultdict
self.note_stats = defaultdict(list)
exclusions = 0
num_examples = 0
with open(file_path, "r") as file:
for line in file:
num_examples+=1
with open(file_path, "r") as file, \
open(os.path.join(self.stats_write_dir, "note_lengths.txt") , "a") as note_length_file:
file.readline() # could also pandas readcsv and ignore first line
for example_number,line in enumerate(tqdm(file, total=num_examples)):
if self.mode != "test" and self.limit_examples and example_number > self.limit_examples:
break
info_filename, label = line.split(",")
info = info_filename.split("_")
patient_id = info[0]
# verify string inside a list of string
if patient_id not in self.null_patients: # could also just do try except here
eps = int("".join([c for c in info[1] if c.isdigit()]))
notes = pd.read_pickle(os.path.join(self.notes_dir, patient_id, "notes.pkl"))
notes[["CHARTTIME", "STORETIME", "CHARTDATE"]] = notes[["CHARTTIME", "STORETIME", "CHARTDATE"]].apply(pd.to_datetime)
# fill in the time, do two passes. Any not caught in the first pass will get helped by second
notes["CHARTTIME"] = notes["CHARTTIME"].fillna(notes["STORETIME"])
notes["CHARTTIME"] = notes["CHARTTIME"].fillna(value=notes["CHARTDATE"].map(lambda x: pd.Timestamp(x) + pd.Timedelta(days=1) - pd.Timedelta(seconds=1)))
assert len(notes[notes["CHARTTIME"].isnull()]) == 0 # all of them should have been filled in.
# now, let's sort the notes
episode_specific_notes = notes[notes["EPISODES"] == eps].copy(deep=True)
# hadm_id = episode_specific_notes.groupby(["HADM_ID"]).agg({""}) # hadm ids seem to 1-to1 correspond to episodes
hadm_id = episode_specific_notes["HADM_ID"]
one_hadm_id = hadm_id.unique()
logger.info(type(one_hadm_id))
assert (one_hadm_id.shape[0]) == 1
assert len(one_hadm_id) == 1
icu_intime = self.all_stays_df[ self.all_stays_df["HADM_ID"] == one_hadm_id[0]]
# we are assuming that the intime is not null
intime_date = pd.Timestamp(icu_intime["INTIME"].iloc[0]) # iloc will automatically extract once you get to the base element
intime_date_plus_time = pd.Timestamp(intime_date) + pd.Timedelta(days=2)
# all notes up to two days. Including potentially previous events.
mask = ( episode_specific_notes["CHARTTIME"] > intime_date) & (episode_specific_notes["CHARTTIME"] <= intime_date_plus_time)
all_mask = (episode_specific_notes["CHARTTIME"] <= intime_date_plus_time)
time_episode_specific_notes = episode_specific_notes[mask].copy(deep=True)
logger.debug("Went from {} to {} notes\n".format(len(episode_specific_notes), len(time_episode_specific_notes)))
if len(time_episode_specific_notes) > 0:
text_df = time_episode_specific_notes
text_df.sort_values("CHARTTIME", ascending=True, inplace=True) # we want them sorted by increasing time
# unlike the other one, we found our performance acceptable. Therefore, we use only the first note.
text = " ".join(text_df["TEXT"].tolist()) #assuming sorted order
tokens = self.tokenizer.tokenize(text)
if patient_id in self.note_stats:
logger.info("Encountering the patient another time, for another episode {} {}".format(patient_id, eps))
self.note_stats[patient_id].append(len(tokens)) # the same patient id can be encountered for multiple episodes
if int(patient_id)%1000==0:
logger.info("text for patient {} \n: {}".format(patient_id,text))
logger.info("end of text for patient {} \n".format(patient_id))
else:
logger.warning("No text found for patient {}. This is with the time hour {} window\n. ".format(patient_id, 48))
exclusions +=1
'''below code is functionally
from __future__ import print_function
import argparse
from datetime import datetime
import os
import re
import signal
import ssl
import subprocess
import sys
import tempfile
import time
from werkzeug.serving import make_ssl_devcert
# pylint: disable=wrong-import-order
try:
from urllib.parse import urlsplit, splitport
except ImportError:
from urllib2 import splitport
from urlparse import urlsplit
import requests
_VALID_HOST_PATTERN = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}([:]\d+)?$'
class LiveAndLetDieError(BaseException):
pass
def _log(logging, message):
if logging:
print('LIVEANDLETDIE: {0}'.format(message))
def _validate_host(host):
if re.match(_VALID_HOST_PATTERN, host):
return host
else:
raise argparse.ArgumentTypeError('{0} is not a valid host!'
.format(host))
def split_host(host):
"""
Splits host into host and port.
:param str host:
Host including port.
:returns:
A ``(str(host), int(port))`` tuple.
"""
host, port = (host.split(':') + [None])[:2]
return host, int(port)
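# e.g. split_host('127.0.0.1:5000') -> ('127.0.0.1', 5000)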
def check(server):
"""Checks whether a server is running."""
return server.check()
def live(app):
"""
Starts a live app in a separate process
and checks whether it is running.
"""
return app.live()
def start(*args, **kwargs):
"""Alias for :funct:`live`"""
live(*args, **kwargs)
def die(app):
"""
Stops a live app that was previously started in a separate process.
"""
return app.die()
def stop(*args, **kwargs):
"""Alias for :funct:`die`"""
die(*args, **kwargs)
def port_in_use(port, kill=False, logging=False):
"""
Checks whether a port is free or not.
:param int port:
The port number to check for.
:param bool kill:
If ``True`` the process will be killed.
:returns:
The process id as :class:`int` if in use, otherwise ``False`` .
"""
command_template = 'lsof -iTCP:{0} -sTCP:LISTEN'
process = subprocess.Popen(command_template.format(port).split(),
stdout=subprocess.PIPE)
headers = process.stdout.readline().decode().split()
if 'PID' not in headers:
_log(logging, 'Port {0} is free.'.format(port))
return False
index_pid = headers.index('PID')
index_cmd = headers.index('COMMAND')
row = process.stdout.readline().decode().split()
if len(row) < index_pid:
_log(logging, 'Port {0} is free.'.format(port))
return False
pid = int(row[index_pid])
command = row[index_cmd]
if pid and command.startswith('python'):
_log(logging, 'Port {0} is already being used by process {1}!'
.format(port, pid))
if kill:
_log(logging,
'Killing process with id {0} listening on port {1}!'
.format(pid, port))
os.kill(pid, signal.SIGKILL)
# Check whether it was really killed.
try:
# If still alive
kill_process(pid, logging)
# call me again
_log(logging,
'Process {0} is still alive! checking again...'
.format(pid))
return port_in_use(port, kill)
except OSError:
# If killed
return False
else:
return pid
def kill_process(pid, logging=False):
try:
_log(logging, 'Killing process {0}!'.format(pid))
os.kill(int(pid), signal.SIGKILL)
return
except OSError:
# If killed
return False
def _get_total_seconds(td):
"""
Fixes the missing :meth:`datetime.timedelta.total_seconds()`
method in Python 2.6
"""
# pylint: disable=invalid-name
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) \
/ 10 ** 6
class Base(object):
"""
Base class for all frameworks.
:param str path:
Absolute path to app directory or module (depends on framework).
:param str host:
A host at which the live server should listen.
:param float timeout:
Timeout in seconds for the check.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{host}:{port}"``.
:param bool logging:
Whether liveandletdie logs should be printed out.
:param bool suppress_output:
Whether the stdout of the launched application should be suppressed.
"""
_argument_parser = argparse.ArgumentParser()
def __init__(self, path, host='127.0.0.1', port=8001, timeout=10.0,
check_url=None, executable='python', logging=False,
suppress_output=True, **kwargs):
self.path = path
self.timeout = timeout
self.host = host
self.port = port
self.process = None
self.executable = executable
self.logging = logging
self.suppress_output = suppress_output
self.check_url = 'http://{0}:{1}'.format(host, port)
self.scheme = 'http'
if check_url:
self.check_url = self._normalize_check_url(check_url)
def create_command(self):
pass
@property
def default_url(self):
return '{0}://{1}:{2}'.format(self.scheme, self.host, self.port)
def _kill(self):
if self.process:
try:
os.killpg(self.process.pid, signal.SIGKILL)
except OSError:
self.process.kill()
self.process.wait()
def _normalize_check_url(self, check_url):
"""
Normalizes check_url by:
* Adding the `http` scheme if missing
* Adding or replacing port with `self.port`
"""
# TODO: Write tests for this method
split_url = urlsplit(check_url)
host = splitport(split_url.path or split_url.netloc)[0]
return '{0}://{1}:{2}'.format(self.scheme, host, self.port)
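# e.g. with self.scheme='http' and self.port=5000,
# 'http://localhost:1234' normalizes to 'http://localhost:5000'.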
def check(self, check_url=None):
"""
Checks whether a server is running.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{self.host}:{self.port}"``.
"""
if check_url is not None:
self.check_url = self._normalize_check_url(check_url)
response = None
sleeped = 0.0
now = datetime.now()
while not response:
try:
response = requests.get(self.check_url, verify=False)
except requests.exceptions.ConnectionError:
if sleeped > self.timeout:
self._kill()
raise LiveAndLetDieError(
'{0} server {1} didn\'t start in specified timeout {2} '
'seconds!\ncommand: {3}'.format(
self.__class__.__name__,
self.check_url,
self.timeout,
' '.join(self.create_command())
)
)
time.sleep(1)
sleeped = _get_total_seconds(datetime.now() - now)
return _get_total_seconds(datetime.now() - now)
def live(self, kill_port=False, check_url=None):
"""
Starts a live server in a separate process
and checks whether it is running.
:param bool kill_port:
If ``True``, processes running on the same port as ``self.port``
will be killed.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{self.host}:{self.port}"``.
"""
pid = port_in_use(self.port, kill_port)
if pid:
raise LiveAndLetDieError(
'Port {0} is already being used by process {1}!'
.format(self.port, pid)
)
host = str(self.host)
if re.match(_VALID_HOST_PATTERN, host):
with open(os.devnull, "w") as devnull:
if self.suppress_output:
self.process = subprocess.Popen(self.create_command(),
stderr=devnull,
stdout=devnull,
preexec_fn=os.setsid)
else:
self.process = subprocess.Popen(self.create_command(),
preexec_fn=os.setsid)
_log(self.logging, 'Starting process PID: {0}'
.format(self.process.pid))
duration = self.check(check_url)
_log(self.logging,
'Live server started in {0} seconds. PID: {1}'
.format(duration, self.process.pid))
return self.process
else:
raise LiveAndLetDieError('{0} is not a valid host!'.format(host))
def start(self, *args, **kwargs):
"""Alias for :meth:`.live`"""
self.live(*args, **kwargs)
def die(self):
"""Stops the server if it is running."""
if self.process:
_log(self.logging,
'Stopping {0} server with PID: {1} running at {2}.'
.format(self.__class__.__name__, self.process.pid,
self.check_url))
self._kill()
def stop(self, *args, **kwargs):
"""Alias for :meth:`.die`"""
self.die(*args, **kwargs)
@classmethod
def _add_args(cls):
cls._argument_parser.add_argument('--liveandletdie',
help='Run as test live server.',
type=_validate_host,
nargs='?',
const='127.0.0.1:5000')
@classmethod
def parse_args(cls, logging=False):
"""
Parses command line arguments.
Looks for --liveandletdie [host]
:returns:
A ``(str(host), int(port))`` or ``(None, None)`` tuple.
"""
cls._add_args()
args = cls._argument_parser.parse_args()
if args.liveandletdie:
_log(logging, 'Running as test live server at {0}'
.format(args.liveandletdie))
return split_host(args.liveandletdie)
else:
return None, None
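# e.g. running "python app.py --liveandletdie 127.0.0.1:5000" makes parse_args()
# return ('127.0.0.1', 5000); without the flag it returns (None, None).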
class WrapperBase(Base):
"""Base class for frameworks that require their app to be wrapped."""
def create_command(self):
return [
self.executable,
self.path,
'--liveandletdie',
'{0}:{1}'.format(self.host, self.port),
]
class Flask(WrapperBase):
def __init__(self, *args, **kwargs):
"""
:param bool ssl:
If true, the app will be run with ``ssl_context="adhoc"`` and the
schema of the ``self.check_url`` will be ``"https"``.
"""
self.ssl = kwargs.pop('ssl', None)
super(Flask, self).__init__(*args, **kwargs)
if self.ssl:
self.scheme = 'https'
@classmethod
def _add_args(cls):
super(Flask, cls)._add_args()
cls._argument_parser.add_argument('--ssl',
help='Run with "adhoc" ssl context.',
type=bool,
nargs='?',
default=False)
def create_command(self):
command = super(Flask, self).create_command()
if self.ssl is True:
command += ['--ssl=1']
return command
def check(self, check_url=None):
url = self.check_url if check_url is None else \
self._normalize_check_url(check_url)
if self.ssl:
url = url.replace('http://', 'https://')
super(Flask, self).check(url)
@classmethod
def wrap(cls, app):
"""
Adds test live server capability to a Flask app module.
:param app:
A :class:`flask.Flask` app instance.
"""
host, port = cls.parse_args()
ssl_context = None
if host:
if cls._argument_parser.parse_args().ssl:
try:
import OpenSSL # pylint: disable=unused-variable
except ImportError:
# OSX fix
sys.path.append(
'/System/Library/Frameworks/Python.framework/Versions/'
'{0}.{1}/Extras/lib/python/'
.format(sys.version_info.major, sys.version_info.minor)
)
try:
import OpenSSL # pylint: disable=unused-variable
except ImportError:
# Linux fix
sys.path.append(
'/usr/lib/python{0}.{1}/dist-packages/'
.format(sys.version_info.major, sys.version_info.minor)
)
try:
import OpenSSL # pylint: disable=unused-variable
except ImportError:
raise LiveAndLetDieError(
'Flask app could not be launched because the pyopenssl '
'library is not installed on your system!'
)
ssl_context = 'adhoc'
app.run(host=host, port=port, ssl_context=ssl_context)
sys.exit()
class GAE(Base):
def __init__(self, dev_appserver_path, *args, **kwargs):
"""
:param str dev_appserver:
Path to dev_appserver.py
"""
super(GAE, self).__init__(*args, **kwargs)
self.dev_appserver_path = dev_appserver_path
self.admin_port = kwargs.get('admin_port', 5555)
def create_command(self):
command = [
self.dev_appserver_path,
'--host={0}'.format(self.host),
'--port={0}'.format(self.port),
'--admin_port={0}'.format(self.admin_port),
'--skip_sdk_update_check=yes',
self.path
]
if self.dev_appserver_path.endswith(('.py', '.pyc')):
command = [self.executable] + command
return command
class WsgirefSimpleServer(WrapperBase):
def __init__(self, *args, **kwargs):
"""
:param bool ssl:
If true, the app will be run with ssl enabled and the
scheme of the ``self.check_url`` will be ``"https"``.
"""
self.ssl = kwargs.pop('ssl', None)
super(WsgirefSimpleServer, self).__init__(*args, **kwargs)
if self.ssl:
self.scheme = 'https'
def create_command(self):
command = super(WsgirefSimpleServer, self).create_command()
if self.ssl is True:
command += ['--ssl=1']
return command
def check(self, check_url=None):
url = self.check_url if check_url is None else \
self._normalize_check_url(check_url)
if self.ssl:
url = url.replace('http://', 'https://')
super(WsgirefSimpleServer, self).check(url)
@classmethod
def _add_args(cls):
super(WsgirefSimpleServer, cls)._add_args()
cls._argument_parser.add_argument('--ssl',
help='Run with ssl enabled.',
type=bool,
nargs='?',
default=False)
@classmethod
def wrap(cls, app):
host, port = cls.parse_args()
if host:
from wsgiref.simple_server import make_server
server = make_server(host, port, app)
if cls._argument_parser.parse_args().ssl:
# Set HTTPS='1' makes wsgiref set wsgi.url_scheme='https'
# This in turn makes pyramid set request.scheme='https'
server.base_environ['HTTPS'] = '1'
with tempfile.TemporaryDirectory() as td:
# Generate temporary self-signed cert/key pair
# using the library used by Flask for 'adhoc' ssl_context
certpath = '{}/liveandletdie'.format(td)
<filename>netket/operator/_hamiltonian.py
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Sequence, Union
from numba import jit
import numpy as np
import math
from netket.graph import AbstractGraph, Graph
from netket.hilbert import AbstractHilbert, Fock
from netket.utils.types import DType
from . import spin, boson
from ._local_operator import LocalOperator
from ._graph_operator import GraphOperator
from ._discrete_operator import DiscreteOperator
from ._lazy import Squared
class SpecialHamiltonian(DiscreteOperator):
def to_local_operator(self):
raise NotImplementedError(
"Must implemented to_local_operator for {}".format(type(self))
)
def conjugate(self, *, concrete: bool = True):
return self.to_local_operator().conjugate(concrete=concrete)
def __add__(self, other):
if type(self) is type(other):
res = self.copy()
res += other
return res
return self.to_local_operator().__add__(other)
def __sub__(self, other):
if type(self) is type(other):
res = self.copy()
res -= other
return res
return self.to_local_operator().__sub__(other)
def __radd__(self, other):
if type(self) is type(other):
res = self.copy()
res += other
return res
return self.to_local_operator().__radd__(other)
def __rsub__(self, other):
if type(self) is type(other):
res = self.copy()
res -= other
return res
return self.to_local_operator().__rsub__(other)
def __iadd__(self, other):
if type(self) is type(other):
self._iadd_same_hamiltonian(other)
return self
return NotImplemented
def __isub__(self, other):
if type(self) is type(other):
self._isub_same_hamiltonian(other)
return self
return NotImplemented
def __mul__(self, other):
return self.to_local_operator().__mul__(other)
def __rmul__(self, other):
return self.to_local_operator().__rmul__(other)
def _op__matmul__(self, other):
return self.to_local_operator().__matmul__(other)
def _op__rmatmul__(self, other):
if self == other and self.is_hermitian:
return Squared(self)
return self.to_local_operator().__matmul__(other)
def _concrete_matmul_(self, other):
return self.to_local_operator() @ other
class Ising(SpecialHamiltonian):
r"""
The Transverse-Field Ising Hamiltonian :math:`-h\sum_i \sigma_i^{(x)} +J\sum_{\langle i,j\rangle} \sigma_i^{(z)}\sigma_j^{(z)}`.
This implementation is considerably faster than the Ising hamiltonian constructed by summing :class:`~netket.operator.LocalOperator` s.
"""
def __init__(
self,
hilbert: AbstractHilbert,
graph: AbstractGraph,
h: float,
J: float = 1.0,
dtype: DType = float,
):
r"""
Constructs the Ising Operator from an hilbert space and a
graph specifying the connectivity.
Args:
hilbert: Hilbert space the operator acts on.
h: The strength of the transverse field.
J: The strength of the coupling. Default is 1.0.
dtype: The dtype of the matrix elements.
Examples:
Constructs an ``Ising`` operator for a 1D system.
>>> import netket as nk
>>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
>>> hi = nk.hilbert.Spin(s=0.5, N=g.n_nodes)
>>> op = nk.operator.Ising(h=1.321, hilbert=hi, J=0.5, graph=g)
>>> print(op)
Ising(J=0.5, h=1.321; dim=20)
"""
assert (
graph.n_nodes == hilbert.size
), "The size of the graph must match the hilbert space"
super().__init__(hilbert)
self._h = dtype(h)
self._J = dtype(J)
self._edges = np.asarray(
[[u, v] for u, v in graph.edges()],
dtype=np.intp,
)
self._dtype = dtype
@property
def h(self) -> float:
"""The magnitude of the transverse field"""
return self._h
@property
def J(self) -> float:
"""The magnitude of the hopping"""
return self._J
@property
def edges(self) -> np.ndarray:
return self._edges
@property
def is_hermitian(self) -> bool:
return True
@property
def dtype(self) -> DType:
return self._dtype
def conjugate(self, *, concrete=True):
# if real
if isinstance(self.h, float) and isinstance(self.J, float):
return self
else:
raise NotImplementedError
@staticmethod
@jit(nopython=True)
def n_conn(x, out):
r"""Return the number of states connected to x.
Args:
x (matrix): A matrix of shape (batch_size,hilbert.size) containing
the batch of quantum numbers x.
out (array): If None an output array is allocated.
Returns:
array: The number of connected states x' for each x[i].
"""
if out is None:
out = np.empty(
x.shape[0],
dtype=np.int32,
)
out.fill(x.shape[1] + 1)
return out
@property
def max_conn_size(self) -> int:
"""The maximum number of non zero ⟨x|O|x'⟩ for every x."""
return self.size + 1
def copy(self):
graph = Graph(edges=[list(edge) for edge in self.edges])
        return Ising(hilbert=self.hilbert, graph=graph, J=self.J, h=self.h, dtype=self.dtype)
def to_local_operator(self):
# The hamiltonian
ha = LocalOperator(self.hilbert, dtype=self.dtype)
if self.h != 0:
for i in range(self.hilbert.size):
ha -= self.h * spin.sigmax(self.hilbert, i)
if self.J != 0:
for (i, j) in self.edges:
ha += self.J * (
spin.sigmaz(self.hilbert, i) * spin.sigmaz(self.hilbert, j)
)
return ha
def _iadd_same_hamiltonian(self, other):
if self.hilbert != other.hilbert:
raise NotImplementedError(
"Cannot add hamiltonians on different hilbert spaces"
)
self._h += other.h
self._J += other.J
def _isub_same_hamiltonian(self, other):
if self.hilbert != other.hilbert:
raise NotImplementedError(
"Cannot add hamiltonians on different hilbert spaces"
)
self._h -= other.h
self._J -= other.J
@staticmethod
@jit(nopython=True)
def _flattened_kernel(
x,
sections,
edges,
h,
J,
):
n_sites = x.shape[1]
n_conn = n_sites + 1
x_prime = np.empty(
(
x.shape[0] * n_conn,
n_sites,
)
)
mels = np.empty(x.shape[0] * n_conn, dtype=type(h))
diag_ind = 0
for i in range(x.shape[0]):
mels[diag_ind] = 0.0
for k in range(edges.shape[0]):
mels[diag_ind] += J * x[i, edges[k, 0]] * x[i, edges[k, 1]]
odiag_ind = 1 + diag_ind
mels[odiag_ind : (odiag_ind + n_sites)].fill(-h)
x_prime[diag_ind : (diag_ind + n_conn)] = np.copy(x[i])
for j in range(n_sites):
x_prime[j + odiag_ind][j] *= -1.0
diag_ind += n_conn
sections[i] = diag_ind
return x_prime, mels
def get_conn_flattened(
self,
x,
sections,
pad=False,
):
r"""Finds the connected elements of the Operator. Starting
from a given quantum number x, it finds all other quantum numbers x' such
that the matrix element :math:`O(x,x')` is different from zero. In general there
will be several different connected states x' satisfying this
condition, and they are denoted here :math:`x'(k)`, for :math:`k=0,1...N_{\mathrm{connected}}`.
This is a batched version, where x is a matrix of shape (batch_size,hilbert.size).
Args:
x (matrix): A matrix of shape (batch_size,hilbert.size) containing
the batch of quantum numbers x.
sections (array): An array of size (batch_size) useful to unflatten
the output of this function.
See numpy.split for the meaning of sections.
pad (bool): no effect here
Returns:
matrix: The connected states x', flattened together in a single matrix.
array: An array containing the matrix elements :math:`O(x,x')` associated to each x'.
"""
return self._flattened_kernel(
np.asarray(x),
sections,
self._edges,
self._h,
self._J,
)
def _get_conn_flattened_closure(self):
_edges = self._edges
_h = self._h
_J = self._J
fun = self._flattened_kernel
def gccf_fun(x, sections):
return fun(x, sections, _edges, _h, _J)
return jit(nopython=True)(gccf_fun)
def __repr__(self):
return f"Ising(J={self._J}, h={self._h}; dim={self.hilbert.size})"
class Heisenberg(GraphOperator):
r"""
The Heisenberg hamiltonian on a lattice.
"""
def __init__(
self,
hilbert: AbstractHilbert,
graph: AbstractGraph,
J: Union[float, Sequence[float]] = 1.0,
sign_rule=None,
*,
acting_on_subspace: Union[List[int], int] = None,
):
"""
        Constructs a Heisenberg operator given a hilbert space and a graph providing the
connectivity of the lattice.
Args:
hilbert: Hilbert space the operator acts on.
graph: The graph upon which this hamiltonian is defined.
J: The strength of the coupling. Default is 1.
Can pass a sequence of coupling strengths with coloured graphs:
edges of colour n will have coupling strength J[n]
            sign_rule: If True, Marshall's sign rule will be used. On a bipartite
lattice, this corresponds to a basis change flipping the Sz direction
at every odd site of the lattice. For non-bipartite lattices, the
sign rule cannot be applied. Defaults to True if the lattice is
bipartite, False otherwise.
If a sequence of coupling strengths is passed, defaults to False
and a matching sequence of sign_rule must be specified to override it
acting_on_subspace: Specifies the mapping between nodes of the graph and
Hilbert space sites, so that graph node :code:`i ∈ [0, ..., graph.n_nodes - 1]`,
corresponds to :code:`acting_on_subspace[i] ∈ [0, ..., hilbert.n_sites]`.
Must be a list of length `graph.n_nodes`. Passing a single integer :code:`start`
is equivalent to :code:`[start, ..., start + graph.n_nodes - 1]`.
Examples:
Constructs a ``Heisenberg`` operator for a 1D system.
>>> import netket as nk
>>> g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
>>> hi = nk.hilbert.Spin(s=0.5, total_sz=0, N=g.n_nodes)
>>> op = nk.operator.Heisenberg(hilbert=hi, graph=g)
>>> print(op)
Heisenberg(J=1.0, sign_rule=True; dim=20)
"""
if isinstance(J, Sequence):
# check that the number of Js matches the number of colours
assert len(J) == max(graph.edge_colors) + 1
if sign_rule is None:
sign_rule = [False] * len(J)
else:
assert len(sign_rule) == len(J)
for i in range(len(J)):
subgraph = Graph(edges=graph.edges(filter_color=i))
if sign_rule[i] and not subgraph.is_bipartite():
raise ValueError(
"sign_rule=True specified for a non-bipartite lattice"
)
else:
if sign_rule is None:
                sign_rule = graph.is_bipartite()
import pygame, pygcurse
from pygame.locals import *
import LED_display as LD
import HC_SR04 as RS
import threading
import time
import copy
import os
import sys
t=threading.Thread(target=LD.main, args=())
t.setDaemon(True)
t.start()
WINWIDTH = 32
WINHEIGHT = 16
FPS = 60
mode_list = ['mouse', 'keyboard', 'sensor']
mode = mode_list[2]
isfullscreen = False
if mode == 'mouse':
isfullscreen = True
iScreen =[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# - Pygcurse board
win = pygcurse.PygcurseWindow(32, 16, fullscreen=isfullscreen)
def main():
# os.system('cls' if os.name == 'nt' else 'clear')
mainClock = pygame.time.Clock()
newGame = True
while True:
oScreen = copy.deepcopy(iScreen)
win.fill('@', fgcolor='black', bgcolor='black')
if newGame:
pygame.mouse.set_pos(win.centerx * win.cellwidth, (win.bottom) * win.cellheight)
mousex, mousey = pygame.mouse.get_pos()
before_cellx_arr = [WINWIDTH//2] * 3
cellx, celly = WINWIDTH//2, WINHEIGHT -2
ballx, bally = cellx, WINHEIGHT - 3
ball_direction = {'NW' : [-1, -1], 'N' : [0, -1], 'NE' : [1, -1], 'SW' : [-1, 1], 'S' : [0, 1], 'SE' : [1, 1]}
ballspeed = 2
            ballspeed = 4 - ballspeed  # convert the speed setting into a per-move frame delay
ballcnt = 0
ballmv = ball_direction['N']
bricks = [[1,1], [6,1], [11,1], [16,1], [21, 1], [26,1], [3,4], [8,4], [13,4], [18,4], [23,4]]
if mode == 'keyboard':
counter = 0
moveRight = False
moveLeft = False
gameOver = False
newGame = False
if mode == 'sensor':
if not gameOver:
cellx = RS.get_distance()-10
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE) or (event.type == KEYDOWN and event.key == ord('q')):
pygame.quit()
sys.exit()
# Input
# mouse mode
if mode == 'mouse':
if event.type == MOUSEMOTION and not gameOver:
mousex, mousey = event.pos
cellx, celly = win.getcoordinatesatpixel(mousex, mousey)
celly = WINHEIGHT - 2
# keyboard mode
elif mode == 'keyboard':
if event.type == KEYDOWN:
if event.key == K_LEFT or event.key == ord('a'):
if not gameOver:
cellx -= 1
counter += 1
moveRight = False
moveLeft = True
if event.key == K_RIGHT or event.key == ord('d'):
if not gameOver:
cellx += 1
counter += 1
                            moveLeft = False
moveRight = True
if event.type == KEYUP:
if event.key == K_LEFT or event.key == ord('a'):
counter = 0
moveLeft = False
if event.key == K_RIGHT or event.key == ord('d'):
counter = 0
moveRight = False
# Act
drawBricks(bricks, oScreen)
if mode == 'sensor':
if not gameOver:
if cellx < 1 or cellx > WINWIDTH -2:
cellx = before_cellx_arr[-1]
if abs(cellx - before_cellx_arr[-1]) > 15:
cellx = before_cellx_arr[-1]
else:
cellx = sum(before_cellx_arr[1:]+[cellx],1)//len(before_cellx_arr)
before_cellx_arr.pop(0)
before_cellx_arr.append(cellx)
cellx = before_cellx_arr[-1]
elif mode == 'keyboard':
if not gameOver:
if counter > 2:
if moveRight: cellx += 1
if moveLeft: cellx -= 1
elif moveRight or moveLeft:
counter += 1
if cellx < 2:
cellx = 2
if cellx > WINWIDTH-3:
cellx = WINWIDTH-3
elif mode == 'mouse':
if cellx < 2:
cellx = 2
if cellx > WINWIDTH-3:
cellx = WINWIDTH-3
# Ball check
hit = oScreen[bally][ballx]
if ballcnt == 0:
# Hit wall
if bally <= 0 or bally >= WINHEIGHT -1 or ballx <= 0 or ballx >= WINWIDTH - 1:
if bally <= 0:
ballmv = [ballmv[0], -ballmv[1]]
if ballx <= 0 or ballx >= WINWIDTH - 1:
ballmv = [-ballmv[0], ballmv[1]]
if ballx <= 0:
# GameOver
pass
# Hit brick
elif hit == 3 or hit == 4:
print('Hit!')
bricks = breakBrick(ballx, bally, oScreen, bricks)
                if hit == 3 or hit == 4:
for key, value in ball_direction.items():
if ballmv == value:
direction = key
if direction[0] == 'N':
direction = 'S' + direction[1:]
elif direction[0] == 'S':
direction = 'N' + direction[1:]
ballmv = ball_direction[direction]
# Hit pad
elif (bally == celly-1 or bally == celly) and ballx >= cellx -2 and ballx <= cellx + 2:
if ballx < cellx:
ballmv = ball_direction['NW']
elif ballx > cellx:
ballmv = ball_direction['NE']
else:
ballmv = ball_direction['N']
oScreen[bally][ballx] = 2
# Ball movement
if ballcnt >= ballspeed:
ballx += ballmv[0]
bally += ballmv[1]
ballcnt = 0
else:
ballcnt += 1
# fill matrix
# - Change oScreen matrix output in this area
# Initialize Pad
#moveBall(ballx, bally, oScreen, WINWIDTH, WINHEIGHT)
drawPad(cellx, celly, oScreen)
#ex)
# - Draw Matrix
# consoleMatrix(oScreen)
#pygcurseMatrix(oScreen)
drawMatrix(oScreen)
win.update()
# time.sleep(delay)
mainClock.tick(FPS)
# os.system('cls' if os.name == 'nt' else 'clear')
def breakBrick(x, y, screen, bricks):
for brick in bricks:
if x >= brick[0] and x <= brick[0] + 3 and y >= brick[1] and y <= brick[1] + 1:
bricks.pop(bricks.index(brick))
break
return bricks
def drawBricks(bricks, screen):
for brick in bricks:
cnt = 0
for i in range(brick[0], brick[0]+4):
cnt += 1
for j in range(brick[1], brick[1]+2):
if cnt == 1 or cnt == 4:
screen[j][i] = 4
else:
screen[j][i] = 3
def ballCheck(x, y, screen, width, height):
    # Unused placeholder; ball collision handling is done inline in main().
    pass
def drawPad(x, y, screen):
for i in range(x-2,x+3,1):
screen[y][i] = 1
def consoleMatrix(screen):
for i in screen:
print(i)
def pygcurseMatrix(screen):
for i in range(16):
for j in | |
#!/usr/bin/env python
import json, os, string, sys, tempfile
from contextlib import nested
from distutils.core import run_setup
from django.utils.importlib import import_module
from fabric.context_managers import settings as fab_settings
from fabric.context_managers import _setenv, cd
from fabric.contrib.files import exists, comment, sed, append
from fabric.decorators import runs_once, hosts
from fabric.main import find_fabfile
from fabric.network import normalize
from fabric.operations import local, run, sudo, prompt, get, put
from fabric.state import _AttributeDict, env, output
from fabric.version import get_version
woven_env = _AttributeDict({
'HOSTS':[], #optional - a list of host strings to setup on as per Fabric
'ROLEDEFS':{}, #optional as per fabric. eg {'staging':['<EMAIL>']}
'HOST_SSH_PORT':10022, #optional - the ssh port to be setup
'HOST_USER':'', #optional - can be used in place of defining it elsewhere (ie host_string)
'HOST_PASSWORD':'',#optional
'SSH_KEY_FILENAME':'',#optional - as per fabric, a path to a key to use in place your local .ssh key
#The first setup task is usually disabling the default root account and changing the ssh port.
'ROOT_USER':'root', #optional - mostly the default administrative account is root
'DISABLE_ROOT': False, #optional - disable the default administrative account
'ROOT_PASSWORD':'', #optional - blank by default
'DEFAULT_SSH_PORT':22, #optional - The default ssh port, prior to woven changing it. Defaults to 22
'DISABLE_SSH_PASSWORD': False, #optional - setting this to true will disable password login and use ssh keys only.
'ENABLE_UFW':True, #optional - If some alternative firewall is already pre-installed
#optional - the default firewall rules (note ssh is always allowed)
'UFW_RULES':['allow 80,443/tcp'],
'ROLE_UFW_RULES':{},
#The default packages that are setup. It is NOT recommended you change these:
'HOST_BASE_PACKAGES':[
'ufw', #firewall
'subversion','git-core','mercurial','bzr', #version control
'gcc','build-essential', 'python-dev', 'python-setuptools', #build
'apache2','libapache2-mod-wsgi','nginx', #webservers
'python-imaging', #pil
'python-psycopg2','python-mysqldb','python-pysqlite2'], #default database drivers
'HOST_EXTRA_PACKAGES':[], #optional - additional packages as required
'ROLE_PACKAGES':{},#define ROLEDEFS packages instead of using HOST_BASE_PACKAGES + HOST_EXTRA_PACKAGES
#Apache list of modules to disable for performance and memory efficiency
#This list gets disabled
'APACHE_DISABLE_MODULES':['alias','auth_basic','authn_file','authz_default','authz_groupfile',
'authz_user','autoindex','cgid','dir',
'setenvif','status'],
#Specify a linux base backend to use. Not yet implemented
#'LINUX_BASE':'debian',
#define a list of repositories/sources to search for packages
'LINUX_PACKAGE_REPOSITORIES':[], # eg ppa:bchesneau/gunicorn
#Virtualenv/Pip
'DEPLOYMENT_ROOT':'',
'PROJECT_APPS_PATH':'',#a relative path from the project package directory for any local apps
'PIP_REQUIREMENTS':[], #a list of pip requirement and or pybundle files to use for installation
#Application media
'STATIC_URL':'', #optional
'STATIC_ROOT':'', #optional
#Database migrations
'MANUAL_MIGRATION':False, #optional Manage database migrations manually
})
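#Projects typically override a subset of these in their django settings
#module (a hedged illustration; the values are made up):
# HOSTS = ['10.0.0.2']
# HOST_USER = 'woven'
# PIP_REQUIREMENTS = ['requirements.txt']
#set_env() below copies any upper-case setting that also exists in woven_env
#from the project settings into the fabric env.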
def _parse_project_version(version=''):
"""
Returns the significant part of the version excluding the build
The final forms returned can be
major.minor
major.minor stage (spaces will be replaced with '-')
major.minor.stage
major.minor-stage
major.minorstage (eg 1.0rc1)
major.minor.maintenance
major.minor.maintenance-stage
major.minor.maintenancestage
Anything beyond the maintenance or stage whichever is last is ignored
"""
def mm_version(vers):
stage = ''
stage_sep = ''
finalvers = ''
if not vers.isdigit():
for num,char in enumerate(vers):
if char.isdigit():
finalvers += str(char)
elif char.isalpha():
stage = vers[num:]
break
elif char in [' ','-']: #sep
#We will strip spaces to avoid needing to 'quote' paths
stage_sep = '-'
stage = vers[num+1:]
break
else:
finalvers = vers
#remove any final build numbers
if ' ' in stage:
stage = stage.split(' ')[0]
elif '-' in stage:
stage = stage.split('-')[0]
return (finalvers,stage,stage_sep)
v = version.split('.')
if len(v)==1: return v[0]
major = v[0]
minor = v[1]
maint = ''
stage = ''
    if len(v)>2 and v[2] != '0': #(1.0.0 == 1.0)
maint = v[2]
if len(v)>3 and v[3][0].isalpha():
stage = v[3]
project_version = '.'.join([major,minor,maint,stage])
else:
#Detect stage in minor
minor,stage_minor,stage_minor_sep = mm_version(minor)
if maint: #may be maint = ''
maint, stage_maint, stage_maint_sep = mm_version(maint)
else:
stage_maint = ''; stage_maint_sep = ''
if stage_minor:
stage = stage_minor
stage_sep = stage_minor_sep
elif stage_maint:
stage = stage_maint
stage_sep = stage_maint_sep
finalvers = [major,minor]
if maint: finalvers.append(maint)
finalvers = '.'.join(finalvers)
if stage:
finalvers = stage_sep.join([finalvers,stage])
project_version = finalvers
return project_version
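#Illustrative mappings for _parse_project_version (drawn from the docstring
#above rather than executed output):
# '1.0' -> '1.0'
# '1.0rc1' -> '1.0rc1'
# '1.0 alpha' -> '1.0-alpha' (spaces become '-')
# '1.0.0' -> '1.0' (a zero maintenance number is dropped)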
def _root_domain():
"""
Deduce the root domain name - usually a 'naked' domain.
This only needs to be done prior to the first deployment
"""
if not hasattr(env,'root_domain'):
cwd = os.getcwd().split(os.sep)
domain = ''
#if the first env.host has a domain name then we'll use that
#since there are no top level domains that have numbers in them we can test env.host
username, host, port = normalize(env.hosts[0])
if host[-1] in string.ascii_letters:
domain_parts = env.host.split('.')
length = len(domain_parts)
if length==2:
#assumes .com .net etc so we want the full hostname for the domain
domain = host
elif length==3 and len(domain_parts[-1])==2:
#assume country tld so we want the full hostname for domain
domain = host
elif length >=3:
#assume the first part is the hostname of the machine
                domain = '.'.join(domain_parts[1:])
#we'll just pick the first directory in the path which has a period.
else:
for d in cwd:
if '.' in d:
domain = d
if not domain and env.INTERACTIVE:
domain = prompt('Enter the root domain for this project ',default='example.com')
else:
domain = 'example.com'
env.root_domain = domain
return env.root_domain
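#Behaviour sketch (hedged): with env.hosts[0] == 'web1.example.com' the
#machine name is dropped and 'example.com' is used; 'example.com' itself, or
#a country-code host such as 'example.co.uk', is kept whole. Purely numeric
#hosts (IP addresses) fall back to a directory in the current path containing
#a '.', then to an interactive prompt.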
def check_settings():
"""
    Validate the user's settings prior to deploy
"""
valid=True
if not get_version() >= '1.0':
print "FABRIC ERROR: Woven is only compatible with Fabric < 1.0"
valid = False
if not env.MEDIA_ROOT or not env.MEDIA_URL:
print "MEDIA ERROR: You must define a MEDIA_ROOT & MEDIA_URL in your settings.py"
print "even if plan to deploy your media separately to your project"
valid = False
if not env.TEMPLATE_DIRS:
print "TEMPLATES_DIRS ERROR: You must define a TEMPLATES_DIRS in your settings.py"
valid=False
if env.DEFAULT_DATABASE_ENGINE in ['django.db.backends.','django.db.backends.dummy']:
print "DATABASE SETTINGS ERROR: The default database engine has not been defined in your settings.py file"
print "At a minimum you must define an sqlite3 database for woven to deploy,"
print "or define a database backend is managed outside of woven."
valid=False
if not valid: sys.exit(1)
def disable_virtualenvwrapper():
"""
Hack to workaround an issue with virtualenvwrapper logging caused by Fabric sudo
Can also add --noprofile to env.shell
"""
profile_path = '/'.join([deployment_root(),'.profile'])
sed(profile_path,'source /usr/local/bin/virtualenvwrapper.sh','')
def enable_virtualenvwrapper():
profile_path = '/'.join([deployment_root(),'.profile'])
append(profile_path, 'source /usr/local/bin/virtualenvwrapper.sh')
def deployment_root():
"""
deployment root varies per host based on the user
It can be overridden by the DEPLOYMENT_ROOT setting
"""
if not env.DEPLOYMENT_ROOT: return '/'.join(['/home',env.user])
else: return env.DEPLOYMENT_ROOT
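#e.g. with env.user == 'woven' and no DEPLOYMENT_ROOT set, this resolves to
#'/home/woven' (illustrative value).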
def get_project_version():
return env.project_version
def set_env(settings=None, setup_dir=''):
"""
Used in management commands or at the module level of a fabfile to
integrate woven project django.conf settings into fabric, and set the local current
working directory to the distribution root (where setup.py lives).
``settings`` is your django settings module to pass in
if you want to call this from a fabric script.
``setup_dir`` is an optional path to the directory containing setup.py
This would be used in instances where setup.py was not above the cwd
This function is used to set the environment for all hosts
"""
#switch the working directory to the distribution root where setup.py is
if hasattr(env, 'setup_path') and env.setup_path:
setup_path = env.setup_path
else:
with fab_settings(fabfile='setup.py'):
if setup_dir:
setup_path = os.path.join(setup_dir,'setup.py')
else:
setup_path = find_fabfile()
if not setup_path:
print 'Error: You must have a setup.py file in the current or a parent folder'
sys.exit(1)
local_working_dir = os.path.split(setup_path)[0]
os.chdir(local_working_dir)
setup = run_setup('setup.py',stop_after="init")
if setup.get_name() == 'UNKNOWN' or setup.get_version()=='0.0.0' or not setup.packages:
print "ERROR: You must define a minimum of name, version and packages in your setup.py"
sys.exit(1)
#project env variables for deployment
env.project_name = setup.get_name() #project_name()
env.project_full_version = setup.get_version()#local('python setup.py --version').rstrip()
env.project_version = _parse_project_version(env.project_full_version)
env.project_fullname = '-'.join([env.project_name,env.project_version])
env.project_package_name = setup.packages[0]
env.patch = False
#django settings are passed in by the command
#We'll assume that if the settings aren't passed in we're running from a fabfile
if not settings:
sys.path.insert(0,local_working_dir)
#import global settings
project_settings = import_module(env.project_name+'.settings')
else:
project_settings = settings
#If sqlite is used we can manage the database on first deployment
env.DEFAULT_DATABASE_ENGINE = project_settings.DATABASES['default']['ENGINE']
env.DEFAULT_DATABASE_NAME = project_settings.DATABASES['default']['NAME']
#overwrite with main sitesettings module
#just for MEDIA_URL, ADMIN_MEDIA_PREFIX, and STATIC_URL
#if this settings file exists
try:
site_settings = import_module('.'.join([env.project_name,'sitesettings.settings']))
project_settings.MEDIA_URL = site_settings.MEDIA_URL
project_settings.ADMIN_MEDIA_PREFIX = site_settings.ADMIN_MEDIA_PREFIX
project_settings.DATABASES = site_settings.DATABASES
if hasattr(site_settings,'STATIC_URL'):
project_settings.STATIC_URL = site_settings.STATIC_URL
else:
project_settings.STATIC_URL = project_settings.ADMIN_MEDIA_PREFIX
except ImportError:
pass
#update woven_env from project_settings
local_settings = dir(project_settings)
#only get settings that woven uses
for setting in local_settings:
if setting.isupper() and hasattr(woven_env,setting):
s = getattr(project_settings,setting,'')
woven_env[setting] = s
    #update the fabric env with all the woven settings
env.update(woven_env)
#set any user/password defaults if | |
        for s in slices:
try:
self.recover_slice(slice_obj=s)
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Error in recoverSlice for property list {}".format(e))
if s.is_inventory():
raise e
def recover_broker_slice(self, *, slice_obj: ABCSlice):
"""
Recover broker slice at the AM, do the following if the model.reload file is detected
- Close the existing delegations
- Create the new delegations from the reloaded ARM
- Add the delegations to the Broker Slice
@param slice_obj Slice object
"""
if self.get_type() != ActorType.Authority:
return False
if not slice_obj.is_broker_client():
return False
from fabric_cf.actor.core.container.globals import GlobalsSingleton
if not GlobalsSingleton.get().can_reload_model():
return False
self.logger.info(f"Closing old delegations and adding new delegations to the slice: {slice_obj}!")
delegation_names = []
try:
delegations = self.plugin.get_database().get_delegations(slice_id=str(slice_obj.get_slice_id()))
except Exception as e:
self.logger.error(e)
raise ActorException(f"Could not fetch delegations records for slice {slice_obj} from database")
for d in delegations:
self.logger.info(f"Closing delegation: {d}!")
d.set_graph(graph=None)
d.transition(prefix="closed as part of recovers", state=DelegationState.Closed)
delegation_names.append(d.get_delegation_name())
self.plugin.get_database().update_delegation(delegation=d)
adms = self.policy.aggregate_resource_model.generate_adms()
# Create new delegations and add to the broker slice; they will be re-registered with the policy in the recovery
for name in delegation_names:
new_delegation_graph = adms.get(name)
dlg_obj = DelegationFactory.create(did=new_delegation_graph.get_graph_id(),
slice_id=slice_obj.get_slice_id(),
delegation_name=name)
dlg_obj.set_slice_object(slice_object=slice_obj)
dlg_obj.set_graph(graph=new_delegation_graph)
dlg_obj.transition(prefix="Reload Model", state=DelegationState.Delegated)
self.plugin.get_database().add_delegation(delegation=dlg_obj)
def recover_inventory_slice(self, *, slice_obj: ABCSlice) -> bool:
"""
Check and Reload ARM for an inventory slice for an AM
@param slice_obj slice object
@return True if ARM was reloaded; otherwise False
"""
if self.get_type() != ActorType.Authority:
return False
if not slice_obj.is_inventory():
return False
# Check and Reload ARM if needed
from fabric_cf.actor.core.container.globals import GlobalsSingleton
arm_graph = GlobalsSingleton.get().check_and_reload_model(graph_id=slice_obj.get_graph_id())
if arm_graph is not None:
slice_obj.set_graph(graph=arm_graph)
return arm_graph is not None
def recover_slice(self, *, slice_obj: ABCSlice):
"""
Recover slice
@param slice_obj slice_obj
"""
slice_id = slice_obj.get_slice_id()
if self.get_slice(slice_id=slice_id) is not None:
self.logger.debug("Found slice_id: {} slice:{}".format(slice_id, slice_obj))
else:
self.logger.info("Recovering slice: {}".format(slice_id))
self.recover_inventory_slice(slice_obj=slice_obj)
self.recover_broker_slice(slice_obj=slice_obj)
self.logger.debug("Informing the plugin about the slice")
self.plugin.revisit(slice_obj=slice_obj)
self.logger.debug("Registering slice: {}".format(slice_id))
self.re_register_slice(slice_object=slice_obj)
self.logger.debug("Recovering reservations in slice: {}".format(slice_id))
self.recover_reservations(slice_obj=slice_obj)
self.logger.debug("Recovering delegations in slice: {}".format(slice_id))
self.recover_delegations(slice_obj=slice_obj)
self.logger.info("Recovery of slice {} complete".format(slice_id))
def recover_reservations(self, *, slice_obj: ABCSlice):
"""
Recover reservations
@param slice_obj slice object
"""
self.logger.info(
"Starting to recover reservations in slice {}({})".format(slice_obj.get_name(), slice_obj.get_slice_id()))
reservations = None
try:
reservations = self.plugin.get_database().get_reservations(slice_id=slice_obj.get_slice_id())
except Exception as e:
self.logger.error(e)
raise ActorException(
"Could not fetch reservation records for slice {}({}) from database".format(slice_obj.get_name(),
slice_obj.get_slice_id()))
self.logger.debug("There are {} reservations(s) in slice".format(len(reservations)))
for r in reservations:
try:
self.recover_reservation(r=r, slice_obj=slice_obj)
except Exception as e:
self.logger.error("Unexpected error while recovering reservation {}".format(e))
self.logger.info("Recovery for reservations in slice {} completed".format(slice_obj))
def recover_reservation(self, *, r: ABCReservationMixin, slice_obj: ABCSlice):
"""
Recover reservation
@param r reservation
@param slice_obj slice object
"""
try:
r.restore(actor=self, slice_obj=slice_obj)
self.logger.info(
"Found reservation # {} in state {}".format(r.get_reservation_id(), r.get_reservation_state()))
if r.is_closed():
self.logger.info("Reservation #{} is closed. Nothing to recover.".format(r.get_reservation_id()))
return
self.logger.info("Recovering reservation #{}".format(r.get_reservation_id()))
self.logger.debug("Recovering reservation object r={}".format(r))
self.logger.debug("Registering the reservation with the actor")
self.re_register(reservation=r)
self.logger.info(r)
self.logger.debug("Revisiting with the Plugin")
self.plugin.revisit(reservation=r)
self.logger.info(r)
self.logger.debug("Revisiting with the actor policy")
self.policy.revisit(reservation=r)
self.logger.info("Recovered reservation #{}".format(r.get_reservation_id()))
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Exception occurred in recovering reservation e={}".format(e))
raise ActorException("Could not recover Reservation #{}".format(r))
def recover_delegations(self, *, slice_obj: ABCSlice):
"""
Recover delegations for a slice
@param slice_obj slice object
"""
self.logger.info(
"Starting to recover delegations in slice {}({})".format(slice_obj.get_name(), slice_obj.get_slice_id()))
try:
delegations = self.plugin.get_database().get_delegations(slice_id=str(slice_obj.get_slice_id()))
except Exception as e:
self.logger.error(e)
raise ActorException(
"Could not fetch delegations records for slice {}({}) from database".format(slice_obj.get_name(),
slice_obj.get_slice_id()))
self.logger.debug("There are {} delegations(s) in slice".format(len(delegations)))
for d in delegations:
try:
self.logger.info("Delegation has properties: {}".format(d))
self.recover_delegation(d=d, slice_obj=slice_obj)
except Exception as e:
self.logger.error("Unexpected error while recovering delegation {}".format(e))
self.logger.info("Recovery for delegations in slice {} completed".format(slice_obj))
def recover_delegation(self, *, d: ABCDelegation, slice_obj: ABCSlice):
"""
Recover delegation
@param d delegation
@param slice_obj slice object
"""
try:
d.restore(actor=self, slice_obj=slice_obj)
self.logger.info(
"Found delegation # {} in state {}".format(d.get_delegation_id(), d.get_state_name()))
if d.is_closed():
self.logger.info("Delegation #{} is closed. Nothing to recover.".format(d.get_delegation_id()))
return
self.logger.info("Recovering delegation #{}".format(d.get_delegation_id()))
self.logger.debug("Recovering delegation object d={}".format(d))
self.logger.debug("Registering the delegation with the actor")
self.re_register_delegation(delegation=d)
self.logger.info(d)
self.logger.debug("Revisiting with the Plugin")
self.plugin.revisit(delegation=d)
self.logger.info(d)
self.logger.debug("Revisiting with the actor policy")
self.policy.revisit_delegation(delegation=d)
self.logger.info("Recovered delegation #{}".format(d.get_delegation_id()))
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error("Exception occurred in recovering delegation e={}".format(e))
raise ActorException("Could not recover delegation #{}".format(d))
def register(self, *, reservation: ABCReservationMixin):
self.wrapper.register_reservation(reservation=reservation)
def register_slice(self, *, slice_object: ABCSlice):
self.wrapper.register_slice(slice_object=slice_object)
def register_delegation(self, *, delegation: ABCDelegation):
self.wrapper.register_delegation(delegation=delegation)
def remove_reservation(self, *, reservation: ABCReservationMixin = None, rid: ID = None):
if reservation is not None:
self.wrapper.remove_reservation(rid=reservation.get_reservation_id())
if rid is not None:
self.wrapper.remove_reservation(rid=rid)
def remove_slice(self, *, slice_object: ABCSlice):
self.wrapper.remove_slice(slice_id=slice_object.get_slice_id())
def remove_slice_by_slice_id(self, *, slice_id: ID):
self.wrapper.remove_slice(slice_id=slice_id)
def re_register_delegation(self, *, delegation: ABCDelegation):
self.wrapper.re_register_delegation(delegation=delegation)
def re_register(self, *, reservation: ABCReservationMixin):
self.wrapper.re_register_reservation(reservation=reservation)
def re_register_slice(self, *, slice_object: ABCSlice):
self.wrapper.re_register_slice(slice_object=slice_object)
def issue_delayed(self):
"""
Issues delayed operations
"""
assert self.recovered
self.close_reservations(reservations=self.closing)
self.closing.clear()
def reset(self):
"""
Reset an actor
"""
self.issue_delayed()
self.policy.reset()
def set_actor_clock(self, *, clock):
"""
Set actor clock
@param clock clock
"""
self.clock = clock
def set_description(self, *, description: str):
"""
Set description
@param description description
"""
self.description = description
def set_identity(self, *, token: AuthToken):
"""
Set identity
@param token token
"""
self.identity = token
self.name = self.identity.get_name()
self.guid = token.get_guid()
def set_policy(self, *, policy):
"""
Set policy
@param policy policy
"""
self.policy = policy
def set_recovered(self, *, value: bool):
"""
Set recovered flag
@param value value
"""
self.recovered = value
def set_plugin(self, *, plugin):
"""
Set plugin
@param plugin
"""
self.plugin = plugin
def set_stopped(self, *, value: bool):
"""
Set stopped flag
@param value value
"""
self.stopped = value
def is_on_actor_thread(self) -> bool:
"""
Check if running on actor thread
@return true if running on actor thread, false otherwise
"""
result = False
try:
self.thread_lock.acquire()
result = self.thread == threading.current_thread()
finally:
self.thread_lock.release()
return result
def execute_on_actor_thread_and_wait(self, *, runnable: ABCActorRunnable):
"""
Execute an incoming action on actor thread
@param runnable incoming action/operation
"""
if self.is_on_actor_thread():
return runnable.run()
else:
status = ExecutionStatus()
event = ActorEvent(status=status, runnable=runnable)
self.queue_event(incoming=event)
with status.lock:
while not status.done:
status.lock.wait()
if status.exception is not None:
raise status.exception
return status.result
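    # Usage sketch (hedged; names are illustrative): callers wrap an operation
    # in an ABCActorRunnable so it is serialised onto the actor thread:
    #
    #   class GetSliceRunnable(ABCActorRunnable):
    #       def __init__(self, actor, sid):
    #           self.actor, self.sid = actor, sid
    #       def run(self):
    #           return self.actor.get_slice(slice_id=self.sid)
    #
    #   slice_obj = actor.execute_on_actor_thread_and_wait(
    #       runnable=GetSliceRunnable(actor, sid))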
def run(self):
"""
Actor run function for actor thread
"""
try:
self.logger.info("Actor Main Thread started")
self.actor_count -= 1
self.actor_main()
except Exception as e:
self.logger.error(f"Unexpected error {e}")
self.logger.error(traceback.format_exc())
finally:
self.logger.info("Actor Main Thread exited")
def start(self):
"""
Start an Actor
"""
try:
self.thread_lock.acquire()
if self.thread is not None:
raise ActorException("This actor has already been started")
self.thread = threading.Thread(target=self.run)
self.thread.setName(self.get_name())
self.thread.setDaemon(True)
self.thread.start()
finally:
self.thread_lock.release()
self.message_service.start()
if self.plugin.get_handler_processor() is not None:
self.plugin.get_handler_processor().start()
def stop(self):
"""
Stop an actor
"""
self.stopped = True
self.message_service.stop()
try:
self.thread_lock.acquire()
temp = self.thread
self.thread = None
if temp is not None:
self.logger.warning("It seems that the actor thread is running. Interrupting it")
try:
# TODO find equivalent of interrupt
with self.actor_main_lock:
self.actor_main_lock.notify_all()
temp.join()
except Exception as e:
self.logger.error("Could not join actor thread {}".format(e))
self.logger.error(traceback.format_exc())
finally:
self.thread_lock.release()
finally:
if self.thread_lock is not None and self.thread_lock.locked():
self.thread_lock.release()
if self.plugin.get_handler_processor() is not None:
self.plugin.get_handler_processor().shutdown()
def tick_handler(self):
"""
Tick handler
"""
def handle_failed_rpc(self, *, rid: ID, rpc: FailedRPC):
"""
Handler failed rpc
"""
self.wrapper.process_failed_rpc(rid=rid, rpc=rpc)
def __str__(self):
return "actor: [{}/{}]".format(self.name, self.guid)
def unregister(self, *, reservation: ABCReservationMixin, rid: ID):
"""
Unregister reservation
@param reservation reservation
@param rid reservation id
"""
if reservation is not None:
self.wrapper.unregister_reservation(rid=reservation.get_reservation_id())
if rid is not None:
self.wrapper.unregister_reservation(rid=rid)
def unregister_slice(self, *, slice_object: ABCSlice):
"""
Unregister slice
        @param slice_object slice object
"""
self.wrapper.unregister_slice(slice_id=slice_object.get_slice_id())
def unregister_slice_by_slice_id(self, *, slice_id: ID):
"""
Unregister slice by slice id
@param slice_id slice id
"""
self.wrapper.unregister_slice(slice_id=slice_id)
def queue_timer(self, timer: ABCTimerTask):
"""
        Queue a timer task on the Actor timer queue
"""
with self.actor_main_lock:
self.timer_queue.put_nowait(timer)
self.logger.debug("Added timer to timer queue {}".format(timer.__class__.__name__))
self.actor_main_lock.notify_all()
def queue_event(self, *, incoming: ABCActorEvent):
"""
        Queue an event on the Actor Event Queue
"""
with self.actor_main_lock:
self.event_queue.put_nowait(incoming)
self.logger.debug("Added event to event queue {}".format(incoming.__class__.__name__))
self.actor_main_lock.notify_all()
def await_no_pending_reservations(self):
"""
Await until no pending reservations
"""
self.wrapper.await_nothing_pending()
def actor_main(self):
"""
Actor Main loop
"""
while True:
events = []
timers = []
with self.actor_main_lock:
while self.event_queue.empty() and self.timer_queue.empty() and not self.stopped:
try:
self.actor_main_lock.wait()
except InterruptedError as e:
self.logger.info("Actor thread interrupted. Exiting")
return
if self.stopped:
self.logger.info("Actor exiting")
return
if not self.event_queue.empty():
try:
for event in IterableQueue(source_queue=self.event_queue):
events.append(event)
except Exception as e:
self.logger.error(f"Error while adding event to event queue! e: {e}")
self.logger.error(traceback.format_exc())
if not self.timer_queue.empty():
try:
for timer in IterableQueue(source_queue=self.timer_queue):
timers.append(timer)
except Exception as | |
r"""
3D advection diffusion equation for tracers.
The advection-diffusion equation of tracer :math:`T` in conservative form reads
.. math::
\frac{\partial T}{\partial t}
+ \nabla_h \cdot (\textbf{u} T)
+ \frac{\partial (w T)}{\partial z}
= \nabla_h \cdot (\mu_h \nabla_h T)
+ \frac{\partial}{\partial z} \Big(\mu \frac{\partial T}{\partial z}\Big)
:label: tracer_eq
where :math:`\nabla_h` denotes horizontal gradient, :math:`\textbf{u}` and
:math:`w` are the horizontal and vertical velocities, respectively, and
:math:`\mu_h` and :math:`\mu` denote horizontal and vertical diffusivity.
"""
from __future__ import absolute_import
from .utility import *
from .equation import Term, Equation
__all__ = [
'TracerEquation',
'TracerTerm',
'HorizontalAdvectionTerm',
'VerticalAdvectionTerm',
'HorizontalDiffusionTerm',
'VerticalDiffusionTerm',
'SourceTerm',
]
class TracerTerm(Term):
"""
Generic tracer term that provides commonly used members and mapping for
boundary functions.
"""
def __init__(self, function_space,
bathymetry=None, v_elem_size=None, h_elem_size=None,
use_symmetric_surf_bnd=True, use_lax_friedrichs=True,
sipg_factor=Constant(1.0),
sipg_factor_vertical=Constant(1.0)):
"""
:arg function_space: :class:`FunctionSpace` where the solution belongs
:kwarg bathymetry: bathymetry of the domain
:type bathymetry: 3D :class:`Function` or :class:`Constant`
:kwarg v_elem_size: scalar :class:`Function` that defines the vertical
element size
:kwarg h_elem_size: scalar :class:`Function` that defines the horizontal
element size
:kwarg bool use_symmetric_surf_bnd: If True, use symmetric surface boundary
            condition in the horizontal advection term
        :kwarg bool use_lax_friedrichs: If True, apply Lax-Friedrichs stabilization
            to the advection terms
        :kwarg sipg_factor: :class:`Constant` or :class:`Function` horizontal SIPG penalty scaling factor
        :kwarg sipg_factor_vertical: :class:`Constant` or :class:`Function` vertical SIPG penalty scaling factor
"""
super(TracerTerm, self).__init__(function_space)
self.bathymetry = bathymetry
self.h_elem_size = h_elem_size
self.v_elem_size = v_elem_size
continuity = element_continuity(self.function_space.ufl_element())
self.horizontal_dg = continuity.horizontal == 'dg'
self.vertical_dg = continuity.vertical == 'dg'
self.use_symmetric_surf_bnd = use_symmetric_surf_bnd
self.use_lax_friedrichs = use_lax_friedrichs
self.sipg_factor = sipg_factor
self.sipg_factor_vertical = sipg_factor_vertical
# define measures with a reasonable quadrature degree
p, q = self.function_space.ufl_element().degree()
self.quad_degree = (2*p + 1, 2*q + 1)
self.dx = dx(degree=self.quad_degree)
self.dS_h = dS_h(degree=self.quad_degree)
self.dS_v = dS_v(degree=self.quad_degree)
self.ds = ds(degree=self.quad_degree)
self.ds_surf = ds_surf(degree=self.quad_degree)
self.ds_bottom = ds_bottom(degree=self.quad_degree)
def get_bnd_functions(self, c_in, uv_in, elev_in, bnd_id, bnd_conditions):
"""
Returns external values of tracer and uv for all supported
boundary conditions.
Volume flux (flux) and normal velocity (un) are defined positive out of
the domain.
:arg c_in: Internal value of tracer
:arg uv_in: Internal value of horizontal velocity
:arg elev_in: Internal value of elevation
:arg bnd_id: boundary id
:type bnd_id: int
:arg bnd_conditions: dict of boundary conditions:
``{bnd_id: {field: value, ...}, ...}``
"""
funcs = bnd_conditions.get(bnd_id)
if 'elev' in funcs:
elev_ext = funcs['elev']
else:
elev_ext = elev_in
if 'value' in funcs:
c_ext = funcs['value']
else:
c_ext = c_in
if 'uv' in funcs:
uv_ext = funcs['uv']
elif 'flux' in funcs:
assert self.bathymetry is not None
h_ext = elev_ext + self.bathymetry
area = h_ext*self.boundary_len # NOTE using external data only
uv_ext = funcs['flux']/area*self.normal
elif 'un' in funcs:
uv_ext = funcs['un']*self.normal
else:
uv_ext = uv_in
return c_ext, uv_ext, elev_ext
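    # Example bnd_conditions layout (a hedged illustration): prescribe the
    # tracer value on boundary 1 and an inflow normal velocity on boundary 2:
    #
    #   bnd_conditions = {1: {'value': Constant(0.0)},
    #                     2: {'un': Constant(-0.05)}}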
class HorizontalAdvectionTerm(TracerTerm):
r"""
Horizontal advection term :math:`\nabla_h \cdot (\textbf{u} T)`
The weak formulation reads
.. math::
\int_\Omega \nabla_h \cdot (\textbf{u} T) \phi dx
= -\int_\Omega T\textbf{u} \cdot \nabla_h \phi dx
+ \int_{\mathcal{I}_h\cup\mathcal{I}_v}
T^{\text{up}} \text{avg}(\textbf{u}) \cdot
\text{jump}(\phi \textbf{n}_h) dS
where the right hand side has been integrated by parts;
:math:`\mathcal{I}_h,\mathcal{I}_v` denote the set of horizontal and
vertical facets,
:math:`\textbf{n}_h` is the horizontal projection of the unit normal vector,
:math:`T^{\text{up}}` is the upwind value, and :math:`\text{jump}` and
:math:`\text{avg}` denote the jump and average operators across the
interface.
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
if fields_old.get('uv_3d') is None:
return 0
elev = fields_old['elev_3d']
uv = fields_old['uv_3d']
uv_depth_av = fields_old['uv_depth_av']
if uv_depth_av is not None:
uv = uv + uv_depth_av
# FIXME is this an option?
lax_friedrichs_factor = fields_old.get('lax_friedrichs_tracer_scaling_factor')
f = 0
f += -solution*inner(uv, nabla_grad(self.test))*self.dx
if self.horizontal_dg:
# add interface term
uv_av = avg(uv)
un_av = (uv_av[0]*self.normal('-')[0]
+ uv_av[1]*self.normal('-')[1])
s = 0.5*(sign(un_av) + 1.0)
c_up = solution('-')*s + solution('+')*(1-s)
f += c_up*(uv_av[0]*jump(self.test, self.normal[0])
+ uv_av[1]*jump(self.test, self.normal[1])
+ uv_av[2]*jump(self.test, self.normal[2]))*(self.dS_v)
f += c_up*(uv_av[0]*jump(self.test, self.normal[0])
+ uv_av[1]*jump(self.test, self.normal[1])
+ uv_av[2]*jump(self.test, self.normal[2]))*(self.dS_h)
# Lax-Friedrichs stabilization
if self.use_lax_friedrichs:
gamma = 0.5*abs(un_av)*lax_friedrichs_factor
f += gamma*dot(jump(self.test), jump(solution))*(self.dS_v + self.dS_h)
if bnd_conditions is not None:
for bnd_marker in self.boundary_markers:
funcs = bnd_conditions.get(bnd_marker)
ds_bnd = ds_v(int(bnd_marker), degree=self.quad_degree)
if funcs is None:
continue
else:
c_in = solution
c_ext, uv_ext, eta_ext = self.get_bnd_functions(c_in, uv, elev, bnd_marker, bnd_conditions)
# add interior tracer flux
f += c_in*(uv[0]*self.normal[0]
+ uv[1]*self.normal[1])*self.test*ds_bnd
# add boundary contribution if inflow
uv_av = 0.5*(uv + uv_ext)
un_av = self.normal[0]*uv_av[0] + self.normal[1]*uv_av[1]
s = 0.5*(sign(un_av) + 1.0)
f += (1-s)*(c_ext - c_in)*un_av*self.test*ds_bnd
if self.use_symmetric_surf_bnd:
f += solution*(uv[0]*self.normal[0] + uv[1]*self.normal[1])*self.test*ds_surf
return -f
class VerticalAdvectionTerm(TracerTerm):
r"""
Vertical advection term :math:`\partial (w T)/(\partial z)`
The weak form reads
.. math::
\int_\Omega \frac{\partial (w T)}{\partial z} \phi dx
= - \int_\Omega T w \frac{\partial \phi}{\partial z} dx
+ \int_{\mathcal{I}_v} T^{\text{up}} \text{avg}(w) \text{jump}(\phi n_z) dS
where the right hand side has been integrated by parts;
:math:`\mathcal{I}_v` denotes the set of vertical facets,
:math:`n_z` is the vertical projection of the unit normal vector,
:math:`T^{\text{up}}` is the
upwind value, and :math:`\text{jump}` and :math:`\text{avg}` denote the
jump and average operators across the interface.
In the case of ALE moving mesh we substitute :math:`w` with :math:`w - w_m`,
:math:`w_m` being the mesh velocity.
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
w = fields_old.get('w')
if w is None:
return 0
w_mesh = fields_old.get('w_mesh')
lax_friedrichs_factor = fields_old.get('lax_friedrichs_tracer_scaling_factor')
vertvelo = w[2]
if w_mesh is not None:
vertvelo = w[2] - w_mesh
f = 0
f += -solution*vertvelo*Dx(self.test, 2)*self.dx
if self.vertical_dg:
w_av = avg(vertvelo)
s = 0.5*(sign(w_av*self.normal[2]('-')) + 1.0)
c_up = solution('-')*s + solution('+')*(1-s)
f += c_up*w_av*jump(self.test, self.normal[2])*self.dS_h
if self.use_lax_friedrichs:
# Lax-Friedrichs
gamma = 0.5*abs(w_av*self.normal('-')[2])*lax_friedrichs_factor
f += gamma*dot(jump(self.test), jump(solution))*self.dS_h
# NOTE Bottom impermeability condition is naturally satisfied by the definition of w
# NOTE imex solver fails with this in tracerBox example
f += solution*vertvelo*self.normal[2]*self.test*self.ds_surf
return -f
class HorizontalDiffusionTerm(TracerTerm):
r"""
Horizontal diffusion term :math:`-\nabla_h \cdot (\mu_h \nabla_h T)`
Using the symmetric interior penalty method the weak form becomes
.. math::
\int_\Omega \nabla_h \cdot (\mu_h \nabla_h T) \phi dx
=& -\int_\Omega \mu_h (\nabla_h \phi) \cdot (\nabla_h T) dx \\
&+ \int_{\mathcal{I}_h\cup\mathcal{I}_v} \text{jump}(\phi \textbf{n}_h)
\cdot \text{avg}(\mu_h \nabla_h T) dS
+ \int_{\mathcal{I}_h\cup\mathcal{I}_v} \text{jump}(T \textbf{n}_h)
\cdot \text{avg}(\mu_h \nabla \phi) dS \\
&- \int_{\mathcal{I}_h\cup\mathcal{I}_v} \sigma \text{avg}(\mu_h) \text{jump}(T \textbf{n}_h) \cdot
\text{jump}(\phi \textbf{n}_h) dS
where :math:`\sigma` is a penalty parameter, see Hillewaert (2013).
    Hillewaert, Koen (2013). Development of the discontinuous Galerkin method
for high-resolution, large scale CFD and acoustics in industrial
geometries. PhD Thesis. Université catholique de Louvain.
https://dial.uclouvain.be/pr/boreal/object/boreal:128254/
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
if fields_old.get('diffusivity_h') is None:
return 0
diffusivity_h = fields_old['diffusivity_h']
sipg_factor = self.sipg_factor
diff_tensor = as_matrix([[diffusivity_h, 0, 0],
[0, diffusivity_h, 0],
[0, 0, 0]])
grad_test = grad(self.test)
diff_flux = dot(diff_tensor, grad(solution))
f = 0
f += inner(grad_test, diff_flux)*self.dx
if self.horizontal_dg:
h_cell = self.mesh.ufl_cell().sub_cells()[0]
p, q = self.function_space.ufl_element().degree()
cp = (p + 1) * (p + 2) / 2 if h_cell == triangle else (p + 1)**2
# by default the factor is multiplied by 2 to ensure convergence
sigma = cp * FacetArea(self.mesh) / CellVolume(self.mesh)
sp = sigma('+')
sm = sigma('-')
sigma_max = sipg_factor * conditional(sp > sm, sp, sm)
ds_interior = (self.dS_h + self.dS_v)
f += sigma_max * inner(
jump(self.test, self.normal),
dot(avg(diff_tensor), jump(solution, self.normal))
)*ds_interior
f += -inner(avg(dot(diff_tensor, grad(self.test))),
jump(solution, self.normal))*ds_interior
f += -inner(jump(self.test, self.normal),
avg(dot(diff_tensor, grad(solution))))*ds_interior
# symmetric bottom boundary condition
# NOTE introduces a flux through the bed - breaks mass conservation
f += - inner(diff_flux, self.normal)*self.test*self.ds_bottom
f += - inner(diff_flux, self.normal)*self.test*self.ds_surf
return -f
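# Worked penalty example (illustrative): for a P1DG horizontal discretisation
# on triangles, p = 1 gives cp = (1 + 1)*(1 + 2)/2 = 3, so each interior facet
# uses sigma = 3*FacetArea/CellVolume, scaled by sipg_factor and taking the
# larger of the two neighbouring cell values.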
class VerticalDiffusionTerm(TracerTerm):
r"""
Vertical diffusion term :math:`-\frac{\partial}{\partial z} \Big(\mu \frac{T}{\partial z}\Big)`
Using the symmetric interior penalty method the weak form becomes
.. math::
\int_\Omega \frac{\partial}{\partial z} \Big(\mu \frac{T}{\partial z}\Big) \phi dx
=& -\int_\Omega \mu \frac{\partial T}{\partial z} \frac{\partial \phi}{\partial z} dz \\
&+ \int_{\mathcal{I}_{h}} \text{jump}(\phi n_z) \text{avg}\Big(\mu \frac{\partial T}{\partial z}\Big) dS
+ \int_{\mathcal{I}_{h}} \text{jump}(T n_z) \text{avg}\Big(\mu \frac{\partial \phi}{\partial z}\Big) dS \\
&- \int_{\mathcal{I}_{h}} \sigma \text{avg}(\mu) \text{jump}(T n_z) \cdot
\text{jump}(\phi n_z) dS
where :math:`\sigma` is a penalty parameter, see Hillewaert (2013).
Hillewaert, Koen (2013). Development of the discontinuous Galerkin method
for high-resolution, large scale CFD and acoustics in industrial
geometries. PhD Thesis. Université catholique de Louvain.
https://dial.uclouvain.be/pr/boreal/object/boreal:128254/
"""
def residual(self, solution, solution_old, fields, fields_old, bnd_conditions=None):
if fields_old.get('diffusivity_v') is None:
return 0
diffusivity_v = fields_old['diffusivity_v']
sipg_factor = self.sipg_factor_vertical
grad_test = Dx(self.test, 2)
diff_flux = | |
# -*- coding: utf-8 -*-
# Code for creating many graphs
import numpy as np
import json
import copy
from scipy.stats import beta, linregress
import matplotlib.patches as mpatches
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import ptitprince as pt
from sklearn import decomposition
# For Regression
import statsmodels.api as sm
import utils
import tsne
# version 0.3.8
# import umap
# Object for storing a dataset
class Dataset:
def __init__(self, data, target):
self.data = data
self.target = target
# Initialize a standard chart and return the figure
def initStandardChart(saveOutput):
# set figure size
plt.clf()
if saveOutput:
fig = plt.figure()
fig.set_size_inches((11.), (8.5))
else:
fig = plt.figure(1, figsize=(8, 6))
return fig
# Initialize a chart with two subplots and return the figure and axes
def init2SubplotChart(saveOutput):
# set figure size
plt.clf()
if saveOutput:
fig, axes = plt.subplots(2)
fig.set_size_inches((8.5), (11))
else:
fig, axes = plt.subplots(2)
return fig, axes
# If saveOutput, save the chart at filename; else, display it.
def finishChart(saveOutput, filename):
if saveOutput:
utils.check_and_create_path(filename)
pp = PdfPages(filename)
pp.savefig()
pp.close()
else:
plt.show()
plt.close()
# Fit using statsmodel
def runRegression(regrX, regrY):
regrX = sm.add_constant(regrX)
regrModel = sm.OLS(regrY,regrX)
regr = regrModel.fit()
slope = regr.params[1]
intercept = regr.params[0]
r2 = regr.rsquared
# F Value
fval = regr.fvalue
# Degrees of freedom
df = regr.df_resid
pval = regr.pvalues[1]
return (slope, intercept, r2, fval, df, pval)
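# Usage sketch (hedged): regrX and regrY are equal-length 1-D arrays;
# sm.add_constant prepends the intercept column, so regr.params[0] is the
# intercept and regr.params[1] the slope, e.g.
#   slope, intercept, r2, fval, df, pval = runRegression(x_vals, y_vals)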
# given a list of colors with r/g/b specified in the 0-255 range, convert them to the 0-1 range
def getPercentColors(predefinedColors):
return list(map(lambda c: list(map(lambda v: v/255.0, c)), predefinedColors))
# darken/lighten an r, g, b tuple expressed in the 0-1 range
def adjustColor(color, adjust):
r, g, b = color
r = max(0, min(r*adjust, 1))
g = max(0, min(g*adjust, 1))
b = max(0, min(b*adjust, 1))
return (r, g, b)
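# e.g. getPercentColors([(255, 0, 0)]) -> [[1.0, 0.0, 0.0]], and
# adjustColor((0.5, 0.5, 0.5), 1.2) -> (0.6, 0.6, 0.6), clamped to [0, 1].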
# ==============================================================================
# ==============================================================================
# ==============================================================================
# do PCA down to 3 components, then visualize
# Portions of this adapted from code written by <NAME>
# dataSet is the list of data to train on, with names as an array of names for those feature vectors
# includeNames is true if we want to print the names in the figure
# saveOutput is true if we want to save the feature to a file rather than view it
def pca3Viz(dataSet, names, includeNames, saveOutput, saveDir):
np.random.seed(5)
X = dataSet.data
# train PCA
pca = decomposition.PCA(n_components=3)
pca.fit(X)
# transform the training and test data
X = pca.transform(X)
baseSaveDir = saveDir
for t in dataSet.target:
fig = initStandardChart(saveOutput)
saveDir = baseSaveDir + t["name"]
y = t["target"]
# set 3d axes
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
# if we include names, print them
if (includeNames):
for i in range(len(X)):
name = names[i]
ax.text3D(X[i, 0], X[i, 1], (X[i, 2] + 0.01),
name, horizontalalignment='center', size=8)
# plot the data.
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.get_cmap("Set1"))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
# save or show the data
if includeNames:
labelText = "_labels"
else:
labelText = "_no_labels"
filename = saveDir + "pca3D" + labelText + ".pdf"
finishChart(saveOutput, filename)
# do PCA down to 2 components, then visualize
# Portions of this adapted from code written by <NAME>
# dataSet is the list of data to train on, with names as an array of names for those feature vectors
# includeNames is true if we want to print the names in the figure
# saveOutput is true if we want to save the feature to a file rather than view it
# oneThree is true if we are examining axes 1 and 3 rather than 1 and 2
def pca2Viz(dataSet, names, includeNames, saveOutput, saveDir, oneThree):
np.random.seed(5)
X = dataSet.data
# get the proper axes to examine
if (oneThree):
ax0 = 0
ax1 = 2
numComponents = 3
else:
ax0 = 0
ax1 = 1
numComponents = 2
# train PCA and transform data
pca = decomposition.PCA(n_components=numComponents)
pca.fit(X)
X = pca.transform(X)
baseSaveDir = saveDir
for t in dataSet.target:
fig = initStandardChart(saveOutput)
saveDir = baseSaveDir + t["name"]
y = t["target"]
# plot the data.
plt.scatter(X[:, ax0], X[:, ax1], s=49, c=y, cmap=plt.get_cmap("Set1"))
ymin, ymax = plt.ylim()
yscale = ymax - ymin
# if we include names, print them
if (includeNames):
for i in range(len(X)):
name = names[i]
plt.text(X[i, ax0], (X[i, ax1] + 0.015*yscale),
name, horizontalalignment='center', size=8)
# save or show the data
if includeNames:
labelText = "_labels"
else:
labelText = "_no_labels"
if (oneThree):
oneThreeText = "_1_3"
else:
oneThreeText = ""
filename = saveDir + "pca2D" + oneThreeText + labelText + ".pdf"
finishChart(saveOutput, filename)
# do PCA down to 4 components, then visualize
# Portions of this adapted from code written by <NAME>
# dataSet is the list of data to train on, with names as an array of names for those feature vectors
# includeNames is true if we want to print the names in the figure
# saveOutput is true if we want to save the feature to a file rather than view it
def pca4Viz(dataSet, names, includeNames, saveOutput, saveDir):
np.random.seed(5)
X = dataSet.data
# train and apply PCA
pca = decomposition.PCA(n_components=4)
pca.fit(X)
X = pca.transform(X)
baseSaveDir = saveDir
for t in dataSet.target:
fig, axes = init2SubplotChart(saveOutput)
saveDir = baseSaveDir + t["name"]
y = t["target"]
# make the two plots, with names if we are including names
for k in range(len(axes)):
ax = axes[k]
offset = k*2
secondStartIndex = 1
ax.scatter(X[:, 0+offset], X[:, secondStartIndex+offset], s=49, c=y, cmap=plt.get_cmap("Set1"))
if (includeNames):
ymin, ymax = ax.axes.get_ylim()
yscale = ymax - ymin
for i in range(len(X)):
name = names[i]
ax.text(X[i, 0+offset], (X[i, secondStartIndex+offset] + 0.015*yscale),
name, horizontalalignment='center', size=8)
# save or show the data
if includeNames:
labelText = "_labels"
else:
labelText = "_no_labels"
filename = saveDir + "pca4D" + labelText + ".pdf"
finishChart(saveOutput, filename)
# data histogram with no beta distribution shown
def dataHistogram(data, saveOutput, saveDir, name):
dataHistogramWithBeta(data, 0, 0, saveOutput, saveDir, name, useBeta=False)
# data histogram with a beta distribution specified by a (alpha) and b (beta)
def dataHistogramWithBeta(data, a, b, saveOutput, saveDir, name, useBeta=True):
fig = initStandardChart(saveOutput)
n, bins, patches = plt.hist(data, bins='auto', density=True, stacked=True)
if useBeta:
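        # Span the overlay domain between the 0.1% and 99.9% quantiles of the fitted beta.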
x = np.linspace(beta.ppf(0.001, a, b), beta.ppf(0.999, a, b), 1000)
plt.plot(x, beta.pdf(x, a, b), 'r-', lw=2, alpha=0.8, label='beta pdf')
plt.xlim(0, 1)
filename = saveDir + name + ".pdf"
finishChart(saveOutput, filename)
# Calculate x and y coordinates to display beta distributions
def compBetaData(beta_1, beta_2):
a1, b1 = beta_1
a2, b2 = beta_2
x1 = np.linspace(beta.ppf(0.001, a1, b1), beta.ppf(0.999, a1, b1), 1000)
y1 = beta.pdf(x1, a1, b1)
x2 = np.linspace(beta.ppf(0.001, a2, b2), beta.ppf(0.999, a2, b2), 1000)
y2 = beta.pdf(x2, a2, b2)
return x1, y1, x2, y2
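# Typical use: pass two (alpha, beta) parameter pairs, e.g. compBetaData((a1, b1), (a2, b2));
# the returned 4-tuple is what twoBetasComp and twoBetasLog expect as 'data' or 'oldDist'.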
# graph two beta distributions compared to a past distribution
def twoBetasComp(data, sims, saveOutput, saveDir, name, oldDist=None, oldSims=None, xlim=[0, 1]):
fig = initStandardChart(saveOutput)
    if oldDist is not None:
x1, y1, x2, y2 = oldDist
plt.plot(x1, y1, 'r-.', lw=2, alpha=0.4)
plt.plot(x2, y2, 'b-.', lw=2, alpha=0.4)
    if oldSims is not None:
colorMap = plt.get_cmap("Set2")
colors = colorMap(range(len(oldSims)))
for i, s in enumerate(oldSims):
sim, simName = s
plt.axvline(x=sim, linestyle=":", color=colors[i], alpha=0.5)
x1, y1, x2, y2 = data
plt.plot(x1, y1, 'r-', lw=2, alpha=0.5, label='Different Author')
plt.plot(x2, y2, 'b-', lw=2, alpha=0.5, label='Same Author')
colorMap = plt.get_cmap("Set2")
colors = colorMap(range(len(sims)))
for i, s in enumerate(sims):
sim, simName = s
plt.axvline(x=sim, linestyle="-", label=simName, color=colors[i])
plt.xlim(xlim[0], xlim[1])
plt.legend()
filename = saveDir + name + ".pdf"
finishChart(saveOutput, filename)
# graph two beta distributions on a zoomed scale
def twoBetasZoom(data, sims, saveOutput, saveDir, name, oldDist=None, oldSims=None):
twoBetasComp(data, sims, saveOutput, saveDir, name, oldDist=oldDist, oldSims=oldSims, xlim=[0.8, 1])
# graph two beta distributions on a log scale
def twoBetasLog(data, sims, saveOutput, saveDir, name, oldDist=None, oldSims=None):
fig = initStandardChart(saveOutput)
    if oldDist is not None:
x1, y1, x2, y2 = oldDist
plt.semilogx(1-x1, y1, 'r-.', lw=2, alpha=0.4)
plt.semilogx(1-x2, y2, 'b-.', lw=2, alpha=0.4)
    if oldSims is not None:
colorMap = plt.get_cmap("Set2")
colors = colorMap(range(len(oldSims)))
for i, s in enumerate(oldSims):
sim, simName = s
plt.axvline(x=1-sim, linestyle=":", color=colors[i], alpha=0.5)
x1, y1, x2, y2 = data
plt.semilogx(1-x1, y1, 'r-', lw=2, alpha=0.5, label='Different Author')
plt.semilogx(1-x2, y2, 'b-', lw=2, alpha=0.5, label='Same Author')
colorMap = plt.get_cmap("Set2")
colors = colorMap(range(len(sims)))
for i, s in enumerate(sims):
sim, simName = s
plt.axvline(x=1-sim, linestyle="-", label=simName, color=colors[i])
xEnd = 0.001
plt.xlim(1, xEnd)
xTicks = [xEnd, 0.01, 0.1, 1]
xLabels = list(map(lambda x: str(1-x), xTicks))
plt.xticks(xTicks, xLabels)
plt.legend()
    filename = saveDir + name + ".pdf"  # reconstructed to match the sibling helpers; the source was truncated here
    finishChart(saveOutput, filename)
<filename>pilot_main.py
#!/usr/bin/python
"""Parses and loads RGI questions from excel into MongoDB"""
from xlrd import open_workbook
from sys import argv
from pilot_parser import parse
from pymongo import MongoClient
import json
from pprint import pprint
# from utils import write_json
def main(args):
"""Main body"""
args_len = len(args)
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
# set source excel and destination json files
if args_len == 1:
src = args[0] + '.xlsx'
dest = args[0] + '.json'
elif args_len == 2:
src = args[0] + '.xlsx'
dest = args[1] + '.json'
else:
print 'you must enter valid source and destination file names. If you enter a single \
argument, that will be taken as both source and destination name. Please limit input \
to two arguments.'
exit()
# Error handling for non-existing files
try:
workbook = open_workbook(src)
except IOError:
print 'File does not exist. Please give a valid source file'
exit()
data = []
    # get sheet names
sheet_names = workbook.sheet_names()
parse(sheet_names[1], workbook.sheet_by_name(sheet_names[1]), data)
countries_data = [
{'country': 'Afghanistan', 'country_ID': 'AFG', 'iso2': 'AF'},
# {'country': 'Aland Islands', 'country_ID': 'ALA', 'iso2': 'AX'},
{'country': 'Albania', 'country_ID': 'ALB', 'iso2': 'AL'},
{'country': 'Algeria', 'country_ID': 'DZA', 'iso2': 'DZ'},
# {'country': 'American Samoa', 'country_ID': 'ASM', 'iso2': 'AS'},
# {'country': 'Andorra', 'country_ID': 'AND', 'iso2': 'AD'},
{'country': 'Angola', 'country_ID': 'AGO', 'iso2': 'AO'},
# {'country': 'Anguilla', 'country_ID': 'AIA', 'iso2': 'AI'},
# {'country': 'Antarctica', 'country_ID': 'ATA', 'iso2': 'AQ'},
{'country': 'Antigua and Barbuda', 'country_ID': 'ATG', 'iso2': 'AG'},
{'country': 'Argentina', 'country_ID': 'ARG', 'iso2': 'AR'},
{'country': 'Armenia', 'country_ID': 'ARM', 'iso2': 'AM'},
{'country': 'Aruba', 'country_ID': 'ABW', 'iso2': 'AW'},
{'country': 'Australia', 'country_ID': 'AUS', 'iso2': 'AU'},
{'country': 'Austria', 'country_ID': 'AUT', 'iso2': 'AT'},
{'country': 'Azerbaijan', 'country_ID': 'AZE', 'iso2': 'AZ'},
{'country': 'Bahamas', 'country_ID': 'BHS', 'iso2': 'BS'},
{'country': 'Bahrain', 'country_ID': 'BHR', 'iso2': 'BH'},
{'country': 'Bangladesh', 'country_ID': 'BGD', 'iso2': 'BD'},
{'country': 'Barbados', 'country_ID': 'BRB', 'iso2': 'BB'},
{'country': 'Belarus', 'country_ID': 'BLR', 'iso2': 'BY'},
{'country': 'Belgium', 'country_ID': 'BEL', 'iso2': 'BE'},
{'country': 'Belize', 'country_ID': 'BLZ', 'iso2': 'BZ'},
{'country': 'Benin', 'country_ID': 'BEN', 'iso2': 'BJ'},
{'country': 'Bermuda', 'country_ID': 'BMU', 'iso2': 'BM'},
{'country': 'Bhutan', 'country_ID': 'BTN', 'iso2': 'BT'},
{'country': 'Bolivia, Plurinational State of', 'country_ID': 'BOL', 'iso2': 'BO'},
# {'country': 'Bonaire, Sint Eustatius and Saba', 'country_ID': 'BES', 'iso2': 'BQ'},
{'country': 'Bosnia and Herzegovina', 'country_ID': 'BIH', 'iso2': 'BA'},
{'country': 'Botswana', 'country_ID': 'BWA', 'iso2': 'BW'},
# {'country': 'Bouvet Island', 'country_ID': 'BVT', 'iso2': 'BV'},
{'country': 'Brazil', 'country_ID': 'BRA', 'iso2': 'BR'},
# {'country': 'British Indian Ocean Territory', 'country_ID': 'IOT', 'iso2': 'IO'},
{'country': 'Brunei Darussalam', 'country_ID': 'BRN', 'iso2': 'BN'},
{'country': 'Bulgaria', 'country_ID': 'BGR', 'iso2': 'BG'},
{'country': 'Burkina Faso', 'country_ID': 'BFA', 'iso2': 'BF'},
{'country': 'Burundi', 'country_ID': 'BDI', 'iso2': 'BI'},
{'country': 'Cambodia', 'country_ID': 'KHM', 'iso2': 'KH'},
{'country': 'Cameroon', 'country_ID': 'CMR', 'iso2': 'CM'},
{'country': 'Canada', 'country_ID': 'CAN', 'iso2': 'CA'},
{'country': 'Cape Verde', 'country_ID': 'CPV', 'iso2': 'CV'},
{'country': 'Cayman Islands', 'country_ID': 'CYM', 'iso2': 'KY'},
{'country': 'Central African Republic', 'country_ID': 'CAF', 'iso2': 'CF'},
{'country': 'Chad', 'country_ID': 'TCD', 'iso2': 'TD'},
{'country': 'Chile', 'country_ID': 'CHL', 'iso2': 'CL'},
{'country': 'China', 'country_ID': 'CHN', 'iso2': 'CN'},
# {'country': 'Christmas Island', 'country_ID': 'CXR', 'iso2': 'CX'},
# {'country': 'Cocos (Keeling) Islands', 'country_ID': 'CCK', 'iso2': 'CC'},
{'country': 'Colombia', 'country_ID': 'COL', 'iso2': 'CO'},
{'country': 'Comoros', 'country_ID': 'COM', 'iso2': 'KM'},
{'country': 'Congo', 'country_ID': 'COG', 'iso2': 'CG'},
{'country': 'Congo, The Democratic Republic of the', 'country_ID': 'COD', 'iso2': 'CD'},
# {'country': 'Cook Islands', 'country_ID': 'COK', 'iso2': 'CK'},
{'country': 'Costa Rica', 'country_ID': 'CRI', 'iso2': 'CR'},
{'country': 'Cote d\'Ivoire', 'country_ID': 'CIV', 'iso2': 'CI'},
{'country': 'Croatia', 'country_ID': 'HRV', 'iso2': 'HR'},
{'country': 'Cuba', 'country_ID': 'CUB', 'iso2': 'CU'},
# {'country': 'Curacao', 'country_ID': 'CUW', 'iso2': 'CW'},
{'country': 'Cyprus', 'country_ID': 'CYP', 'iso2': 'CY'},
{'country': 'Czech Republic', 'country_ID': 'CZE', 'iso2': 'CZ'},
{'country': 'Denmark', 'country_ID': 'DNK', 'iso2': 'DK'},
{'country': 'Djibouti', 'country_ID': 'DJI', 'iso2': 'DJ'},
{'country': 'Dominica', 'country_ID': 'DMA', 'iso2': 'DM'},
{'country': 'Dominican Republic', 'country_ID': 'DOM', 'iso2': 'DO'},
{'country': 'Ecuador', 'country_ID': 'ECU', 'iso2': 'EC'},
{'country': 'Egypt', 'country_ID': 'EGY', 'iso2': 'EG'},
{'country': 'El Salvador', 'country_ID': 'SLV', 'iso2': 'SV'},
{'country': 'Equatorial Guinea', 'country_ID': 'GNQ', 'iso2': 'GQ'},
{'country': 'Eritrea', 'country_ID': 'ERI', 'iso2': 'ER'},
{'country': 'Estonia', 'country_ID': 'EST', 'iso2': 'EE'},
{'country': 'Ethiopia', 'country_ID': 'ETH', 'iso2': 'ET'},
# {'country': 'Falkland Islands (Malvinas)', 'country_ID': 'FLK', 'iso2': 'FK'},
# {'country': 'Faroe Islands', 'country_ID': 'FRO', 'iso2': 'FO'},
{'country': 'Fiji', 'country_ID': 'FJI', 'iso2': 'FJ'},
{'country': 'Finland', 'country_ID': 'FIN', 'iso2': 'FI'},
{'country': 'France', 'country_ID': 'FRA', 'iso2': 'FR'},
# {'country': 'French Guiana', 'country_ID': 'GUF', 'iso2': 'GF'},
# {'country': 'French Polynesia', 'country_ID': 'PYF', 'iso2': 'PF'},
# {'country': 'French Southern Territories', 'country_ID': 'ATF', 'iso2': 'TF'},
{'country': 'Gabon', 'country_ID': 'GAB', 'iso2': 'GA'},
{'country': 'Gambia', 'country_ID': 'GMB', 'iso2': 'GM'},
{'country': 'Georgia', 'country_ID': 'GEO', 'iso2': 'GE'},
{'country': 'Germany', 'country_ID': 'DEU', 'iso2': 'DE'},
{'country': 'Ghana', 'country_ID': 'GHA', 'iso2': 'GH'},
# {'country': 'Gibraltar', 'country_ID': 'GIB', 'iso2': 'GI'},
{'country': 'Greece', 'country_ID': 'GRC', 'iso2': 'GR'},
{'country': 'Greenland', 'country_ID': 'GRL', 'iso2': 'GL'},
{'country': 'Grenada', 'country_ID': 'GRD', 'iso2': 'GD'},
# {'country': 'Guadeloupe', 'country_ID': 'GLP', 'iso2': 'GP'},
{'country': 'Guam', 'country_ID': 'GUM', 'iso2': 'GU'},
{'country': 'Guatemala', 'country_ID': 'GTM', 'iso2': 'GT'},
# {'country': 'Guernsey', 'country_ID': 'GGY', 'iso2': 'GG'},
{'country': 'Guinea', 'country_ID': 'GIN', 'iso2': 'GN'},
{'country': 'Guinea-Bissau', 'country_ID': 'GNB', 'iso2': 'GW'},
{'country': 'Guyana', 'country_ID': 'GUY', 'iso2': 'GY'},
{'country': 'Haiti', 'country_ID': 'HTI', 'iso2': 'HT'},
# {'country': 'Heard Island and McDonald Islands', 'country_ID': 'HMD', 'iso2': 'HM'},
# {'country': 'Holy See (Vatican City State)', 'country_ID': 'VAT', 'iso2': 'VA'},
{'country': 'Honduras', 'country_ID': 'HND', 'iso2': 'HN'},
{'country': 'Hong Kong', 'country_ID': 'HKG', 'iso2': 'HK'},
{'country': 'Hungary', 'country_ID': 'HUN', 'iso2': 'HU'},
{'country': 'Iceland', 'country_ID': 'ISL', 'iso2': 'IS'},
{'country': 'India', 'country_ID': 'IND', 'iso2': 'IN'},
{'country': 'Indonesia', 'country_ID': 'IDN', 'iso2': 'ID'},
{'country': 'Iran, Islamic Republic of', 'country_ID': 'IRN', 'iso2': 'IR'},
{'country': 'Iraq', 'country_ID': 'IRQ', 'iso2': 'IQ'},
{'country': 'Ireland', 'country_ID': 'IRL', 'iso2': 'IE'},
# {'country': 'Isle of Man', 'country_ID': 'IMN', 'iso2': 'IM'},
{'country': 'Israel', 'country_ID': 'ISR', 'iso2': 'IL'},
{'country': 'Italy', 'country_ID': 'ITA', 'iso2': 'IT'},
{'country': 'Jamaica', 'country_ID': 'JAM', 'iso2': 'JM'},
{'country': 'Japan', 'country_ID': 'JPN', 'iso2': 'JP'},
# {'country': 'Jersey', 'country_ID': 'JEY', 'iso2': 'JE'},
{'country': 'Jordan', 'country_ID': 'JOR', 'iso2': 'JO'},
{'country': 'Kazakhstan', 'country_ID': 'KAZ', 'iso2': 'KZ'},
{'country': 'Kenya', 'country_ID': 'KEN', 'iso2': 'KE'},
{'country': 'Kiribati', 'country_ID': 'KIR', 'iso2': 'KI'},
{'country': 'DPRK Korea', 'country_ID': 'PRK', 'iso2': 'KP'},
{'country': 'Republic of Korea', 'country_ID': 'KOR', 'iso2': 'KR'},
{'country': 'Kuwait', 'country_ID': 'KWT', 'iso2': 'KW'},
{'country': 'Kyrgyzstan', 'country_ID': 'KGZ', 'iso2': 'KG'},
{'country': 'Lao Peoples Democratic Republic', 'country_ID': 'LAO', 'iso2': 'LA'},
{'country': 'Latvia', 'country_ID': 'LVA', 'iso2': 'LV'},
{'country': 'Lebanon', 'country_ID': 'LBN', 'iso2': 'LB'},
{'country': 'Lesotho', 'country_ID': 'LSO', 'iso2': 'LS'},
{'country': 'Liberia', 'country_ID': 'LBR', 'iso2': 'LR'},
{'country': 'Libya', 'country_ID': 'LBY', 'iso2': 'LY'},
{'country': 'Liechtenstein', 'country_ID': 'LIE', 'iso2': 'LI'},
{'country': 'Lithuania', 'country_ID': 'LTU', 'iso2': 'LT'},
{'country': 'Luxembourg', 'country_ID': 'LUX', 'iso2': 'LU'},
# {'country': 'Macao', 'country_ID': 'MAC', 'iso2': 'MO'},
{'country': 'Macedonia, Republic of', 'country_ID': 'MKD', 'iso2': 'MK'},
{'country': 'Madagascar', 'country_ID': 'MDG', 'iso2': 'MG'},
{'country': 'Malawi', 'country_ID': 'MWI', 'iso2': 'MW'},
{'country': 'Malaysia', 'country_ID': 'MYS', 'iso2': 'MY'},
# {'country': 'Maldives', 'country_ID': 'MDV', 'iso2': 'MV'},
{'country': 'Mali', 'country_ID': 'MLI', 'iso2': 'ML'},
# {'country': 'Malta', 'country_ID': 'MLT', 'iso2': 'MT'},
# {'country': 'Marshall Islands', 'country_ID': 'MHL', 'iso2': 'MH'},
# {'country': 'Martinique', 'country_ID': 'MTQ', 'iso2': 'MQ'},
{'country': 'Mauritania', 'country_ID': 'MRT', 'iso2': 'MR'},
# {'country': 'Mauritius', 'country_ID': 'MUS', 'iso2': 'MU'},
# {'country': 'Mayotte', 'country_ID': 'MYT', 'iso2': 'YT'},
{'country': 'Mexico', 'country_ID': 'MEX', 'iso2': 'MX'},
# {'country': 'Micronesia, Federated States of', 'country_ID': 'FSM', 'iso2': 'FM'},
{'country': 'Moldova, Republic of', 'country_ID': 'MDA', 'iso2': 'MD'},
# {'country': 'Monaco', 'country_ID': 'MCO', 'iso2': 'MC'},
{'country': 'Mongolia', 'country_ID': 'MNG', 'iso2': 'MN'},
{'country': 'Montenegro', 'country_ID': 'MNE', 'iso2': 'ME'},
# {'country': 'Montserrat', 'country_ID': 'MSR', 'iso2': 'MS'},
{'country': 'Morocco', 'country_ID': 'MAR', 'iso2': 'MA'},
{'country': 'Mozambique', 'country_ID': 'MOZ', 'iso2': 'MZ'},
{'country': 'Myanmar', 'country_ID': 'MMR', 'iso2': 'MM'},
{'country': 'Namibia', 'country_ID': 'NAM', 'iso2': 'NA'},
# {'country': 'Nauru', 'country_ID': 'NRU', 'iso2': 'NR'},
{'country': 'Nepal', 'country_ID': 'NPL', 'iso2': 'NP'},
{'country': 'Netherlands', 'country_ID': 'NLD', 'iso2': 'NL'},
# {'country': 'New Caledonia', 'country_ID': 'NCL', 'iso2': 'NC'},
{'country': 'New Zealand', 'country_ID': 'NZL', 'iso2': 'NZ'},
{'country': 'Nicaragua', 'country_ID': 'NIC', 'iso2': 'NI'},
{'country': 'Niger', 'country_ID': 'NER', 'iso2': 'NE'},
{'country': 'Nigeria', 'country_ID': 'NGA', 'iso2': 'NG'},
# {'country': 'Niue', 'country_ID': 'NIU', 'iso2': 'NU'},
# {'country': 'Norfolk Island', 'country_ID': 'NFK', 'iso2': 'NF'},
# {'country': 'Northern Mariana Islands', 'country_ID': 'MNP', 'iso2': 'MP'},
{'country': 'Norway', 'country_ID': 'NOR', 'iso2': 'NO'},
{'country': 'Oman', 'country_ID': 'OMN', 'iso2': 'OM'},
<gh_stars>1-10
from Multiple_GAN_codes.Basic_structure import *
from keras.datasets import mnist
import time
from utils import *
from scipy.misc import imsave as ims
from ops import *
from Utlis2 import *
import random as random
from glob import glob
import os, gzip
import keras as keras
def file_name(file_dir):
t1 = []
file_dir = "F:/Third_Experiment/Multiple_GAN_codes/data/images_background/"
for root, dirs, files in os.walk(file_dir):
for a1 in dirs:
            b1 = "F:/Third_Experiment/Multiple_GAN_codes/data/images_background/" + a1
for root2, dirs2, files2 in os.walk(b1):
for c1 in dirs2:
b2 = b1 + "/" + c1 + "/*.png"
img_path = glob(b2)
t1.append(img_path)
        print('root_dir:', root)  # current directory path
        print('sub_dirs:', dirs)  # all subdirectories under the current path
        print('files:', files)  # all non-directory files under the current path
cc = []
for i in range(len(t1)):
a1 = t1[i]
for p1 in a1:
cc.append(p1)
return cc
def sample_gumbel(shape, eps=1e-20):
"""Sample from Gumbel(0, 1)"""
U = tf.random_uniform(shape, minval=0, maxval=1)
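    # Inverse-CDF trick: if U ~ Uniform(0, 1) then -log(-log(U)) ~ Gumbel(0, 1); eps guards against log(0).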
return -tf.log(-tf.log(U + eps) + eps)
def my_gumbel_softmax_sample(logits, cats_range, temperature=0.1):
""" Draw a sample from the Gumbel-Softmax distribution"""
y = logits + sample_gumbel(tf.shape(logits))
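    # A low temperature sharpens the softmax toward a (near) one-hot sample.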
logits_with_noise = tf.nn.softmax(y / temperature)
return logits_with_noise
def load_mnist(dataset_name):
data_dir = os.path.join("./data", dataset_name)
def extract_data(filename, num_data, head_size, data_size):
with gzip.open(filename) as bytestream:
bytestream.read(head_size)
buf = bytestream.read(data_size * num_data)
            data = np.frombuffer(buf, dtype=np.uint8).astype(np.float64)
return data
data = extract_data(data_dir + '/train-images-idx3-ubyte.gz', 60000, 16, 28 * 28)
trX = data.reshape((60000, 28, 28, 1))
data = extract_data(data_dir + '/train-labels-idx1-ubyte.gz', 60000, 8, 1)
trY = data.reshape((60000))
data = extract_data(data_dir + '/t10k-images-idx3-ubyte.gz', 10000, 16, 28 * 28)
teX = data.reshape((10000, 28, 28, 1))
data = extract_data(data_dir + '/t10k-labels-idx1-ubyte.gz', 10000, 8, 1)
teY = data.reshape((10000))
trY = np.asarray(trY)
teY = np.asarray(teY)
X = np.concatenate((trX, teX), axis=0)
y = np.concatenate((trY, teY), axis=0).astype(np.int)
seed = 547
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(y)
    y_vec = np.zeros((len(y), 10), dtype=np.float64)
for i, label in enumerate(y):
y_vec[i, y[i]] = 1.0
return X / 255., y_vec
def My_Encoder_mnist(image,name, batch_size=64, reuse=False):
with tf.variable_scope(name) as scope:
if reuse:
scope.reuse_variables()
len_discrete_code = 4
is_training = True
z_dim = 32
x = image
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='c_conv1'))
net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='c_conv2'), is_training=is_training, scope='c_bn2'))
net = tf.reshape(net, [batch_size, -1])
net = lrelu(bn(linear(net, 1024, scope='c_fc3'), is_training=is_training, scope='c_bn3'))
net = lrelu(bn(linear(net, 64, scope='e_fc11'), is_training=is_training, scope='c_bn11'))
z_mean = linear(net, z_dim, 'e_mean')
z_log_sigma_sq = linear(net, z_dim, 'e_log_sigma_sq')
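        # Softplus keeps the predicted variance term strictly positive.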
z_log_sigma_sq = tf.nn.softplus(z_log_sigma_sq)
return z_mean, z_log_sigma_sq
def My_Classifier_mnist(image,name, batch_size=64, reuse=False):
with tf.variable_scope(name) as scope:
if reuse:
scope.reuse_variables()
len_discrete_code = 4
is_training = True
z_dim = 32
x = image
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='c_conv1'))
net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='c_conv2'), is_training=is_training, scope='c_bn2'))
net = tf.reshape(net, [batch_size, -1])
net = lrelu(bn(linear(net, 1024, scope='c_fc3'), is_training=is_training, scope='c_bn3'))
net = lrelu(bn(linear(net, 64, scope='e_fc11'), is_training=is_training, scope='c_bn11'))
out_logit = linear(net, len_discrete_code, scope='e_fc22')
softmaxValue = tf.nn.softmax(out_logit)
return out_logit,softmaxValue
class LifeLone_MNIST(object):
def __init__(self):
self.batch_size = 64
self.input_height = 28
self.input_width = 28
self.c_dim = 1
self.z_dim = 32
self.len_discrete_code = 4
self.epoch = 10
self.learning_rate = 0.0002
self.beta1 = 0.5
# MNIST dataset
mnistName = "mnist"
fashionMnistName = "Fashion"
data_X, data_y = load_mnist(mnistName)
x_train = data_X[0:60000]
x_test = data_X[60000:70000]
y_train = data_y[0:60000]
y_test = data_y[60000:70000]
self.mnist_train_x = x_train
self.mnist_train_y = np.zeros((np.shape(x_train)[0],4))
self.mnist_train_y[:,0] = 1
self.mnist_test_x = x_test
self.mnist_test_y = y_test
data_X, data_y = load_mnist(fashionMnistName)
x_train1 = data_X[0:60000]
x_test1 = data_X[60000:70000]
y_train1 = data_y[0:60000]
y_test1 = data_y[60000:70000]
self.mnistFashion_train_x = x_train1
self.mnistFashion_train_y = np.zeros((np.shape(x_train1)[0],4))
self.mnistFashion_train_y[:,1] = 1
files = file_name(1)
data_files = files
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
n_examples = np.shape(data_files)[0]
batch = [get_image(batch_file, 105, 105,
resize_height=28, resize_width=28,
crop=False, grayscale=True) \
for batch_file in data_files]
thirdX = np.array(batch)
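        # Flip the binarized images: pixels equal to 1.0 become 0, everything else becomes 1,
        # so strokes and background are inverted.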
for t1 in range(n_examples):
a1 = thirdX[t1]
for p1 in range(28):
for p2 in range(28):
if thirdX[t1,p1,p2] == 1.0:
thirdX[t1,p1,p2] = 0
else:
thirdX[t1, p1, p2] = 1
myTest = thirdX[0:self.batch_size]
self.thirdX = np.reshape(thirdX,(-1,28,28,1))
self.thirdY = np.zeros((np.shape(self.thirdX)[0],4))
self.thirdY[:,2] = 1
#ims("results/" + "gggg" + str(0) + ".jpg", merge(myTest[:64], [8, 8]))
cc1 = 0
def build_model(self):
min_value = 1e-10
# some parameters
image_dims = [self.input_height, self.input_width, self.c_dim]
bs = self.batch_size
self.inputs = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images')
self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name='z')
self.y = tf.placeholder(tf.float32, [self.batch_size, self.len_discrete_code])
#GAN networks
gan_code = tf.concat((self.z,self.y),axis=1)
G1 = Generator_mnist("GAN_generator",gan_code, reuse=False)
## 1. GAN Loss
# output of D for real images
D_real, D_real_logits, _ = Discriminator_Mnist(self.inputs, "discriminator", reuse=False)
# output of D for fake images
D_fake, D_fake_logits, input4classifier_fake = Discriminator_Mnist(G1, "discriminator", reuse=True)
self.g_loss = tf.reduce_mean(D_fake_logits)
self.d_loss = tf.reduce_mean(D_real_logits) - tf.reduce_mean(D_fake_logits)
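        # WGAN-GP style gradient penalty: push the critic's gradient norm toward 1
        # on random interpolates between real and generated images.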
epsilon = tf.random_uniform([], 0.0, 1.0)
x_hat = epsilon * self.inputs + (1 - epsilon) * G1
_,d_hat,_ = Discriminator_Mnist(x_hat, "discriminator", reuse=True)
scale = 10.0
ddx = tf.gradients(d_hat, x_hat)[0]
ddx = tf.sqrt(tf.reduce_sum(tf.square(ddx), axis=1))
ddx = tf.reduce_mean(tf.square(ddx - 1.0) * scale)
self.d_loss = self.d_loss + ddx
# losses
'''
d_r_loss = tf.losses.mean_squared_error(tf.ones_like(D_real_logits), D_real_logits)
d_f_loss = tf.losses.mean_squared_error(tf.zeros_like(D_fake_logits), D_fake_logits)
self.d_loss = (d_r_loss + d_f_loss) / 2.0
self.g_loss = tf.losses.mean_squared_error(tf.ones_like(D_fake_logits), D_fake_logits)
'''
""" Graph Input """
# images
self.isPhase = 0
#domain 1
z_mean, z_log_sigma_sq = My_Encoder_mnist(self.inputs,"encoder1", batch_size=64, reuse=False)
out_logit,softmaxValue = My_Classifier_mnist(self.inputs,"classifier", batch_size=64, reuse=False)
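        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, 1), so sampling stays differentiable.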
continous_variables = z_mean + z_log_sigma_sq * tf.random_normal(tf.shape(z_mean), 0, 1, dtype=tf.float32)
log_y = tf.log(softmaxValue + 1e-10)
discrete_real = my_gumbel_softmax_sample(log_y, np.arange(self.len_discrete_code))
y_labels = tf.argmax(softmaxValue,1)
y_labels = tf.cast(y_labels,dtype=tf.float32)
y_labels = tf.reshape(y_labels,(-1,1))
code1 = tf.concat((continous_variables,discrete_real),axis=1)
reco1 = Generator_mnist("generator1",code1, reuse=False)
reco2 = reco1
#VAE loss
reconstruction_loss1 = tf.reduce_mean(tf.reduce_sum(tf.square(reco1 - self.inputs), [1, 2, 3]))
KL_divergence1 = 0.5 * tf.reduce_sum(
tf.square(z_mean - y_labels) + tf.square(z_log_sigma_sq) - tf.log(1e-8 + tf.square(z_log_sigma_sq)) - 1, 1)
KL_divergence1 = tf.reduce_mean(KL_divergence1)
self.vae_loss1 = reconstruction_loss1 + KL_divergence1
self.vaeLoss = self.vae_loss1
#classification loss
self.classifier_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=out_logit, labels=self.y))
""" Training """
# divide trainable variables into a group for D and a group for G
T_vars = tf.trainable_variables()
encoder_vars1 = [var for var in T_vars if var.name.startswith('encoder1')]
encoderClassifier_vars1 = [var for var in T_vars if var.name.startswith('classifier')]
generator1_vars = [var for var in T_vars if var.name.startswith('generator1')]
discriminator_vars1 = [var for var in T_vars if var.name.startswith('discriminator')]
GAN_generator_vars = [var for var in T_vars if var.name.startswith('GAN_generator')]
self.output1 = reco1
self.output2 = reco2
self.GAN_output = G1
# optimizers
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self.vae1_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
.minimize(self.vaeLoss, var_list=encoder_vars1 + generator1_vars)
self.d_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
.minimize(self.d_loss, var_list=discriminator_vars1)
self.g_optim = tf.train.AdamOptimizer(self.learning_rate * 5, beta1=self.beta1) \
.minimize(self.g_loss, var_list=GAN_generator_vars)
self.classifier_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
.minimize(self.classifier_loss, var_list=encoderClassifier_vars1)
b1 = 0
def predict(self):
out_logit1, softmaxValue1 = My_Classifier_mnist(self.inputs, "classifier", batch_size=64,
reuse=True)
predictions = tf.argmax(softmaxValue1, 1, name="predictions")
return predictions
def test(self):
with tf.Session() as sess:
self.saver = tf.train.Saver()
self.sess = sess
sess.run(tf.global_variables_initializer())
self.saver.restore(sess, 'models/TeacherStudent_WGAN_1')
myIndex = 2
mnist_x_test = self.mnistFashion_train_x[myIndex*self.batch_size: (myIndex+1)*self.batch_size]
mnist_y_test = self.mnist_test_y
testX = self.mnistFashion_train_x
testY = self.mnistFashion_train_y
mnistFashion_x_test = self.mnistFashion_train_x[0:self.batch_size]
mnistFashion_y_test = self.mnistFashion_train_y
r2 = np.reshape(mnist_x_test,(-1,28,28))
ims("results/" + "Real" + str(0) + ".jpg", merge(r2[:64], [8, 8]))
myTestX = np.concatenate((self.mnist_train_x,self.mnistFashion_train_x,self.thirdX),axis=0)
index = [i for i in range(np.shape(myTestX)[0])]
random.shuffle(index)
myTestX = myTestX[index]
myTestX = myTestX[0:self.batch_size]
#myTestX = np.concatenate((self.mnist_train_x[0:32],self.mnistFashion_train_x[0:32]),axis=0)
predictions = sess.run(self.output1, feed_dict={self.inputs: myTestX})
predictions = np.reshape(predictions,(-1,28,28))
ims("results/" + "myResults" + str(0) + ".png", merge(predictions[:64], [8, 8]))
myTestX = np.reshape(myTestX,(-1,28,28))
ims("results/" + "myReal" + str(0) + ".png", merge(myTestX[:64], [8, 8]))
totalN = np.shape(testX)[0]
myN = int(totalN/self.batch_size)
myPrediction = self.predict()
totalPredictions = []
myCount = 0
for i in range(myN):
my1 = testX[self.batch_size*i:self.batch_size*(i+1)]
predictions = sess.run(myPrediction,feed_dict={self.inputs:my1})
for k in range(self.batch_size):
totalPredictions.append(predictions[k])
if predictions[k] == 1:
myCount = myCount+1
totalPredictions = np.array(totalPredictions)
print(totalPredictions)
p = myCount
b1 = totalPredictions
r = sess.run(self.output1, feed_dict={self.inputs: mnist_x_test})
r = np.reshape(r,(-1,28,28))
ims("results/" + "my" + str(0) + ".jpg", merge(r[:64], [8, 8]))
def Generate_GAN_Samples(self,n_samples,classN):
myArr = []
for tt in range(classN):
y1 = np.zeros((self.batch_size,4))
y1[:,0] = 1
num1 = int(n_samples/self.batch_size)
for i in range(num1):
batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
g_outputs = self.sess.run(
self.GAN_output,
feed_dict={self.z: batch_z,self.y:y1})
for t1 in range(self.batch_size):
myArr.append(g_outputs[t1])
myArr = np.array(myArr)
return myArr
def train(self):
isFirstStage = False
with tf.Session() as sess:
self.sess = sess
sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
self.saver.restore(sess, 'models/TeacherStudent_WGAN')
# saver to save model
self.saver = tf.train.Saver()
old_Nsamples = 30000
oldX = self.Generate_GAN_Samples(old_Nsamples,2)
oldY = np.zeros((np.shape(oldX)[0],4))
oldY[:,0] = 1
b1 = oldX[0:self.batch_size]
b1 = np.reshape(b1,(-1,28,28))
ims("results/" + "b1" + str(0) + ".jpg", merge(b1[:64], [8, 8]))
testX = oldX
totalN = np.shape(testX)[0]
myN = int(totalN / self.batch_size)
myPrediction = self.predict()
totalPredictions = []
            myCount = 0
import sys, os, time
import numpy as np
import scipy as sci
import scipy.stats as ss
import scipy.sparse.linalg as slin
import copy
from .mytools.MinTree import MinTree
from scipy.sparse import coo_matrix, csr_matrix, lil_matrix
from .mytools.ioutil import loadedge2sm
from .edgepropertyAnalysis import MultiEedgePropBiGraph
import math
from .._model import DMmodel
from spartan.util.basicutil import param_default
from spartan.backend import STensor
def score_level_objects( objscores, p=0.90):
    '''implemented with a Pareto distribution, given a significance value p
    '''
    sortscores = np.sort(objscores)
    sortobjs = np.argsort(objscores)
    alpha = 0.9
    tail_fir_score = np.percentile(sortscores, [alpha * 100])[0]
    if tail_fir_score == 0:
        'remove the zeros if the 90th-percentile score is 0'
        firindex = int(np.argwhere(sortscores > 0)[0])
        sortscores = sortscores[firindex:]
        sortobjs = sortobjs[firindex:]
    'fit a Pareto distribution to the upper 10% tail of the scores'
tailidx = int(alpha * len(sortscores))
tailscores = sortscores[tailidx:]
tailobjs = sortobjs[tailidx:]
shape, pos, scale = ss.pareto.fit(tailscores)
cdfs = ss.pareto.cdf(tailscores, shape, pos, scale)
levelidxs = np.argwhere(cdfs >= p)
levelobjs = tailobjs[levelidxs].T[0]
return levelobjs
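# Example with hypothetical scores: with p=0.95, only objects whose tail CDF under
# the fitted Pareto exceeds 0.95 are returned, e.g.
#   score_level_objects(np.array([0.1, 0.2, 5.0, 0.3, 7.5, 0.2, 6.9]), p=0.95)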
def score_heristic_level_objects( objscores ):
    '''todo: implement with a Pareto distribution, given a significance value
'''
sortscores = sorted(objscores, reverse=True)
sortobjs = np.argsort(objscores)[::-1]
diffscores = - np.diff(sortscores)
levelid = np.argmax(diffscores)
levelobjs = sortobjs[ : levelid+1]
return levelobjs
def nonzero_objects( objscores ):
objects = np.where( objscores > 0 )[0]
return objects
class Ptype(object):
freq =0
ts = 1
rate=2
@staticmethod
def ptype2str(p):
if p == Ptype.freq:
return 'freq'
if p == Ptype.ts:
return 'ts'
if p == Ptype.rate:
return 'rate'
@staticmethod
def ptypes2str(ptypes):
strs=[]
if Ptype.freq in ptypes:
strs.append(Ptype.ptype2str(Ptype.freq))
if Ptype.ts in ptypes:
strs.append(Ptype.ptype2str(Ptype.ts))
if Ptype.rate in ptypes:
strs.append(Ptype.ptype2str(Ptype.rate))
pstr = '-'.join(strs)
return pstr
class HoloScopeOpt:
def __init__(self, graphmat, qfun='exp', b=32,
aggmethod='sum', sdrop=True, mbd=0.5, sdropscale='linear',
tsprop=None, tunit='s', rateprop=None):
        'how many times a user rates customers if he gets the cost balance'
self.coe = 0
        'a larger expbase gives a heavier penalty to the power-law curve'
self.expbase = b
self.scale = qfun
self.b = b
self.aggmethod=aggmethod
        self.suspbd = 0.0  # susp values below suspbd are set to zero
self.priordropslop=sdrop
self.graph=graphmat.tocoo()
self.graphr = self.graph.tocsr()
self.graphc = self.graph.tocsc()
self.matricizetenor=None
self.nU, self.nV=graphmat.shape
self.indegrees = graphmat.sum(0).getA1()
        self.e0 = math.log(graphmat.sum(), self.nU)  # logarithm of the edge count, base nU
print('matrix size: {} x {}\t#edges: {}'.format(self.nU, self.nV,
self.indegrees.sum()))
# tunit is only used for files input
self.tsprop, self.rateprop, self.tunit = tsprop, rateprop, tunit
self.tspim, self.ratepim = None, None
'field for multiple property graph'
if tsprop is not None or rateprop is not None:
if self.priordropslop:
self.orggraph = self.graphr.copy()
else:
self.orggraph = self.graphr
if tsprop is not None:
self.mbd = mbd #multiburst bound
self.tspim = MultiEedgePropBiGraph(self.orggraph)
"""
            Since the data is cut off at the end of the collection period, we
            need enough waiting time (twait) from the last retweet to the end of
            the whole data to judge whether a burst ends in a sudden drop or is
            merely truncated by the end of the data.
            twaits:
"""
if isinstance(tsprop, str) and os.path.isfile(tsprop):
self.tspim.load_from_edgeproperty(tsprop, mtype=coo_matrix,
dtype=np.int64)
twaits = {'s':12*3600, 'h':24, 'd':30, None:0}
twait = twaits[tunit]
elif isinstance(tsprop, STensor):
self.tspim.trans_array_to_edgeproperty(tsprop,
mtype=coo_matrix, dtype=np.int64)
twait = 12
else:
raise Exception('Error: incorrect time stamp property')
self.tspim.setup_ts4all_sinks(twait)
if self.priordropslop:
'slops weighted with max burst value'
self.weightWithDropslop(weighted=True, scale=sdropscale)
else:
self.priordropslop = False #no input of time attribute
if rateprop is not None:
self.ratepim = MultiEedgePropBiGraph(self.orggraph)
if isinstance(rateprop, str) and os.path.isfile(rateprop):
self.ratepim.load_from_edgeproperty(rateprop, mtype=coo_matrix, dtype=float)
elif isinstance(rateprop, STensor):
self.ratepim.trans_array_to_edgeproperty(rateprop,
mtype=coo_matrix, dtype=float)
else:
raise Exception('Error: incorrect rate property')
self.ratepim.setup_rate4all_sinks()
'weighed with idf prior from Fraudar'
#self.weightWithIDFprior()
'if weighted the matrix the windegrees is not equal to indegrees'
self.windegrees = self.graphc.sum(0).getA1()
self.woutdegrees = self.graphr.sum(1).getA1()
self.A = np.array([]) #binary array
        self.fbs = np.zeros(graphmat.shape[1], dtype=np.int64)  # frequency of bs in B
        r'\frac{f_A(b_i)}{f_U(b_i)}'
self.bsusps = np.array([]) # the suspicious scores of products given A
self.vx = 0 # current objective value
self.vxs = [] #record all the vxs of optimizing iterations
self.Y= np.array([])
self.yfbs = np.array([])
self.ybsusps = np.array([])
'current is the best'
self.bestvx = self.vx
self.bestA = np.array([])
self.bestfbs = np.array([])
self.bestbsusps = np.array([])
def weightWithDropslop(self, weighted, scale):
'weight the adjacency matrix with the sudden drop of ts for each col'
if weighted:
colWeights = np.multiply(self.tspim.dropslops, self.tspim.dropfalls)
else:
colWeights = self.tspim.dropslops
if scale == 'logistic':
from scipy.stats import logistic
from sklearn import preprocessing
'zero mean scale'
colWeights = preprocessing.scale(colWeights)
colWeights = logistic.cdf(colWeights)
elif scale == 'linear':
from sklearn import preprocessing
#add a base of suspecious for each edge
colWeights = preprocessing.minmax_scale(colWeights) +1
elif scale == 'plusone':
colWeights += 1
elif scale == 'log1p':
colWeights = np.log1p(colWeights) + 1
else:
print('[Warning] no scale for the prior weight')
n = self.nV
colDiag = lil_matrix((n, n))
colDiag.setdiag(colWeights)
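        # Right-multiplying by this diagonal matrix scales column j of the adjacency matrix by colWeights[j].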
self.graphr = self.graphr * colDiag.tocsr()
self.graph = self.graphr.tocoo(copy=False)
self.graphc = self.graph.tocsc(copy=False)
print("finished computing weight matrix")
def weightWithIDFprior(self):
print('weightd with IDF prior')
colWeights = 1.0/np.log(self.indegrees + 5)
n = self.nV
colDiag = lil_matrix((n, n))
colDiag.setdiag(colWeights)
self.graphr = self.graphr * colDiag.tocsr()
self.graph = self.graphr.tocoo(copy=False)
self.graphc = self.graph.tocsc(copy=False)
return
'new objective with no f_A(v)/|A|'
def maxobjfunc(self, A, fbs, bsusps=None):
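        # Objective: suspiciousness-weighted edge mass contributed by the user set A,
        # normalized by |A| plus the total sink suspiciousness.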
        numA = np.sum(A)
        if numA == 0:
            return 0
        if bsusps is not None:
            nu = np.dot(fbs, bsusps)
            de = numA + bsusps.sum()  # math.sqrt(numA*bsusps.sum()) behaves similarly
        else:
            nu = fbs.sum()
            de = numA
res = nu/np.float64( de )
return res
def aggregationMultiProp(self, mbs, method='sum'):
if method == 'rank':
from scipy.stats import rankdata
rankmethod = 'average'
            k = 60  # constant for reciprocal rank fusion
values = list(mbs.values())
if len(mbs) == 1:
val = values[0]
if method == 'rank':
rb = rankdata(-np.array(val), method=rankmethod)
return np.reciprocal(rb+k) * k
else:
return val
if method == 'sum':
'this is the joint probability of exp form of prob'
bsusps = values[0]
for v in values[1:]:
bsusps += v
elif method == 'rank':
'rank fusion'
arrbsusps = []
for val in values:
rb = rankdata(-np.array(val), method=rankmethod)
arrbsusps.append(np.reciprocal(rb+k))
bsusps = np.array(arrbsusps).sum(0) * k
else:
print('[Error] Invalid method {}\n'.format(method))
return bsusps
#@profile
def evalsusp4ts(self, suspusers, multiburstbd = 0.5, weighted=True):
'the id of suspusers consistently starts from 0 no matter the source'
incnt, inratio = self.tspim.suspburstinvolv(multiburstbd, weighted,
delta=True)
suspts=inratio
return suspts
#@profile
def evalsusp4rate(self, suspusers, neutral=False, scale='max'):
susprates = self.ratepim.suspratedivergence(neutral, delta=True)
if scale == 'max':
if self.ratepim.maxratediv > 0:
nsusprates = susprates/self.ratepim.maxratediv
else:
nsusprates = susprates
elif scale=='minmax':
#need a copy, and do not change susprates' value for delta
from sklearn import preprocessing
nsusprates = preprocessing.minmax_scale(susprates, copy=True)
else:
#no scale
nsusprates = susprates
return nsusprates
'sink suspicious with qfunc, no f_A(v)/|A|'
def prodsuspicious(self, fbs, A=None, scale='exp', ptype=[Ptype.freq]):
multibsusps={}
if Ptype.freq in ptype:
posids = self.windegrees>0
bs = np.zeros(self.nV)
bs[posids] = np.divide(fbs[posids], self.windegrees[posids].astype(np.float64))
multibsusps[Ptype.freq] = bs
if Ptype.ts in ptype:
suspusers = A.nonzero()[0]
bs = self.evalsusp4ts(suspusers, multiburstbd=self.mbd)
multibsusps[Ptype.ts] = bs
if Ptype.rate in ptype:
suspusers = A.nonzero()[0]
bs = self.evalsusp4rate(suspusers)
multibsusps[Ptype.rate] = bs
bsusps = self.aggregationMultiProp(multibsusps, self.aggmethod)
bsusps = self.qfunc(bsusps, fbs=fbs, scale=scale,
numratios=len(multibsusps))
return bsusps
def initpimsuspects(self, suspusers, ptype):
if Ptype.ts in ptype:
self.tspim.setupsuspects(suspusers)
temp1, temp2 = self.tspim.suspburstinvolv(multiburstbd=0.5, weighted=True,
delta=False)
if Ptype.rate in ptype:
self.ratepim.setupsuspects(suspusers)
tmp = self.ratepim.suspratedivergence(neutral=False,
delta=False)
return
def start(self, A0, ptype=[Ptype.ts]):
self.A = A0
users = A0.nonzero()[0]
        self.ptype = ptype  # the property types that the posterior uses
self.fbs = self.graphr[users].sum(0).getA1()
self.fbs = self.fbs.astype(np.float64, copy=False)
        'initially set up current suspects'
self.initpimsuspects(users, ptype=ptype)
self.bsusps = self.prodsuspicious(self.fbs, self.A, ptype=ptype)
self.vx = self.maxobjfunc(self.A, self.fbs, self.bsusps)
self.vxs.append(self.vx)
"current is the best"
self.bestA = np.array(self.A)
self.bestvx = self.vx
self.bestfbs = np.array(self.fbs)
self.bestbsusps = np.array(self.bsusps)
def candidatefbs(self, z):
'increase or decrease'
coef = 1 if self.A[z] == 0 else -1
bz = self.graphr[z]
candfbs = (coef*bz + self.fbs).getA1()
return candfbs
#@profile
def greedyshaving(self):
'''greedy algorithm'''
maxint = np.iinfo(np.int64).max//2
delscores = np.array([maxint]*self.nU)
delcands = self.A.nonzero()[0]
deluserCredit = self.graphr[delcands,:].dot(self.bsusps)
delscores[delcands] = deluserCredit
print('set up the greedy min tree')
MT = MinTree(delscores)
i=0
sizeA = np.sum(self.A)
sizeA0 = sizeA
setA = set(self.A.nonzero()[0])
while len(setA) > 0:
z, nextdelta = MT.getMin()
setY = setA - {z}
Y = copy.copy(self.A) # A is X
Y[z] = 1-Y[z]
self.Y=Y
self.yfbs = self.candidatefbs(z)
Ylist = Y.nonzero()[0]
self.setdeltapimsusp(z, Ylist, add=False)
self.ybsusps = self.prodsuspicious(self.yfbs, self.Y,
ptype=self.ptype)
vy = self.maxobjfunc(self.Y, self.yfbs, self.ybsusps)
'chose next if next if the best'
if vy > self.bestvx:
self.bestA = np.array(self.Y)
self.bestfbs = self.yfbs
self.bestbsusps = self.ybsusps
self.bestvx = vy
MT.changeVal(z, maxint) #make the min to the largest for deletion
prodchange = self.ybsusps - self.bsusps
            effectprod =
<filename>examples/signal_processing_examples/dsp_filters.py
import scipy.signal
import numpy as np
import sys
"""
This module shows how to use map_element in IoTPy
to build a library of classes for filtering streams
by encapsulating software from scipy.signal and
other software libraries.
The module consists of a base Filter class and
specific filters --- such as bandpass IIR filters ---
that are subclasses of Filter.
"""
import os
import matplotlib.pyplot as plt
from scipy.signal import butter, firwin
sys.path.append(os.path.abspath("../../IoTPy/core"))
sys.path.append(os.path.abspath("../../IoTPy/helper_functions"))
sys.path.append(os.path.abspath("../../IoTPy/agent_types"))
from generate_waves import generate_sine_wave, plot_signal
# stream is in ../../IoTPy/core
from stream import Stream, StreamArray
# op, merge are in ../../IoTPy/agent_types
from op import map_window_list, map_element
from merge import merge_window
# recent_values is in ../../IoTPy/helper_functions
from recent_values import recent_values
from basics import merge_w
# window_dot_product is in .
from window_dot_product import window_dot_product
def reverse_array(b):
"""
Reverse array b. Same as numpy.flip
Parameters
----------
b: list or array
Returns
-------
reverse_b: array
flips b.
If b is [0, 1, 2] then reverse_b is [2, 1, 0]
"""
b = np.array(b)
M = len(b)
reverse_b = np.zeros(M)
for i in range(M):
reverse_b[i] = b[M - 1 - i]
return reverse_b
def bandpass_FIR(in_stream, out_stream, b):
"""
Creates an agent that executes a
FIR (Finite Impulse Response) filter of
in_stream to produce out_stream using
filter parameter b.
Parameters
----------
in_stream: Stream
out_stream: Stream
b: array
Note
----
out_stream[n] = sum over k in [0, M] of b[k]*in_stream[n-k]
Therefore, setting k to M-k:
out_stream[n] = sum over k in [0, M] of b[M-k]*in_stream[n-M+k]
So, out_stream[n] is the dot product of the reverse of b and
the in_stream window consisting of: in_stream[n-M]... in_stream[M].
"""
reverse_b = reverse_array(b)
window_dot_product(in_stream, out_stream, multiplicand_vector=reverse_b)
def bandpass_IIR(in_stream, out_stream, b, a):
"""
Creates an agent that executes a
IIR (Infinite Impulse Response) filter of
in_stream to produce out_stream using
filter parameters b, a.
Parameters
----------
in_stream: Stream
out_stream: Stream
b: array
a: array
Note
----
out_stream[n] = B - A where
B is sum of k over [0, .., M] of b[k]*in_stream[n-k]
A is sum of k over [1,..., M] of a[k]*out_stream[n-k]
We convert these equations to equations over windows
starting at the index n-M, by substituting M-k for k to get:
B is sum of k over [0, .., M] of b[M-k]*in_stream[n-M+k]
A is sum of k over [0,..., M-1] of a[M-k]*out_stream[n-M+k]
The window operations in function f implement these
equations.
"""
reverse_b = reverse_array(b)
reverse_a = reverse_array(a[1:])
M = len(b)
# Initialize the streams so that windows of size M
# can be merged.
out_stream.extend(np.zeros(M))
in_stream.extend(np.zeros(M))
@merge_w
def f(windows, b, a):
# This function operates on two windows, x_window
# which is a window into the input stream, and
# y_window which is a window into the output stream.
# np.dot is the dot product.
x_window, y_window = windows
return np.dot(b, x_window) - np.dot(a, y_window[1:])
# Create the agent.
# Note that the output stream of the agent gets fed back as an
# input stream.
f(in_streams=[in_stream, out_stream], out_stream=out_stream,
window_size=M, step_size=1, b=reverse_b, a=reverse_a)
#---------------------------------------------------------------
#---------------------------------------------------------------
# EXAMPLES OF CLASS BASED FILTERS
#---------------------------------------------------------------
#---------------------------------------------------------------
#---------------------------------------------------------------
# FILTER BASE CLASS
#---------------------------------------------------------------
class Filter(object):
"""
    The base class of filters that filter input streams to
    produce output streams.
Uses a, b parameters from scipy.signal or other libraries.
Parameters
----------
a, b: list of float
Parameters that define a filter.
For a Finite Impulse Response filter, a is None.
Attributes
----------
N: int
the length of b
x: array of float
The N most recent values of the input stream.
x[0] is the most recent value. For example if
the current value of in_stream is:
in_stream[j-N+1, ... j] then
x[k] = in_stream[j - k]
x is initialized to 0.
y: array of float
The N most recent values of the output stream.
Initialized to 0.
y and x have the same structure.
"""
def __init__(self, b, a=None):
self.b = np.array(b)
if a is not None:
self.a = np.array(a)
self.N = len(b)
self.x = np.zeros(self.N)
self.y = np.zeros(self.N)
def filter_element(self, element):
"""
This is the standard filter calculation.
The formula depends on the type of filter.
The formula must be entered for the subclass
derived from the base Filter class.
Parameters
----------
element: float or int
The next element of the stream.
"""
pass
def filter_stream(self, in_stream, out_stream):
"""
Filters the input stream to get the output stream using
the function filter_element.
"""
map_element(self.filter_element, in_stream, out_stream)
#---------------------------------------------------------------
# CLASS BANDPASS IIR FILTER
#---------------------------------------------------------------
class BP_IIR(Filter):
"""
Bandpass IIR (Infinite Impulse Response) Filter.
Parameters
----------
a, b: float or int
Filter parameters obtained from scipy.signal.butter
or other sources.
"""
def __init__(self, b, a):
Filter.__init__(self, b, a)
def filter_element(self, element):
"""
Uses a standard formula for IIR filters.
        Shifts x, y to the right by 1 to accommodate
        the new entry for input x, and then updates y[0].
"""
# Insert a new value -- element --- into x.
# First shift x to the right by 1.
self.x[1:] = self.x[:- 1]
self.x[0] = element
# Insert a new value into y.
# First shift y to the right by 1.
self.y[1:] = self.y[:-1]
# Compute new value for y[0]
self.y[0] = self.b[0] * self.x[0]
self.y[0] += sum(self.b[1:]*self.x[1:] -
self.a[1:]*self.y[1:])
return self.y[0]
#---------------------------------------------------------------
# CLASS BANDPASS FIR FILTER
#---------------------------------------------------------------
class BP_FIR(Filter):
"""
Bandpass FIR (Finite Impulse Response) Filter.
Parameters
----------
b: float or int
Filter parameters obtained from scipy.signal.butter
or other sources.
"""
def __init__(self, b):
Filter.__init__(self, b)
def filter_element(self, element):
"""
Uses a standard formula for FIR filters.
        Shifts x to the right by 1 to accommodate
        the new entry for input x, and computes the output.
"""
        # Accommodate the new entry, element, in the input
# stream x.
self.x[1:] = self.x[:- 1]
self.x[0] = element
return np.sum(self.b * self.x)
#---------------------------------------------------------------
# GET FILTER PARAMETERS
#---------------------------------------------------------------
# See scipy.signal for a library of filters.
def butter_bandpass(lowcut, highcut, fs, order=2):
"""
Butterworth IIR filter.
butter() is from scipy.signal
"""
lowcut, highcut = lowcut*2.0/fs, highcut*2.0/fs
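    # The factor 2.0/fs above rescales the cutoffs to the [0, 1] Nyquist-normalized range expected by butter.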
b, a = butter(order, [lowcut, highcut], btype='band')
return b, a
def fir_bandpass(lowcut, highcut, fs):
"""
FIR filter.
firwin() is from scipy.signal
"""
    lowcut, highcut = lowcut*2.0/fs, highcut*2.0/fs
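    # The factor 2.0/fs above rescales the cutoffs to the [0, 1] Nyquist-normalized range expected by firwin.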
b = firwin(
numtaps=201, cutoff = [lowcut, highcut],
window='blackmanharris', pass_zero=False)
return b
#----------------------------------------------------------------
# TESTS
#----------------------------------------------------------------
def generate_test_waves(
low_frequency, medium_frequency, high_frequency,
max_amplitude, phase_shift, sample_rate, time_duration):
# Generate streams of waves with different frequencies,
# amplitudes and phase shifts. Each wave is a pure
# frequency. Return a wave that combines frequencies.
wave_data_low_frequency = generate_sine_wave(
low_frequency, max_amplitude, phase_shift,
sample_rate, time_duration)
wave_data_medium_frequency = generate_sine_wave(
medium_frequency, max_amplitude, phase_shift,
sample_rate, time_duration)
wave_data_high_frequency = generate_sine_wave(
high_frequency, max_amplitude, phase_shift,
sample_rate, time_duration)
# Generate a wave that is the sum of pure-frequency
# waves.
return (wave_data_low_frequency +
wave_data_medium_frequency +
wave_data_high_frequency)
def drive_input_and_plot_output(
input_signal, x, y):
# Put data into the input stream of the filter.
x.extend(input_signal)
# Run a step and plot output.
Stream.scheduler.step()
# We now have values in in_stream and out_stream.
# Next plot the recent values of the streams
# Get the most recent values of streams x, y.
before_filtering_data = recent_values(x)
after_filtering_data = recent_values(y)
# Plot
plt.figure(1)
plt.subplot(211)
plt.plot(before_filtering_data)
plt.subplot(212)
plt.plot(after_filtering_data)
plt.show()
def setup_parameters():
"""
This test generates waves with low, medium and high
frequency, and sums these three waves to get a combined
frequency wave. Then it puts the combined frequency
wave through a bandpass filter which passes only the
medium-frequency wave through.
The output plot has the combined frequency wave in the
first subplot and the filter output in the second subplot.
"""
# SET PARAMETERS
# fs: sample rate
# order: order of the filter
# lowcut, highcut: lower and upper thresholds of a bandpass
# filter.
fs, order, lowcut, highcut = 50.0, 2, 1.0, 5.0
input_signal = generate_test_waves(
low_frequency=0.25, medium_frequency=2.5, high_frequency=15.0,
max_amplitude=1, phase_shift=0.0, sample_rate=fs,
time_duration=10.0)
return fs, order, lowcut, highcut, input_signal
def test_bandpass_IIR_filter():
# Set up parameters
fs, order, lowcut, highcut, input_signal = setup_parameters()
x = StreamArray('x')
y = StreamArray('y')
# Create bandpass filter
b, a = butter_bandpass(lowcut, highcut, fs, order)
BP_IIR(b, a).filter_stream(in_stream=x, out_stream=y)
# Plot output
drive_input_and_plot_output(input_signal, x, y)
def test_bandpass_FIR_filter():
# Set up parameters
fs, order, lowcut, highcut, input_signal = setup_parameters()
x = StreamArray('x')
y = StreamArray('y')
# Create a bandpass filter.
    b = fir_bandpass(lowcut, highcut, fs)
        na_std = []
for j in range(len(bp_shear[0])):
na_std.append([
np.std(bp_shear[:, j]), np.std(bp_stretch[:, j]), np.std(bp_stagger[:, j]),
            np.std(bp_buckle[:, j]), np.std(bp_prop[:, j]), np.std(bp_open[:, j]), np.std(bp_shift[:, j]),
np.std(bp_slide[:, j]), np.std(bp_rise[:, j]), np.std(bp_tilt[:, j]), np.std(bp_roll[:, j]),
np.std(bp_twist[:, j])])
na_std = np.array(na_std)
return na_std
    def plot(self, **kwargs):
        """Plot time-averaged base parameters for each base pair in a 1D graph.
        One plot is produced for each parameter. It shows the mean and
        standard deviation for each individual base pair. Each plot is saved to
        a PNG file named "<parameter_name>.png".
Parameters
----------
ax : matplotlib.pyplot.Axes (optional)
Provide `ax` to have all plots plotted in the same axes.
"""
na_avg, na_std = self.mean_std()
for k in range(len(na_avg[0])):
ax = kwargs.pop('ax', plt.subplot(111))
x = list(range(1, len(na_avg[:, k]) + 1))
ax.errorbar(x, na_avg[:, k], yerr=na_std[:, k], fmt='-o')
ax.set_xlim(0, len(na_avg[:, k]) + 1)
ax.set_xlabel(r"Nucleic Acid Number")
            param = list(self.profiles.values())[0].dtype.names[k]
            if param in ["Shear", "Stretch", "Stagger", "Rise", "Shift", "Slide"]:
                ax.set_ylabel(r"{0!s} ($\AA$)".format(param))
            else:
                ax.set_ylabel("{0!s} (deg)".format(param))
            ax.figure.savefig("{0!s}.png".format(param))
ax.figure.clf()
def sorted_profiles_iter(self):
"""Return an iterator over profiles sorted by frame/order parameter.
The iterator produces tuples ``(q, profile)``. Typically, `q` is the
frame number.
"""
if self.profiles is None:
return
for q in sorted(self.profiles):
yield (q, self.profiles[q])
__iter__ = sorted_profiles_iter
class X3DNA(BaseX3DNA):
"""Run X3DNA_ on a single frame or a DCD trajectory.
Only a subset of all X3DNA control parameters is supported and can be set
with keyword arguments. For further details on X3DNA_ see the `X3DNA docs`_.
Running X3DNA with the :class:`X3DNA` class is a 3-step process:
1. set up the class with all desired parameters
2. run X3DNA with :meth:`X3DNA.run`
3. collect the data from the output file with :meth:`X3DNA.collect`
The class also provides some simple plotting functions of the collected
data such as :meth:`X3DNA.plot` or :meth:`X3DNA.plot3D`.
    When methods return helicoidal base pair parameters as lists, the order
    is always
====== ==============
index parameter
====== ==============
0 shear
1 stretch
2 stagger
3 buckle
4 propeller
5 opening
6 shift
7 slide
8 rise
9 tilt
10 roll
11 twist
====== ==============
.. versionadded:: 0.8
.. _`X3DNA docs`: http://forum.x3dna.org/
"""
def __init__(self, filename, **kwargs):
"""Set up parameters to run X3DNA_ on PDB *filename*.
Parameters
----------
filename : str
The `filename` is used as input for X3DNA in the
:program:`xdna_ensemble` command. It specifies the name of a
PDB coordinate file to be used. This must be in Brookhaven
protein databank format or something closely approximating
this.
        executable : str (optional)
            Path to the :program:`xdna_ensemble` executable. The X3DNA
            directories (e.g. ``/opt/x3dna/2.1`` and ``/opt/x3dna/2.1/bin``)
            must be on the PATH (e.g. exported in your bashrc). See the X3DNA
            documentation for set-up instructions.
x3dna_param : bool (optional)
Determines whether base step or base pair parameters will be
calculated. If ``True`` (default) then stacked *base step*
parameters will be analyzed. If ``False`` then stacked *base
pair* parameters will be analyzed.
logfile : str (optional)
Write output from X3DNA to `logfile` (default: "bp_step.par")
See Also
--------
:class:`X3DNAtraj`
"""
# list of temporary files, to be cleaned up on __del__
self.tempfiles = [
"auxiliary.par", "bestpairs.pdb", "bp_order.dat", "bp_helical.par", "cf_7methods.par",
"col_chains.scr", "col_helices.scr", "hel_regions.pdb", "ref_frames.dat", "hstacking.pdb", "stacking.pdb"
]
self.tempdirs = []
self.filename = filename
logger.info("Setting up X3DNA analysis for %(filename)r", vars(self))
# guess executables
self.exe = {}
x3dna_exe_name = kwargs.pop('executable', 'xdna_ensemble')
self.x3dna_param = kwargs.pop('x3dna_param', True)
self.exe['xdna_ensemble'] = which(x3dna_exe_name)
if self.exe['xdna_ensemble'] is None:
errmsg = "X3DNA binary {x3dna_exe_name!r} not found.".format(**vars())
logger.fatal(errmsg)
logger.fatal("%(x3dna_exe_name)r must be on the PATH or provided as keyword argument 'executable'.",
vars())
raise OSError(errno.ENOENT, errmsg)
x3dnapath = os.path.dirname(self.exe['xdna_ensemble'])
self.logfile = kwargs.pop("logfile", "bp_step.par")
if self.x3dna_param is False:
self.template = textwrap.dedent("""x3dna_ensemble analyze -b 355d.bps --one %(filename)r """)
else:
self.template = textwrap.dedent("""find_pair -s %(filename)r stdout |analyze stdin """)
# sanity checks
for program, path in self.exe.items():
if path is None or which(path) is None:
logger.error("Executable %(program)r not found, should have been %(path)r.",
vars())
# results
self.profiles = OrderedDict()
def run(self, **kwargs):
"""Run X3DNA on the input file."""
inpname = kwargs.pop("inpfile", None)
outname = kwargs.pop("outfile", self.logfile)
x3dnaargs = vars(self).copy()
x3dnaargs.update(kwargs)
x3dna_param = kwargs.pop('x3dna_param', self.x3dna_param)
inp = self.template % x3dnaargs
if inpname:
with open(inpname, "w") as f:
f.write(inp)
logger.debug("Wrote X3DNA input file %r for inspection", inpname)
logger.info("Starting X3DNA on %(filename)r (trajectory: %(dcd)r)", x3dnaargs)
logger.debug("%s", self.exe['xdna_ensemble'])
with open(outname, "w") as output:
            returncode = subprocess.call([inp], shell=True)
        with open(outname, "r") as output:
            # X3DNA is not very good at setting returncodes so check ourselves
            for line in output:
                if line.strip().startswith(('*** ERROR ***', 'ERROR')):
                    returncode = 255
                    break
        if returncode != 0:
            logger.fatal("X3DNA Failure (%d). Check output %r", returncode, outname)
logger.info("X3DNA finished: output file %(outname)r", vars())
def collect(self, **kwargs):
"""Parse the output from a X3DNA run into numpy recarrays.
Can deal with outputs containing multiple frames.
The method saves the result as :attr:`X3DNA.profiles`, a dictionary
indexed by the frame number. Each entry is a
:class:`np.recarray`.
If the keyword `outdir` is supplied (e.g. ".") then each profile is
saved to a gzipped data file.
Parameters
----------
        run : str, int (optional)
            identifier, free form [1]
outdir : str (optional)
save output data under `outdir`/`run` if set to any other
value but ``None`` [``None``]
"""
# Shear Stretch Stagger Buckle Prop-Tw Opening Shift Slide Rise Tilt Roll Twist
#0123456789.0123456789.0123456789.0123456789.0123456789.0123456789.123456789.123456789.123456789.123456789.123456789.123456789.123456789.
# 11 22 33 44
#T-A -0.033 -0.176 0.158 -12.177 -8.979 1.440 0.000 0.000 0.000 0.000 0.000 0.000
#C-G -0.529 0.122 -0.002 -7.983 -10.083 -0.091 -0.911 1.375 3.213 -0.766 -4.065 41.492
# only parse bp_step.par
x3dna_output = kwargs.pop("x3dnaout", self.logfile)
run = kwargs.pop("run", 1) # id number
outdir = kwargs.pop("outdir", os.path.curdir)
logger.info("Collecting X3DNA profiles for run with id %s", run)
length = 1 # length of trajectory --- is this really needed?? No... just for info
if '*' in self.filename:
import glob
filenames = glob.glob(self.filename)
length = len(filenames)
if length == 0:
logger.error("Glob pattern %r did not find any files.", self.filename)
raise ValueError("Glob pattern {0!r} did not find any files.".format(self.filename))
logger.info("Found %d input files based on glob pattern %s", length, self.filename)
# one recarray for each frame, indexed by frame number
self.profiles = OrderedDict()
logger.info("Run %s: Reading %d X3DNA profiles from %r", run, length, x3dna_output)
x3dna_profile_no = 0
records = []
with open(x3dna_output, "r") as x3dna:
read_data = False
for line in x3dna:
line = line.rstrip() # preserve columns (FORTRAN output...)
if self.x3dna_param is False:
if line.startswith("# Shear"):
read_data = True
logger.debug("Started reading data")
fields = line.split()
x3dna_profile_no = 1 # single-frame output; numbering kept for parity with the HOLE plugin
records = []
continue
if read_data:
if len(line.strip()) != 0:
try:
Sequence, Shear, Stretch, Stagger, Buckle, Propeller, Opening, Shift, Slide, Rise, \
Tilt, Roll, Twist = line.split()
except ValueError:
logger.critical("Run %s: Problem parsing line %r", run, line.strip())
logger.exception("Check input file %r.", x3dna_output)
raise
records.append(
[float(Shear), float(Stretch), float(Stagger), float(Buckle), float(Propeller),
float(Opening), float(Shift), float(Slide), float(Rise), float(Tilt), float(Roll),
float(Twist)])
continue
else:
# end of records (empty line)
read_data = False
else:
if line.startswith("# Shift"):
read_data = True
logger.debug("Started reading data")
fields = line.split()
x3dna_profile_no = 1 # single-frame output; numbering kept for parity with the HOLE plugin
records = []
continue
if read_data:
if len(line.strip()) != 0:
try:
Sequence, Shift, Slide, Rise, Tilt, Roll, Twist = line.split()
except ValueError:
logger.critical("Run %s: Problem parsing line %r", run, line.strip())
logger.exception("Check input file %r.", x3dna_output)
raise
records.append(
[float(Shift), float(Slide), float(Rise), float(Tilt), float(Roll), float(Twist)])
continue
else:
# end of records (empty line)
read_data = False
if self.x3dna_param is False:
frame_x3dna_output = np.rec.fromrecords(records, formats="f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8",
names="Shear,Stretch,Stagger,Buckle,Propeller,Opening,"
"Shift,Slide,Rise,Tilt,Roll,Twist")
else:
frame_x3dna_output = np.rec.fromrecords(records, formats="f8,f8,f8,f8,f8,f8",
names="Shift,Slide,Rise,Tilt,Roll,Twist")
# store the profile
self.profiles[x3dna_profile_no] = frame_x3dna_output
logger.debug("Collected X3DNA profile for frame %d (%d datapoints)",
x3dna_profile_no, len(frame_x3dna_output))
# save a profile for each frame (for debugging and scripted processing)
# a tmp folder for each trajectory
if outdir is not None:
rundir = os.path.join(outdir, "run_" + str(run))
os.system("rm -f tmp*.out")
if not os.path.exists(rundir):
os.makedirs(rundir)
frame_x3dna_txt = os.path.join(rundir, "bp_step_{0!s}_{1:04d}.dat.gz".format(run, x3dna_profile_no))
np.savetxt(frame_x3dna_txt, frame_x3dna_output)
logger.debug("Finished with frame %d, saved as %r", x3dna_profile_no, frame_x3dna_txt)
# sanity check: we expect exactly one profile per input file
if len(self.profiles) == length:
logger.info("Collected X3DNA profiles for %d frames", len(self.profiles))
else:
logger.warning("Missing data: Found %d | |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements the abstract Conary Model, as well as the Conary Model
Language (CML) serialization of the abstract model. This conary
model is written explicitly in terms of labels and versions, and is
interpreted relative to system configuration items such as flavor,
pinTroves, excludeTroves, and so forth.
"""
import shlex
from conary import errors
from conary import trovetup
from conary import versions
from conary.conaryclient import cmdline
from conary.conaryclient.update import UpdateError
from conary.lib.compat import namedtuple as _namedtuple
# The schema for a system model is, roughly:
#
# searchOp := troveTuples or label
# systemOp := searchOp or list of troveOperations
# troveOperations := updateTroves | eraseTroves | installTroves | patchTroves
# | offerTroves | searchOp
# updateTroves := list of troveTuples
# eraseTroves := list of troveTuples
# installTroves := list of troveTuples
# patchTroves := list of troveTuples
# offerTroves := list of troveTuples
# includeTrove := troveTuple
# There are four kinds of string formatting used in these objects:
# * __str__() is the most minimal representation of the contents as
# a python string
# * __repr__() is used only for good representation in debugging contexts
# * asString() is the string representation as it will be consumed,
# with shlex if appropriate for that object type
# * format() (defined for types that represent file contents) has the
# CML file representation, including type/key
def shellStr(s):
if len(shlex.split(s)) > 1:
return "'%s'" % s
return s
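# Illustrative behaviour (assuming POSIX-style shlex splitting):
#   shellStr("foo")      -> "foo"
#   shellStr("foo bar")  -> "'foo bar'"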
class CMError(UpdateError):
pass
class CMLocation(_namedtuple('CMLocation', 'line context op spec')):
"""
line: line number (should be 1-indexed)
context: file name or other similar context, or C{None}
op: containing operation, or C{None}
spec: containing trove specification, or C{None}
"""
def __new__(cls, line, context=None, op=None, spec=None):
if isinstance(line, cls):
if context is None:
context = line.context
if op is None:
op = line.op
if spec is None:
spec = line.spec
line = line.line
return tuple.__new__(cls, (line, context, op, spec))
def __repr__(self):
op = None
if self.op:
op = self.op
spec = None
if self.spec:
spec = self.spec
return "%s(line=%r, context=%r, op=%r, spec=%r)" % (
self.__class__.__name__, self.line, self.context, op, spec)
def __str__(self):
if self.context:
context = str(self.context)
else:
context = ''
if self.spec:
spec = self.spec.asString()
else:
spec = ''
if self.line is None:
line = 'new-line'
else:
line = str(self.line)
return ':'.join((x for x in (context, line, spec) if x))
asString = __str__
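# Illustrative: str(CMLocation(3, context='system-model')) renders as
# 'system-model:3'; when line is None, the literal 'new-line' is used instead.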
class CMTroveSpec(trovetup.TroveSpec):
'''
Like parent class L{trovetup.TroveSpec} except that:
- Parses a version separator of C{==} to be like C{=} but sets
the C{pinned} member to C{True} (defaults to C{False}).
- Has a C{snapshot} member that determines whether the version
should be updated to latest, and a C{labelSpec()} method
used to get the label on which to look for the latest version.
Note that equality is tested only on name, version, and flavor,
and that it is acceptable to test equality against an instance of
C{trovetup.TroveSpec} or a simple C{(name, version, flavor)}
tuple.
'''
def __new__(cls, name, version=None, flavor=None, **kwargs):
if isinstance(name, (tuple, list)):
name = list(name)
name[0] = name[0].replace('==', '=')
else:
name = name.replace('==', '=')
name, version, flavor = trovetup.TroveSpec(
name, version, flavor, **kwargs)
newTuple = tuple.__new__(cls, (name, version, flavor))
if newTuple.version:
newTuple.pinned = '==' in newTuple.version
newTuple.local = '@local' in newTuple.version
newTuple._has_branch = '/' in newTuple.version[1:]
else:
newTuple.pinned = False
newTuple._has_branch = False
newTuple.local = False
newTuple.snapshot = (not newTuple.pinned and not newTuple.local
and newTuple._has_branch)
return newTuple
def __init__(self, *args, **kwargs):
self.pinned = '==' in args[0]
if self.version is not None:
self._has_branch = '/' in self.version[1:]
self.local = '@local' in self.version
else:
self._has_branch = False
self.local = False
self.snapshot = not self.pinned and not self.local and self._has_branch
def labelSpec(self):
# This is used only to look up newest versions on a label
assert(self._has_branch)
return self.name, self.version.rsplit('/', 1)[0], self.flavor
def asString(self, withTimestamp=False):
s = trovetup.TroveSpec.asString(self, withTimestamp=withTimestamp)
if self.pinned:
s = s.replace('=', '==', 1)
return s
__str__ = asString
format = asString
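# Illustrative: CMTroveSpec('pkg==1.0') parses '==' as a pinned version
# separator (pinned == True) and asString() renders it back as 'pkg==1.0',
# while equality still matches the plain tuple ('pkg', '1.0', None).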
def __eq__(self, other):
# We need to use indices so that we can compare to pure tuples,
# as well as to trovetup.TroveSpec and to CMTroveSpec
if not isinstance(other, tuple):
return False
return self[0:3] == other[0:3]
# CMTroveSpec objects are pickled into the model cache, but there
# only the TroveSpec parts are used
def __getnewargs__(self):
return (self.name, self.version, self.flavor)
def __getstate__(self):
return None
def __setstate__(self, state):
pass
class _CMOperation(object):
def __init__(self, text=None, item=None, modified=True,
index=None, context=None):
self.modified = modified
self.index = index
self.context = context
assert(text is not None or item is not None)
assert(text is None or item is None) # but not both
if item is not None:
self.item = item
else:
self.parse(text=text)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.index == other.index and
self.modified == other.modified and
self.context == other.context and
self.item == other.item)
def __iter__(self):
yield self.item
def getLocation(self, spec = None):
return CMLocation(self.index, context = self.context, op = self,
spec = spec)
def update(self, item, modified=True):
self.parse(item)
self.modified = modified
def parse(self, text=None):
raise NotImplementedError
def format(self):
return self.key + ' ' + self.asString()
def __str__(self):
return str(self.item)
def __repr__(self):
return "%s(text='%s', modified=%s, index=%s)" % (
self.__class__.__name__,
self.asString(), self.modified, self.index)
class SearchOperation(_CMOperation):
key = 'search'
def asString(self):
return shellStr(self.item.asString())
class SearchTrove(SearchOperation):
def parse(self, text):
self.item = CMTroveSpec(text)
class SearchLabel(SearchOperation):
def parse(self, text):
self.item = versions.Label(text)
class IncludeOperation(_CMOperation):
key = 'include'
def asString(self):
return shellStr(self.item.asString())
def parse(self, text):
self.item = CMTroveSpec(text)
class _TextOp(_CMOperation):
def parse(self, text):
self.item = text
def __str__(self):
return self.item
asString = __str__
def __repr__(self):
return "%s(text='%s', modified=%s, index=%s)" % (
self.__class__.__name__, self.item, self.modified, self.index)
class NoOperation(_TextOp):
'Represents comments and blank lines'
format = _TextOp.__str__
class VersionOperation(_TextOp):
'''
Version string for this model. This is not a schema version;
it is a version identifier for the contents of the model.
This must be a legal conary upstream version, because it is
used to provide the conary upstream version when building the
model into a group.
'''
key = 'version'
def parse(self, text):
# ensure that this is a legal conary upstream version
rev = versions.Revision(text + '-1')
if rev.buildCount is not None:
raise errors.ParseError('%s: not a conary upstream version' % text)
_TextOp.parse(self, text)
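# Illustrative: parse('1.0') passes, since Revision('1.0-1') has no build
# count, while parse('1.0-1') would raise ParseError because '1.0-1-1'
# carries a build count.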
class TroveOperation(_CMOperation):
def parse(self, text):
if isinstance(text, str):
text = [text]
self.item = [CMTroveSpec(x) for x in text]
def isEmpty(self):
return not(self.item)
def removeSpec(self, spec):
self.item.remove(spec)
self.modified = True
def replaceSpec(self, spec, newSpec):
i = self.item.index(spec)
self.item[i] = newSpec
self.modified = True
def __repr__(self):
return "%s(text=%s, modified=%s, index=%s)" % (
self.__class__.__name__,
str([x.asString() for x in self.item]),
self.modified, self.index)
def __str__(self):
return ' '.join(x.asString() for x in self.item)
def __iter__(self):
return iter(self.item)
def asString(self):
return ' '.join(shellStr(x.asString()) for x in self.item)
class UpdateTroveOperation(TroveOperation):
key = 'update'
class EraseTroveOperation(TroveOperation):
key = 'erase'
class InstallTroveOperation(TroveOperation):
key = 'install'
class OfferTroveOperation(TroveOperation):
key = 'offer'
class PatchTroveOperation(TroveOperation):
key = 'patch'
opMap = {
UpdateTroveOperation.key : UpdateTroveOperation,
EraseTroveOperation.key : EraseTroveOperation,
InstallTroveOperation.key : InstallTroveOperation,
OfferTroveOperation.key : OfferTroveOperation,
PatchTroveOperation.key : PatchTroveOperation,
IncludeOperation.key : IncludeOperation,
}
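# Illustrative dispatch: opMap['erase']('pkg') builds an EraseTroveOperation
# whose format() renders as 'erase pkg'.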
class CM:
# Make the operation objects available via models, avoiding the
# need to import this module when a model is provided
SearchTrove = SearchTrove
SearchLabel = SearchLabel
SearchOperation = SearchOperation
IncludeOperation = IncludeOperation
NoOperation = NoOperation
UpdateTroveOperation = UpdateTroveOperation
EraseTroveOperation = EraseTroveOperation
InstallTroveOperation = InstallTroveOperation
OfferTroveOperation = OfferTroveOperation
PatchTroveOperation = PatchTroveOperation
VersionOperation = VersionOperation
def __init__(self, cfg, context=None):
'''
@type cfg: L{conarycfg.ConaryConfiguration}
@param context: optional description of source of data (e.g. filename)
@type context: string
'''
self.cfg = cfg
self.context = context
self.reset()
def reset(self):
self.modelOps = []
self.noOps = []
self.indexes = {}
self.version = None
# Keep track of modifications that do not involve setting
# an operation as modified
from astropy.io import fits as pyfits
import numpy
from scipy import ndimage
from scipy import stats
__author__ = "<NAME>"
__credit__ = ['<NAME>', '<NAME>', '<NAME>']
__copyright__ = "Copyright 2020, <NAME>"
__license__ = "MIT"
__url__ = 'https://github.com/brandherd/PyCosmic'
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__version__ = "0.6"
class Image(object):
def __init__(self, data=None, error=None, mask=None):
self._data = data
if self._data is not None:
self._dim = self._data.shape
else:
self._dim = None
self._mask = mask
self._error = error
def __add__(self, other):
"""
Operator to add two Images or add another type if possible
"""
if isinstance(other, Image):
# define behaviour if the other is of the same instance
img = Image()
# add data if contained in both
if self._data is not None and other._data is not None:
new_data = self._data+other._data
img.data = new_data
else:
img.data = self._data
# add error if contained in both
if self._error is not None and other._error is not None:
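# independent errors add in quadrature: sigma(a+b) = sqrt(sa**2 + sb**2)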
new_error = numpy.sqrt(self._error**2+other._error**2)
img.error = new_error
else:
img.error = self._error
# combined mask of valid pixels if contained in both
if self._mask is not None and other._mask is not None:
new_mask = numpy.logical_or(self._mask, other._mask)
img.mask = new_mask
else:
img.mask = self._mask
return img
elif isinstance(other, numpy.ndarray):
img = Image(error=self._error, mask=self._mask)
if self._data is not None: # check if there is data in the object
dim = other.shape
new_data = self._data # default if no broadcast rule below matches
# add ndarray according to its dimensions
if self._dim == dim:
new_data = self._data+other
elif len(dim) == 1:
if self._dim[0] == dim[0]:
new_data = self._data+other[:, numpy.newaxis]
elif self._dim[1] == dim[0]:
new_data = self._data+other[numpy.newaxis, :]
else:
new_data = self._data
img.data = new_data
return img
else:
# try to do addition for other types, e.g. float, int, etc.
try:
new_data = self._data+other
img = Image(data=new_data, error=self._error, mask=self._mask)
return img
except Exception:
# raise a TypeError if the operand types do not match at all
raise TypeError("unsupported operand type(s) for +: %s and %s" %
(str(type(self)).split("'")[1], str(type(other)).split("'")[1]))
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""
Operator to subtract two Images or subtract another type if possible
"""
if isinstance(other, Image):
# define behaviour if the other is of the same instance
img = Image()
# subtract data if contained in both
if self._data is not None and other._data is not None:
new_data = self._data-other._data
img.data = new_data
else:
img.data = self._data
# add error if contained in both
if self._error is not None and other._error is not None:
new_error = numpy.sqrt(self._error**2+other._error**2)
img.error = new_error
else:
img.error = self._error
# combined mask of valid pixels if contained in both
if self._mask is not None and other._mask is not None:
new_mask = numpy.logical_or(self._mask, other._mask)
img.mask = new_mask
else:
img.mask = self._mask
return img
elif isinstance(other, numpy.ndarray):
img = Image(error=self._error, mask=self._mask)
if self._data is not None: # check if there is data in the object
dim = other.shape
new_data = self._data # default if no broadcast rule below matches
# add ndarray according to its dimensions
if self._dim == dim:
new_data = self._data-other
elif len(dim) == 1:
if self._dim[0] == dim[0]:
new_data = self._data-other[:, numpy.newaxis]
elif self._dim[1] == dim[0]:
new_data = self._data-other[numpy.newaxis, :]
else:
new_data = self._data
img.data = new_data
return img
else:
# try to do subtraction for other types, e.g. float, int, etc.
try:
new_data = self._data-other
img = Image(data=new_data, error=self._error, mask=self._mask)
return img
except Exception:
# raise a TypeError if the operand types do not match at all
raise TypeError("unsupported operand type(s) for -: %s and %s" %
(str(type(self)).split("'")[1], str(type(other)).split("'")[1]))
def __truediv__(self, other):
"""
Operator to divide two Images or divide by another type if possible
"""
if isinstance(other, Image):
# define behaviour if the other is of the same instance
img = Image()
# divide data if contained in both
if self._data is not None and other._data is not None:
new_data = self._data/other._data
img.data = new_data
else:
img.data = self._data
# add error if contained in both
if self._error is not None and other._error is not None:
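# quotient rule: sigma(a/b) = sqrt((sa/b)**2 + (a*sb/b**2)**2)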
new_error = numpy.sqrt((self._error/other._data)**2+(self._data*other._error/other._data**2)**2)
img.error = new_error
else:
img.error = self._error
# combined mask of valid pixels if contained in both
if self._mask is not None and other._mask is not None:
new_mask = numpy.logical_or(self._mask, other._mask)
img.mask = new_mask
else:
img.mask = self._mask
return img
elif isinstance(other, numpy.ndarray):
img = Image(error=self._error, mask=self._mask)
if self._data is not None: # check if there is data in the object
dim = other.shape
new_data = self._data # defaults if no broadcast rule below matches
new_error = self._error
# add ndarray according to its dimensions
if self._dim == dim:
new_data = self._data/other
if self._error is not None:
new_error = self._error/other
else:
new_error = None
elif len(dim) == 1:
if self._dim[0] == dim[0]:
new_data = self._data/other[:, numpy.newaxis]
if self._error is not None:
new_error = self._error/other[:, numpy.newaxis]
else:
new_error = None
elif self._dim[1] == dim[0]:
new_data = self._data/other[numpy.newaxis, :]
if self._error is not None:
new_error = self._error/other[numpy.newaxis, :]
else:
new_error = None
else:
new_data = self._data
img.data = new_data
img.error = new_error
return img
else:
# try to do division for other types, e.g. float, int, etc.
try:
new_data = self._data/other
if self._error is not None:
new_error = self._error/other
else:
new_error = None
img = Image(data=new_data, error=new_error, mask=self._mask)
return img
except Exception:
# raise a TypeError if the operand types do not match at all
raise TypeError("unsupported operand type(s) for /: %s and %s" %
(str(type(self)).split("'")[1], str(type(other)).split("'")[1]))
def __mul__(self, other):
"""
Operator to multiply two Images or multiply by another type if possible
"""
if isinstance(other, Image):
# define behaviour if the other is of the same instance
img = Image()
# multiply data if contained in both
if self._data is not None and other._data is not None:
new_data = self._data*other._data
img.data = new_data
else:
img.data = self._data
# add error if contained in both
if self._error is not None and other._error is not None:
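# product rule: sigma(a*b) = sqrt((sa*b)**2 + (a*sb)**2)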
new_error = numpy.sqrt((self._error*other._data)**2+(self._data*other._error)**2)
img.error = new_error
else:
img.error = self._error
# combined mask of valid pixels if contained in both
if self._mask is not None and other._mask is not None:
new_mask = numpy.logical_or(self._mask, other._mask)
img.mask = new_mask
else:
img.mask = self._mask
return img
elif isinstance(other, numpy.ndarray):
img = Image(error=self._error, mask=self._mask)
if self._data is not None: # check if there is data in the object
dim = other.shape
new_data = self._data # default if no broadcast rule below matches
# add ndarray according to its dimensions
if self._dim == dim:
new_data = self._data*other
elif len(dim) == 1:
if self._dim[0] == dim[0]:
new_data = self._data*other[:, numpy.newaxis]
elif self._dim[1] == dim[0]:
new_data = self._data*other[numpy.newaxis, :]
else:
new_data = self._data
img.data = new_data
return img
else:
# try to do multiplication for other types, e.g. float, int, etc.
try:
new_data = self._data*other
img = Image(data=new_data, error=self._error, mask=self._mask)
return img
except Exception:
# raise a TypeError if the operand types do not match at all
raise TypeError("unsupported operand type(s) for *: %s and %s" %
(str(type(self)).split("'")[1], str(type(other)).split("'")[1]))
# define comparison operators as element-wise comparisons with the data array
def __rmul__(self, other):
return self.__mul__(other)
def __lt__(self, other):
return self._data < other
def __le__(self, other):
return self._data <= other
def __eq__(self, other):
return self._data == other
def __ne__(self, other):
return self._data != other
def __gt__(self, other):
return self._data > other
def __ge__(self, other):
return self._data >= other
def sqrt(self):
"""
Computes the square root of the image
Returns
-----------
Image : data_model.Image object
A full Image object
"""
if self._data is not None:
new_data = numpy.sqrt(self._data) # sqrt of the data
else:
new_data = None
if self._error is not None and self._data is not None:
new_error = 1/(2*new_data)*self._error # corresponding error
else:
new_error = None
# return new Image object with corresponding data
return Image(data=new_data, error=new_error, mask=self._mask)
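# Illustrative use of the operators above (assumed shapes; not part of the
# original module):
#   img = Image(data=numpy.ones((4, 4)), error=0.1 * numpy.ones((4, 4)))
#   halved = img / numpy.full(4, 2.0)  # row i divided by other[i] via newaxis
#   total = img + halved               # errors combine in quadrature
#   root = img.sqrt()                  # error propagated as err / (2 * sqrt(data))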
@property
def dim(self):
"""
Returns the dimension of the image
Returns
-----------
_dim : tuple
The dimension of the image (y,x)
"""
return self._dim
@property
def data(self):
"""
Returns the stored data of the image
Returns
-----------
_data : numpy.ndarray
The stored data | |
__all__ = [
'get_summary_mapping',
'generate_summaryxref_files',
'merge_oed_to_mapping',
'write_exposure_summary',
'write_summary_levels',
'write_mapping_file',
]
import io
import json
import os
import warnings
import pandas as pd
from ..utils.coverages import SUPPORTED_COVERAGE_TYPES
from ..utils.data import (
factorize_dataframe,
factorize_ndarray,
get_dataframe,
get_json,
merge_dataframes,
set_dataframe_column_dtypes,
get_dtypes_and_required_cols,
)
from ..utils.defaults import (
find_exposure_fp,
SOURCE_IDX,
SUMMARY_MAPPING,
SUMMARY_OUTPUT,
get_loc_dtypes,
get_acc_dtypes,
)
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from ..utils.path import as_path
from ..utils.peril import PERILS, PERIL_GROUPS
from ..utils.status import OASIS_KEYS_STATUS
from .gul_inputs import get_gul_input_items
@oasis_log
def get_summary_mapping(inputs_df, oed_hierarchy, is_fm_summary=False):
"""
Create a DataFrame with linking information between Ktools `OasisFiles`
And the Exposure data
:param inputs_df: datafame from gul_inputs.get_gul_input_items(..) / il_inputs.get_il_input_items(..)
:type inputs_df: pandas.DataFrame
:param is_fm_summary: Indicates whether an FM summary mapping is required
:type is_fm_summary: bool
:return: Subset of columns from gul_inputs_df / il_inputs_df
:rtype: pandas.DataFrame
"""
acc_num = oed_hierarchy['accnum']['ProfileElementName'].lower()
loc_num = oed_hierarchy['locnum']['ProfileElementName'].lower()
policy_num = oed_hierarchy['polnum']['ProfileElementName'].lower()
portfolio_num = oed_hierarchy['portnum']['ProfileElementName'].lower()
# Case GUL+FM (based on il_inputs_df)
if is_fm_summary:
summary_mapping = inputs_df[inputs_df['level_id'] == inputs_df['level_id'].max()].drop_duplicates(subset=['gul_input_id', 'layer_id'], keep='first')
summary_mapping['agg_id'] = summary_mapping['gul_input_id']
summary_mapping['output_id'] = factorize_ndarray(
summary_mapping.loc[:, ['gul_input_id', 'layer_id']].values,
col_idxs=range(2)
)[0]
summary_mapping.drop('item_id', axis=1, inplace=True)
# GUL Only
else:
summary_mapping = inputs_df.copy(deep=True)
usecols = [
acc_num,
loc_num,
'loc_id',
policy_num,
portfolio_num,
SOURCE_IDX['loc'],
SOURCE_IDX['acc'],
'item_id',
'layer_id',
'coverage_id',
'peril_id',
'agg_id',
'output_id',
'coverage_type_id',
'tiv'
]
summary_mapping.drop(
[c for c in summary_mapping.columns if c not in usecols],
axis=1,
inplace=True
)
dtypes = {
**{t: 'str' for t in [portfolio_num, policy_num, acc_num, loc_num, 'peril_id']},
**{t: 'uint8' for t in ['coverage_type_id']},
**{t: 'uint32' for t in [SOURCE_IDX['loc'], SOURCE_IDX['acc'], 'loc_id', 'item_id', 'layer_id', 'coverage_id', 'agg_id', 'output_id']},
**{t: 'float64' for t in ['tiv']}
}
summary_mapping = set_dataframe_column_dtypes(summary_mapping, dtypes)
return summary_mapping
def merge_oed_to_mapping(summary_map_df, exposure_df, oed_column_set, defaults=None):
"""
Create a factorized col (summary ids) based on a list of oed column names
:param summary_map_df: dataframe returned from get_summary_mapping
:type summary_map_df: pandas.DataFrame
:param exposure_df: Location OED data
:type exposure_df: pandas.DataFrame
:param defaults: Dictionary of values to fill NaN columns with
:type defaults: dict
{'Col_A': 0, 'Col_B': 1, 'Col_C': 2}
:return: New DataFrame of summary_map_df + exposure_df merged on exposure index
:rtype: pandas.DataFrame
"""
column_set = [c.lower() for c in oed_column_set]
columns_found = [c for c in column_set if c in exposure_df.columns.to_list()]
columns_missing = list(set(column_set) - set(columns_found))
# Select DF with matching cols
exposure_col_df = exposure_df.loc[:, columns_found]
# Add default value if optional column is missing
for col in columns_missing:
if col in defaults:
exposure_col_df[col] = defaults[col]
else:
raise OasisException('Column to merge "{}" not in locations dataframe or defined with a default value'.format(col))
exposure_col_df[SOURCE_IDX['loc']] = exposure_df.index
new_summary_map_df = merge_dataframes(summary_map_df, exposure_col_df, join_on=SOURCE_IDX['loc'], how='inner')
if defaults:
new_summary_map_df.fillna(value=defaults, inplace=True)
return new_summary_map_df
def group_by_oed(oed_col_group, summary_map_df, exposure_df, sort_by, accounts_df=None):
"""
Adds list of OED fields from `column_set` to summary map file
:param summary_map_df: dataframe returned from get_summary_mapping
:type summary_map_df: pandas.DataFrame
:param exposure_df: DataFrame loaded from location.csv
:type exposure_df: pandas.DataFrame
:param accounts_df: DataFrame loaded from accounts.csv
:type accounts_df: pandas.DataFrame
:return: subset of columns from exposure_df to merge
:rtype: list
summary_ids[0] is an int list 1..n array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, ... ])
summary_ids[1] is an array of values used to factorize `array(['Layer1', 'Layer2'], dtype=object)`
"""
oed_cols = [c.lower() for c in oed_col_group] # all required columns
unmapped_cols = [c for c in oed_cols if c not in summary_map_df.columns] # columns still to pull from the locations / accounts files
mapped_cols = [c for c in oed_cols + [SOURCE_IDX['loc'], SOURCE_IDX['acc'], sort_by] if c in summary_map_df.columns] # columns already in summary_map_df
# Extract mapped_cols from summary_map_df
summary_group_df = summary_map_df.loc[:, mapped_cols]
# Search loc / acc files and merge in the remaining columns
if unmapped_cols:
# Location file columns
exposure_cols = [c for c in unmapped_cols if c in exposure_df.columns]
exposure_col_df = exposure_df.loc[:, exposure_cols + [SOURCE_IDX['loc']]]
summary_group_df = merge_dataframes(summary_group_df, exposure_col_df, join_on=SOURCE_IDX['loc'], how='left')
# Account file columns
if isinstance(accounts_df, pd.DataFrame):
accounts_cols = [c for c in unmapped_cols if c in set(accounts_df.columns) - set(exposure_df.columns)]
if accounts_cols:
accounts_col_df = accounts_df.loc[:, accounts_cols + [SOURCE_IDX['acc']]]
summary_group_df = merge_dataframes(summary_group_df, accounts_col_df, join_on=SOURCE_IDX['acc'], how='left')
summary_group_df.fillna(0, inplace=True)
summary_group_df.sort_values(by=[sort_by], inplace=True)
summary_ids = factorize_dataframe(summary_group_df, by_col_labels=oed_cols)
return summary_ids[0], summary_ids[1]
def write_summary_levels(exposure_df, accounts_fp, target_dir):
'''
JSON file with lists of Available / Recommended columns for use in summary reporting
Available: columns which exist in the input files and have at least one non-zero / non-NaN value
Recommended: columns which are available and also in the list of `useful` groupings SUMMARY_LEVEL_LOC
{
'GUL': {
'available': ['accnumber',
'locnumber',
'istenant',
'buildingid',
'countrycode',
'latitude',
'longitude',
'streetaddress',
'postalcode',
'occupancycode',
'constructioncode',
'locperilscovered',
'buildingtiv',
'contentstiv',
'bitiv',
'portnumber']
},
'IL': {
... etc ...
}
}
'''
# Manage internal columns (non-OED exposure input)
int_excluded_cols = ['loc_id', SOURCE_IDX['loc']]
desc_non_oed = 'Not an OED field'
int_oasis_cols = {
'coverage_type_id': 'Oasis coverage type',
'peril_id': 'OED peril code',
'coverage_id': 'Oasis coverage identifier',
}
# GUL perspective (loc columns only)
l_col_list = exposure_df.loc[:, exposure_df.any()].columns.to_list()
l_col_info = get_loc_dtypes()
for k in list(l_col_info.keys()):
l_col_info[k.lower()] = l_col_info[k]
del l_col_info[k]
gul_avail = {k: l_col_info[k]['desc'] if k in l_col_info else desc_non_oed
for k in set([c.lower() for c in l_col_list]).difference(int_excluded_cols)}
gul_summary_lvl = {'GUL': {'available': {**gul_avail, **int_oasis_cols}}}
# IL perspective (join of acc + loc col with no dups)
il_summary_lvl = {}
if accounts_fp:
accounts_df = pd.read_csv(accounts_fp)
a_col_list = accounts_df.loc[:, accounts_df.any()].columns.to_list()
a_col_info = get_acc_dtypes()
a_avail = set([c.lower() for c in a_col_list])
il_avail = {k: a_col_info[k]['desc'] if k in a_col_info else desc_non_oed
for k in a_avail.difference(gul_avail.keys())}
il_summary_lvl = {'IL': {'available': {**gul_avail, **il_avail, **int_oasis_cols}}}
with io.open(os.path.join(target_dir, 'exposure_summary_levels.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps({**gul_summary_lvl, **il_summary_lvl}, sort_keys=True, ensure_ascii=False, indent=4))
@oasis_log
def write_mapping_file(sum_inputs_df, target_dir, is_fm_summary=False):
"""
Writes a summary map file, used to build summarycalc xref files.
:param sum_inputs_df: dataframe returned from get_summary_mapping
:type sum_inputs_df: pandas.DataFrame
:param target_dir: directory in which to write the summary map file
:type target_dir: str
:param is_fm_summary: Indicates whether an FM summary mapping is required
:type is_fm_summary: bool
:return: Summary xref file path
:rtype: str
"""
target_dir = as_path(
target_dir,
'Target IL input files directory',
is_dir=True,
preexists=False
)
# Set chunk size for writing the CSV files - at most 200K rows, at least 1K
chunksize = min(2 * 10**5, max(len(sum_inputs_df), 1000))
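# e.g. 50 rows -> chunksize 1000; 5 * 10**6 rows -> chunksize 2 * 10**5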
if is_fm_summary:
sum_mapping_fp = os.path.join(target_dir, SUMMARY_MAPPING['fm_map_fn'])
else:
sum_mapping_fp = os.path.join(target_dir, SUMMARY_MAPPING['gul_map_fn'])
try:
sum_inputs_df.to_csv(
path_or_buf=sum_mapping_fp,
encoding='utf-8',
mode=('w' if os.path.exists(sum_mapping_fp) else 'a'),
chunksize=chunksize,
index=False
)
except (IOError, OSError) as e:
raise OasisException from e
return sum_mapping_fp
def get_column_selection(summary_set):
"""
Given an analysis_settings summary definition, return either:
1. the set of OED columns requested to group by, or
2. None, when there is no 'oed_fields' key (or it is empty), so that all
outputs are grouped into a single summary_set
:param summary_set: summary group dictionary from the `analysis_settings.json`
:type summary_set: dict
:return: List of selected OED columns to create summary groups from
:rtype: list
"""
if "oed_fields" not in summary_set:
return None
if not summary_set["oed_fields"]:
return None
# Use OED column list set in analysis_settings file
elif isinstance(summary_set['oed_fields'], list) and len(summary_set['oed_fields']) > 0:
return [c.lower() for c in summary_set['oed_fields']]
else:
raise OasisException('Unable to process settings file')
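# Illustrative: get_column_selection({"id": 1, "oed_fields": ["PortNumber"]})
# returns ['portnumber']; with "oed_fields" absent or empty it returns None,
# so all outputs fall into a single summary set.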
def get_ri_settings(run_dir):
"""
Return the contents of ri_layers.json
Example:
{
"1": {
"inuring_priority": 1,
"risk_level": "LOC",
"directory": " ... /runs/ProgOasis-20190501145127/RI_1"
}
}
:param run_dir: The file path of the model run directory
:type run_dir: str
:return: metadata for the Reinsurance layers
:rtype: dict
"""
return get_json(src_fp=os.path.join(run_dir, 'ri_layers.json'))
@oasis_log
def write_df_to_file(df, target_dir, filename):
"""
Write a generated summary xref dataframe to disk
:param df: The dataframe output of get_df( .. )
:type df: pandas.DataFrame
:param target_dir: Abs directory to write a summary_xref file to
:type target_dir: str
:param filename: Name of file to store as
:type filename: str
"""
target_dir = as_path(target_dir, 'Input files directory', is_dir=True, preexists=False)
chunksize = min(2 * 10**5, max(len(df), 1000))
csv_fp = os.path.join(target_dir, filename)
try:
df.to_csv(
path_or_buf=csv_fp,
encoding='utf-8',
mode=('w'),
chunksize=chunksize,
index=False
)
except (IOError, OSError) as e:
raise OasisException from e
return csv_fp
@oasis_log
def get_summary_xref_df(map_df, exposure_df, accounts_df, summaries_info_dict, summaries_type, gul_items=False):
"""
Create a Dataframe for either gul / il / ri based on a section
from the analysis settings
:param map_df: Summary Map dataframe (GUL / IL)
:type map_df: pandas.DataFrame
:param exposure_df: Location OED data
:type exposure_df: pandas.DataFrame
:param accounts_df: Accounts OED data
:type accounts_df: pandas.DataFrame
:param summaries_info_dict: list of dictionary definitions for summary groups from the analysis_settings file
:type summaries_info_dict: list
[{
"summarycalc": true,
"eltcalc": true,
"aalcalc": true,
"pltcalc": true,
"id": 1,
"oed_fields": "prog",
"lec_output": true,
m.b596 <= 0)
m.e871 = Constraint(expr= m.b533 - m.b597 <= 0)
m.e872 = Constraint(expr= -m.b533 + m.b534 - m.b598 <= 0)
m.e873 = Constraint(expr= m.b535 - m.b599 <= 0)
m.e874 = Constraint(expr= -m.b535 + m.b536 - m.b600 <= 0)
m.e875 = Constraint(expr= m.b539 - m.b603 <= 0)
m.e876 = Constraint(expr= -m.b539 + m.b540 - m.b604 <= 0)
m.e877 = Constraint(expr= m.b541 - m.b605 <= 0)
m.e878 = Constraint(expr= -m.b541 + m.b542 - m.b606 <= 0)
m.e879 = Constraint(expr= m.b543 - m.b607 <= 0)
m.e880 = Constraint(expr= -m.b543 + m.b544 - m.b608 <= 0)
m.e881 = Constraint(expr= m.b547 - m.b611 <= 0)
m.e882 = Constraint(expr= -m.b547 + m.b548 - m.b612 <= 0)
m.e883 = Constraint(expr= m.b549 - m.b613 <= 0)
m.e884 = Constraint(expr= -m.b549 + m.b550 - m.b614 <= 0)
m.e885 = Constraint(expr= m.b551 - m.b615 <= 0)
m.e886 = Constraint(expr= -m.b551 + m.b552 - m.b616 <= 0)
m.e887 = Constraint(expr= m.x9 - m.x63 - m.x617 == 0)
m.e888 = Constraint(expr= m.x10 - m.x64 - m.x618 == 0)
m.e889 = Constraint(expr= m.x17 - m.x65 - m.x639 == 0)
m.e890 = Constraint(expr= m.x18 - m.x66 - m.x640 == 0)
m.e891 = Constraint(expr= m.x39 - m.x67 == 0)
m.e892 = Constraint(expr= m.x40 - m.x68 == 0)
m.e893 = Constraint(expr= m.x41 - m.x69 == 0)
m.e894 = Constraint(expr= m.x42 - m.x70 == 0)
m.e895 = Constraint(expr= m.x617 - m.x619 - m.x621 == 0)
m.e896 = Constraint(expr= m.x618 - m.x620 - m.x622 == 0)
m.e897 = Constraint(expr= -m.x623 - m.x625 + m.x627 == 0)
m.e898 = Constraint(expr= -m.x624 - m.x626 + m.x628 == 0)
m.e899 = Constraint(expr= m.x627 - m.x629 - m.x631 == 0)
m.e900 = Constraint(expr= m.x628 - m.x630 - m.x632 == 0)
m.e901 = Constraint(expr= m.x631 - m.x633 - m.x635 - m.x637 == 0)
m.e902 = Constraint(expr= m.x632 - m.x634 - m.x636 - m.x638 == 0)
m.e903 = Constraint(expr= m.x641 - m.x647 - m.x649 == 0)
m.e904 = Constraint(expr= m.x642 - m.x648 - m.x650 == 0)
m.e905 = Constraint(expr= m.x645 - m.x651 - m.x653 - m.x655 == 0)
m.e906 = Constraint(expr= m.x646 - m.x652 - m.x654 - m.x656 == 0)
m.e907 = Constraint(expr= (m.x675 / (0.001 + 0.999 * m.b751) - log(m.x667 / (
0.001 + 0.999 * m.b751) + 1)) * (0.001 + 0.999 * m.b751) <= 0)
m.e908 = Constraint(expr= (m.x676 / (0.001 + 0.999 * m.b752) - log(m.x668 / (
0.001 + 0.999 * m.b752) + 1)) * (0.001 + 0.999 * m.b752) <= 0)
m.e909 = Constraint(expr= m.x669 == 0)
m.e910 = Constraint(expr= m.x670 == 0)
m.e911 = Constraint(expr= m.x677 == 0)
m.e912 = Constraint(expr= m.x678 == 0)
m.e913 = Constraint(expr= m.x619 - m.x667 - m.x669 == 0)
m.e914 = Constraint(expr= m.x620 - m.x668 - m.x670 == 0)
m.e915 = Constraint(expr= m.x623 - m.x675 - m.x677 == 0)
m.e916 = Constraint(expr= m.x624 - m.x676 - m.x678 == 0)
m.e917 = Constraint(expr= m.x667 - 40 * m.b751 <= 0)
m.e918 = Constraint(expr= m.x668 - 40 * m.b752 <= 0)
m.e919 = Constraint(expr= m.x669 + 40 * m.b751 <= 40)
m.e920 = Constraint(expr= m.x670 + 40 * m.b752 <= 40)
m.e921 = Constraint(expr= m.x675 - 3.71357206670431 * m.b751 <= 0)
m.e922 = Constraint(expr= m.x676 - 3.71357206670431 * m.b752 <= 0)
m.e923 = Constraint(expr= m.x677 + 3.71357206670431 * m.b751
<= 3.71357206670431)
m.e924 = Constraint(expr= m.x678 + 3.71357206670431 * m.b752
<= 3.71357206670431)
m.e925 = Constraint(expr= (m.x679 / (0.001 + 0.999 * m.b753) - 1.2 * log(m.x671
/ (0.001 + 0.999 * m.b753) + 1)) * (0.001 + 0.999 * m.b753) <= 0)
m.e926 = Constraint(expr= (m.x680 / (0.001 + 0.999 * m.b754) - 1.2 * log(m.x672
/ (0.001 + 0.999 * m.b754) + 1)) * (0.001 + 0.999 * m.b754) <= 0)
m.e927 = Constraint(expr= m.x673 == 0)
m.e928 = Constraint(expr= m.x674 == 0)
m.e929 = Constraint(expr= m.x681 == 0)
m.e930 = Constraint(expr= m.x682 == 0)
m.e931 = Constraint(expr= m.x621 - m.x671 - m.x673 == 0)
m.e932 = Constraint(expr= m.x622 - m.x672 - m.x674 == 0)
m.e933 = Constraint(expr= m.x625 - m.x679 - m.x681 == 0)
m.e934 = Constraint(expr= m.x626 - m.x680 - m.x682 == 0)
m.e935 = Constraint(expr= m.x671 - 40 * m.b753 <= 0)
m.e936 = Constraint(expr= m.x672 - 40 * m.b754 <= 0)
m.e937 = Constraint(expr= m.x673 + 40 * m.b753 <= 40)
m.e938 = Constraint(expr= m.x674 + 40 * m.b754 <= 40)
m.e939 = Constraint(expr= m.x679 - 4.45628648004517 * m.b753 <= 0)
m.e940 = Constraint(expr= m.x680 - 4.45628648004517 * m.b754 <= 0)
m.e941 = Constraint(expr= m.x681 + 4.45628648004517 * m.b753
<= 4.45628648004517)
m.e942 = Constraint(expr= m.x682 + 4.45628648004517 * m.b754
<= 4.45628648004517)
m.e943 = Constraint(expr= -0.75 * m.x683 + m.x699 == 0)
m.e944 = Constraint(expr= -0.75 * m.x684 + m.x700 == 0)
m.e945 = Constraint(expr= m.x685 == 0)
m.e946 = Constraint(expr= m.x686 == 0)
m.e947 = Constraint(expr= m.x701 == 0)
m.e948 = Constraint(expr= m.x702 == 0)
m.e949 = Constraint(expr= m.x633 - m.x683 - m.x685 == 0)
m.e950 = Constraint(expr= m.x634 - m.x684 - m.x686 == 0)
m.e951 = Constraint(expr= m.x641 - m.x699 - m.x701 == 0)
m.e952 = Constraint(expr= m.x642 - m.x700 - m.x702 == 0)
m.e953 = Constraint(expr= m.x683 - 4.45628648004517 * m.b755 <= 0)
m.e954 = Constraint(expr= m.x684 - 4.45628648004517 * m.b756 <= 0)
m.e955 = Constraint(expr= m.x685 + 4.45628648004517 * m.b755
<= 4.45628648004517)
m.e956 = Constraint(expr= m.x686 + 4.45628648004517 * m.b756
<= 4.45628648004517)
m.e957 = Constraint(expr= m.x699 - 3.34221486003388 * m.b755 <= 0)
m.e958 = Constraint(expr= m.x700 - 3.34221486003388 * m.b756 <= 0)
m.e959 = Constraint(expr= m.x701 + 3.34221486003388 * m.b755
<= 3.34221486003388)
m.e960 = Constraint(expr= m.x702 + 3.34221486003388 * m.b756
<= 3.34221486003388)
m.e961 = Constraint(expr= (m.x703 / (0.001 + 0.999 * m.b757) - 1.5 * log(m.x687
/ (0.001 + 0.999 * m.b757) + 1)) * (0.001 + 0.999 * m.b757) <= 0)
m.e962 = Constraint(expr= (m.x704 / (0.001 + 0.999 * m.b758) - 1.5 * log(m.x688
/ (0.001 + 0.999 * m.b758) + 1)) * (0.001 + 0.999 * m.b758) <= 0)
m.e963 = Constraint(expr= m.x689 == 0)
m.e964 = Constraint(expr= m.x690 == 0)
m.e965 = Constraint(expr= m.x707 == 0)
m.e966 = Constraint(expr= m.x708 == 0)
m.e967 = Constraint(expr= m.x635 - m.x687 - m.x689 == 0)
m.e968 = Constraint(expr= m.x636 - m.x688 - m.x690 == 0)
m.e969 = Constraint(expr= m.x643 - m.x703 - m.x707 == 0)
m.e970 = Constraint(expr= m.x644 - m.x704 - m.x708 == 0)
m.e971 = Constraint(expr= m.x687 - 4.45628648004517 * m.b757 <= 0)
m.e972 = Constraint(expr= m.x688 - 4.45628648004517 * m.b758 <= 0)
m.e973 = Constraint(expr= m.x689 + 4.45628648004517 * m.b757
<= 4.45628648004517)
m.e974 = Constraint(expr= m.x690 + 4.45628648004517 * m.b758
<= 4.45628648004517)
m.e975 = Constraint(expr= m.x703 - 2.54515263975353 * m.b757 <= 0)
m.e976 = Constraint(expr= m.x704 - 2.54515263975353 * m.b758 <= 0)
m.e977 = Constraint(expr= m.x707 + 2.54515263975353 * m.b757
<= 2.54515263975353)
m.e978 = Constraint(expr= m.x708 + 2.54515263975353 * m.b758
<= 2.54515263975353)
m.e979 = Constraint(expr= -m.x691 + m.x711 == 0)
m.e980 = Constraint(expr= -m.x692 + m.x712 == 0)
m.e981 = Constraint(expr= -0.5 * m.x695 + m.x711 == 0)
m.e982 = Constraint(expr= -0.5 * m.x696 + m.x712 == 0)
m.e983 = Constraint(expr= m.x693 == 0)
m.e984 = Constraint(expr= m.x694 == 0)
m.e985 = Constraint(expr= m.x697 == 0)
m.e986 = Constraint(expr= m.x698 == 0)
m.e987 = Constraint(expr= m.x713 == 0)
m.e988 = Constraint(expr= m.x714 == 0)
m.e989 = Constraint(expr= m.x637 - m.x691 - m.x693 == 0)
m.e990 = Constraint(expr= m.x638 - m.x692 - m.x694 == 0)
m.e991 = Constraint(expr= m.x639 - m.x695 - m.x697 == 0)
m.e992 = Constraint(expr= m.x640 - m.x696 - m.x698 == 0)
m.e993 = Constraint(expr= m.x645 - m.x711 - m.x713 == 0)
m.e994 = Constraint(expr= m.x646 - m.x712 - m.x714 == 0)
m.e995 = Constraint(expr= m.x691 - 4.45628648004517 * m.b759 <= 0)
m.e996 = Constraint(expr= m.x692 - 4.45628648004517 * m.b760 <= 0)
m.e997 = Constraint(expr= m.x693 + 4.45628648004517 * m.b759
<= 4.45628648004517)
m.e998 = Constraint(expr= m.x694 + 4.45628648004517 * m.b760
<= 4.45628648004517)
m.e999 = Constraint(expr= m.x695 - 30 * m.b759 <= 0)
m.e1000 = Constraint(expr= m.x696 - 30 * m.b760 <= 0)
m.e1001 = Constraint(expr= m.x697 + 30 * m.b759 <= 30)
m.e1002 = Constraint(expr= m.x698 + 30 * m.b760 <= 30)
m.e1003 = Constraint(expr= m.x711 - 15 * m.b759 <= 0)
m.e1004 = Constraint(expr= m.x712 - 15 * m.b760 <= 0)
m.e1005 = Constraint(expr= m.x713 + 15 * m.b759 <= 15)
m.e1006 = Constraint(expr= m.x714 + 15 * m.b760 <= 15)
m.e1007 = Constraint(expr= (m.x731 / (0.001 + 0.999 * m.b761) - 1.25 * log(
m.x715 / (0.001 + 0.999 * m.b761) + 1)) * (0.001 + 0.999 * m.b761) <= 0)
m.e1008 = Constraint(expr= (m.x732 / (0.001 + 0.999 * m.b762) - 1.25 * log(
| |
def _fetch_latest_version(self, base_id):
"""Get the latest version number for the given base_id (PRIVATE)."""
cur = self.dbh.cursor()
cur.execute(
"select VERSION from MATRIX where BASE_ID = %s order by VERSION"
" desc limit 1",
(base_id,),
)
row = cur.fetchone()
latest = None
if row:
latest = row[0]
else:
warnings.warn(
"Failed to fetch latest version number for JASPAR motif"
f" with base ID '{base_id}'. No JASPAR motif with this"
" base ID appears to exist in the database.",
BiopythonWarning,
)
return latest
def _fetch_internal_id(self, base_id, version):
"""Fetch the internal id for a base id + version (PRIVATE).
Also checks if this combo exists or not.
"""
cur = self.dbh.cursor()
cur.execute(
"select id from MATRIX where BASE_id = %s and VERSION = %s",
(base_id, version),
)
row = cur.fetchone()
int_id = None
if row:
int_id = row[0]
else:
warnings.warn(
"Failed to fetch internal database ID for JASPAR motif"
f" with matrix ID '{base_id}.{version}'. No JASPAR motif"
" with this matrix ID appears to exist.",
BiopythonWarning,
)
return int_id
def _fetch_motif_by_internal_id(self, int_id):
"""Fetch basic motif information (PRIVATE)."""
cur = self.dbh.cursor()
cur.execute(
"select BASE_ID, VERSION, COLLECTION, NAME from MATRIX where id = %s",
(int_id,),
)
row = cur.fetchone()
# This should never happen as it is an internal method. If it does
# we should probably raise an exception
if not row:
warnings.warn(
f"Could not fetch JASPAR motif with internal ID = {int_id}",
BiopythonWarning,
)
return None
base_id = row[0]
version = row[1]
collection = row[2]
name = row[3]
matrix_id = "".join([base_id, ".", str(version)])
# fetch the counts matrix
counts = self._fetch_counts_matrix(int_id)
# Create new JASPAR motif
motif = jaspar.Motif(matrix_id, name, collection=collection, counts=counts)
# fetch species
cur.execute("select TAX_ID from MATRIX_SPECIES where id = %s", (int_id,))
tax_ids = []
rows = cur.fetchall()
for row in rows:
tax_ids.append(row[0])
# Many JASPAR motifs (especially those not in the CORE collection)
# do not have taxonomy IDs. So this warning would get annoying.
# if not tax_ids:
# warnings.warn("Could not fetch any taxonomy IDs for JASPAR motif"
# " {0}".format(motif.matrix_id), BiopythonWarning)
motif.species = tax_ids
# fetch protein accession numbers
cur.execute("select ACC FROM MATRIX_PROTEIN where id = %s", (int_id,))
accs = []
rows = cur.fetchall()
for row in rows:
accs.append(row[0])
# Similarly as for taxonomy IDs, it would get annoying to print
# warnings for JASPAR motifs which do not have accession numbers.
motif.acc = accs
# fetch remaining annotation as tags from the ANNOTATION table
cur.execute("select TAG, VAL from MATRIX_ANNOTATION where id = %s", (int_id,))
rows = cur.fetchall()
for row in rows:
attr = row[0]
val = row[1]
if attr == "class":
motif.tf_class = val
elif attr == "family":
motif.tf_family = val
elif attr == "tax_group":
motif.tax_group = val
elif attr == "type":
motif.data_type = val
elif attr == "pazar_tf_id":
motif.pazar_id = val
elif attr == "medline":
motif.medline = val
elif attr == "comment":
motif.comment = val
else:
# TODO If we were to implement additional arbitrary tags
# motif.tag(attr, val)
pass
return motif
def _fetch_counts_matrix(self, int_id):
"""Fetch the counts matrix from the JASPAR DB by the internal ID (PRIVATE).
Returns a Bio.motifs.matrix.GenericPositionMatrix
"""
counts = {}
cur = self.dbh.cursor()
for base in "ACGT":
base_counts = []
cur.execute(
"select val from MATRIX_DATA where ID = %s and row = %s order by col",
(int_id, base),
)
rows = cur.fetchall()
for row in rows:
base_counts.append(row[0])
counts[base] = [float(x) for x in base_counts]
return matrix.GenericPositionMatrix("ACGT", counts)
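# Illustrative shape of the result: counts == {"A": [4.0, 19.0, ...],
# "C": [...], "G": [...], "T": [...]} with one count per matrix column,
# wrapped in a GenericPositionMatrix over the alphabet "ACGT".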
def _fetch_internal_id_list(
self,
collection=JASPAR_DFLT_COLLECTION,
tf_name=None,
tf_class=None,
tf_family=None,
matrix_id=None,
tax_group=None,
species=None,
pazar_id=None,
data_type=None,
medline=None,
all=False,
all_versions=False,
):
"""Fetch list of internal JASPAR motif IDs.
Fetch a list of internal JASPAR motif IDs based on various passed
parameters which may then be used to fetch the rest of the motif data.
Caller:
fetch_motifs()
Arguments:
See arguments sections of fetch_motifs()
Returns:
A list of internal JASPAR motif IDs which match the given
selection criteria arguments.
Build an SQL query based on the selection arguments provided.
1: First add table joins and sub-clauses for criteria corresponding to
named fields from the MATRIX and MATRIX_SPECIES tables such as
collection, matrix ID, name, species etc.
2: Then add joins/sub-clauses for tag/value parameters from the
MATRIX_ANNOTATION table.
For the surviving matrices, the responsibility for matrix-based
feature filtering such as IC, number of sites etc. falls on the
calling fetch_motifs() method.
"""
int_ids = []
cur = self.dbh.cursor()
"""
Special case 1: fetch ALL motifs. Highest priority.
Ignore all other selection arguments.
"""
if all:
cur.execute("select ID from MATRIX")
rows = cur.fetchall()
for row in rows:
int_ids.append(row[0])
return int_ids
"""
Special case 2: fetch specific motifs by their JASPAR IDs. This
has higher priority than any other except the above 'all' case.
Ignore all other selection arguments.
"""
if matrix_id:
"""
These might be either stable IDs or stable_ID.version.
If just stable ID and if all_versions == 1, return all versions,
otherwise just the latest
"""
if all_versions:
for id in matrix_id:
# ignore the version here, this is a stupidity filter
(base_id, version) = jaspar.split_jaspar_id(id)
cur.execute("select ID from MATRIX where BASE_ID = %s", (base_id,))
rows = cur.fetchall()
for row in rows:
int_ids.append(row[0])
else:
# only the lastest version, or the requested version
for id in matrix_id:
(base_id, version) = jaspar.split_jaspar_id(id)
if not version:
version = self._fetch_latest_version(base_id)
int_id = None
if version:
int_id = self._fetch_internal_id(base_id, version)
if int_id:
int_ids.append(int_id)
return int_ids
tables = ["MATRIX m"]
where_clauses = []
# Select by MATRIX.COLLECTION
if collection:
if isinstance(collection, list):
# Multiple collections passed in as a list
clause = "m.COLLECTION in ('"
clause = "".join([clause, "','".join(collection)])
clause = "".join([clause, "')"])
else:
# A single collection - typical usage
clause = "m.COLLECTION = '%s'" % collection
where_clauses.append(clause)
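# e.g. collection=['CORE', 'CNE'] yields "m.COLLECTION in ('CORE','CNE')";
# a plain string like 'CORE' yields "m.COLLECTION = 'CORE'"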
# Select by MATRIX.NAME
if tf_name:
if isinstance(tf_name, list):
# Multiple names passed in as a list
clause = "m.NAME in ('"
clause = "".join([clause, "','".join(tf_name)])
clause = "".join([clause, "')"])
else:
# A single name
clause = "m.NAME = '%s'" % tf_name
where_clauses.append(clause)
# Select by MATRIX_SPECIES.TAX_ID
if species:
tables.append("MATRIX_SPECIES ms")
where_clauses.append("m.ID = ms.ID")
"""
NOTE: species are numeric taxonomy IDs but stored as varchars
in the DB.
"""
if isinstance(species, list):
# Multiple tax IDs passed in as a list
clause = "ms.TAX_ID in ('"
clause = "".join([clause, "','".join(str(s) for s in species)])
clause = "".join([clause, "')"])
else:
# A single tax ID
clause = "ms.TAX_ID = '%s'" % species
where_clauses.append(clause)
"""
Tag based selection from MATRIX_ANNOTATION
Differs from perl TFBS module in that the matrix class explicitly
has a tag attribute corresponding to the tags in the database. This
provides tremendous flexibility in adding new tags to the DB and
being able to select based on those tags with out adding new code.
In the JASPAR Motif class we have elected to use specific attributes
for the most commonly used tags and here correspondingly only allow
selection on these attributes.
The attributes corresponding to the tags for which selection is
provided are:
Attribute Tag
tf_class class
tf_family family
pazar_id pazar_tf_id
medline medline
data_type type
tax_group tax_group
"""
# Select by TF class(es) (MATRIX_ANNOTATION.TAG="class")
if tf_class:
tables.append("MATRIX_ANNOTATION ma1")
where_clauses.append("m.ID = ma1.ID")
clause = "ma1.TAG = 'class'"
if isinstance(tf_class, list):
# A list of TF classes
clause = "".join([clause, " and ma1.VAL in ('"])
clause = "".join([clause, "','".join(tf_class)])
clause = "".join([clause, "')"])
else:
# A single TF class
clause = "".join([clause, " and ma1.VAL = '%s' " % tf_class])
where_clauses.append(clause)
# Select by TF families (MATRIX_ANNOTATION.TAG="family")
if tf_family:
tables.append("MATRIX_ANNOTATION ma2")
where_clauses.append("m.ID = ma2.ID")
clause = "ma2.TAG = 'family'"
if isinstance(tf_family, list):
# A list of TF families
clause = "".join([clause, " and ma2.VAL in ('"])
clause = "".join([clause, "','".join(tf_family)])
clause = "".join([clause, "')"])
else:
# A single TF family
clause = "".join([clause, " and ma2.VAL = '%s' " % tf_family])
where_clauses.append(clause)
# Select by PAZAR TF ID(s) (MATRIX_ANNOTATION.TAG="pazar_tf_id")
if pazar_id:
tables.append("MATRIX_ANNOTATION ma3")
where_clauses.append("m.ID = ma3.ID")
clause = "ma3.TAG = 'pazar_tf_id'"
if isinstance(pazar_id, list):
# A list of PAZAR IDs
clause = "".join([clause, " and ma3.VAL in ('"])
clause = "".join([clause, "','".join(pazar_id)])
clause = "".join([clause, "')"])
else:
# A single PAZAR ID
clause = "".join([clause, " and ma3.VAL = '%s' " % pazar_id])
where_clauses.append(clause)
import random
import time
from time import sleep
from uuid import uuid4
from datetime import datetime
from Jumpscale import j
from Jumpscale.data.schema.tests.schema import Schema
import unittest
T = unittest.TestCase()
def log(msg):
j.core.tools.log(msg, level=20)
def random_string():
return "s" + str(uuid4()).replace("-", "")[:10]
schema = Schema
def test_001_validate_list_of_strings():
"""
SCM-022
*Test case for validating list of strings *
**Test Scenario:**
#. Create schema with list of strings parameter, should succeed.
#. Try to set parameter with string type, should succeed.
"""
log("Create schema with list of strings parameter, should succeed.")
scm = """
@url = test.schema
list_names = (LS)
list_str = ["test", "example"] (LS)
"""
schema_nw = schema(scm)
schema_obj = schema_nw.new()
log("Try to set parameter with string type, should succeed.")
list_names = [random_string(), random_string()]
schema_obj.list_names = list_names
assert schema_obj.list_names == list_names
value = random_string()
list_names.append(value)
schema_obj.list_names.append(value)
assert schema_obj.list_names == list_names
log("schema list %s" % schema_obj.list_str)
assert schema_obj.list_str == ["test", "example"]
def test_002_validate_list_of_integers():
"""
SCM-023
*Test case for validating list of integers *
**Test Scenario:**
#. Create schema with list of integers parameter, should succeed.
#. Try to set parameter with non integer type, should fail.
#. Try to set parameter with integer type, should succeed.
"""
log("Create schema with list of integers parameter, should succeed.")
scm = """
@url = test.schema
list_numbers = (LI)
list_int = [1, 2, 3] (LI)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter with non integer type, should fail.")
with T.assertRaises(Exception):
schema_obj.list_numbers = [random.randint(1, 1000), random_string()]
with T.assertRaises(Exception):
schema_obj.list_numbers.append(random_string())
log("Try to set parameter with integer type, should succeed.")
list_numbers = [random.randint(1, 1000), random.randint(1, 1000)]
schema_obj.list_numbers = list_numbers
assert schema_obj.list_numbers == list_numbers
value = random.randint(1, 100)
list_numbers.append(value)
schema_obj.list_numbers.append(value)
assert schema_obj.list_numbers == list_numbers
log("schema list %s" % schema_obj.list_int)
assert schema_obj.list_int == [1, 2, 3]
def test_003_validate_list_floats():
"""
SCM-024
*Test case for validating list of floats *
**Test Scenario:**
#. Create schema with list of floats parameter, should succeed.
#. Try to set parameter with non float type, should fail.
#. Try to set parameter with float type, should succeed.
"""
log("Create schema with list of floats parameter, should succeed.")
scm = """
@url = test.schema
list_numbers = (LF)
list_floats = [1.5, 2.67, 3.7] (LF)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter with non float type, should fail.")
with T.assertRaises(Exception):
schema_obj.list_numbers = [random.uniform(1, 1000), random_string()]
with T.assertRaises(Exception):
schema_obj.list_numbers.append(random_string())
log("Try to set parameter with float type, should succeed.")
list_numbers = [random.uniform(1, 1000), random.uniform(1, 1000)]
schema_obj.list_numbers = list_numbers
assert schema_obj.list_numbers == list_numbers
value = random.uniform(1, 100)
list_numbers.append(value)
schema_obj.list_numbers.append(value)
assert schema_obj.list_numbers == list_numbers
log("schema list %s" % schema_obj.list_floats)
assert schema_obj.list_floats == [1.5, 2.67, 3.7]
def test_004_validate_list_of_boolean():
"""
SCM-025
*Test case for validating list of boolean *
**Test Scenario:**
#. Create schema with list of boolean parameter, should succeed.
#. Try to set parameter[P1] with False or non True value, should be False.
#. Try to set parameter[P1] with True value, should be True.
"""
log("Create schema with list of boolean parameter, should succeed.")
scm = """
@url = test.schema
list_check = (LB)
list_bool = [1, 'yes', 'y', 'true', 'f'] (LB)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter[P1] with False or non True value, should be False.")
schema_obj.list_check = [random.uniform(2, 1000), random_string()]
assert schema_obj.list_check == [False, False]
log("Try to set parameter[P1] with True value, should be True.")
schema_obj.list_check = [1, "yes", "y", "true", "f"]
check = [True, True, True, True, False]
assert schema_obj.list_check == check
log("schema list %s" % schema_obj.list_bool)
assert schema_obj.list_bool == [True, True, True, True, False]
def test_005_validate_list_of_mobiles():
"""
SCM-026
*Test case for validating list of mobiles *
**Test Scenario:**
#. Create schema with list of mobiles parameter, should succeed.
#. Try to set parameter with non mobile type, should fail.
#. Try to set parameter with mobile type, should succeed.
"""
log("Create schema with list of mobiles parameter, should succeed.")
scm = """
@url = test.schema
mobile_list = (Ltel)
list_tel = [464-4564-464, +45687941, 468716420] (Ltel)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter with non mobile type, should fail.")
with T.assertRaises(Exception):
schema_obj.mobile_list.append(random.uniform(1, 100))
log("Try to set parameter with mobile type, should succeed.")
mobile_list = ["{}".format(random.randint(100000, 1000000)), "{}".format(random.randint(100000, 1000000))]
schema_obj.mobile_list = mobile_list
assert schema_obj.mobile_list == mobile_list
value = "{}".format(random.randint(100000, 1000000))
mobile_list.append(value)
schema_obj.mobile_list.append(value)
assert schema_obj.mobile_list == mobile_list
log("schema list %s" % schema_obj.list_tel)
assert schema_obj.list_tel == ["4644564464", "+45687941", "468716420"]
def test_006_validate_list_of_emails():
"""
SCM-027
*Test case for validating list of emails *
**Test Scenario:**
#. Create schema with list of emails parameter, should succeed.
#. Try to set parameter with non email type, should fail.
#. Try to set parameter with email type, should succeed.
"""
log("Create schema with list of emails parameter, should succeed.")
scm = """
@url = test.schema
email_list = (Lemail)
list_emails = ['<EMAIL>', "<EMAIL>"] (Lemail)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter with non email type, should fail.")
with T.assertRaises(Exception):
schema_obj.email_list = [random.uniform(1, 100), "<EMAIL>"]
with T.assertRaises(Exception):
schema_obj.email_list.append(random.uniform(1, 100))
log("Try to set parameter with email type, should succeed.")
email_list = ["<EMAIL>", "<EMAIL>"]
schema_obj.email_list = email_list
log("schema list %s" % schema_obj.list_emails)
assert schema_obj.email_list == email_list
assert schema_obj.list_emails == email_list
value = "<EMAIL>"
email_list.append(value)
schema_obj.email_list.append(value)
log("schema list %s" % schema_obj.email_list)
assert schema_obj.email_list == email_list
def test_007_validate_list_of_ipports():
"""
SCM-028
*Test case for validating list of ipports *
**Test Scenario:**
#. Create schema with list of ipports parameter, should succeed.
#. Try to set parameter with non ipport type, should fail.
#. Try to set parameter with ipport type, should succeed.
"""
log("Create schema with list of ipports parameter, should succeed.")
scm = """
@url = test.schema
port_list = (Lipport)
list_ports = [3164, 15487] (Lipport)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter with non ipport type, should fail.")
with T.assertRaises(Exception):
schema_obj.port_list = [random_string(), random.randint(1, 10000)]
log("Try to set parameter with ipport type, should succeed.")
port_list = [random.randint(1, 10000), random.randint(1, 10000)]
schema_obj.port_list = port_list
assert schema_obj.port_list == port_list
value = random.randint(1, 10000)
port_list.append(value)
schema_obj.port_list.append(value)
assert schema_obj.port_list == port_list
log("schema list %s" % schema_obj.list_ports)
for i, j in zip(schema_obj.list_ports, [3164, 15487]):
assert int(i) == j
def test_008_validate_list_of_ipaddrs():
"""
SCM-029
*Test case for validating list of ipaddrs *
**Test Scenario:**
#. Create schema with list of ipaddrs parameter, should succeed.
#. Try to set parameter with non ipaddr type, should fail.
#. Try to set parameter with ipaddr type, should succeed.
"""
log("Create schema with list of ipaddrs parameter, should succeed.")
scm = """
@url = test.schema
ip_list = (Lipaddr)
list_ip = ['127.0.0.1', "192.168.1.1"] (Lipaddr)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter with non ipaddr type, should fail.")
with T.assertRaises(Exception):
schema_obj.ip_list = [random_string(), random.randint(1, 10000)]
with T.assertRaises(Exception):
schema_obj.ip_list.append(random.uniform(1, 100))
log("Try to set parameter with ipaddr type, should succeed.")
ip_list = ["10.15.{}.1".format(random.randint(0, 255)), "192.168.{}.1".format(random.randint(0, 255))]
schema_obj.ip_list = ip_list
assert schema_obj.ip_list == ip_list
value = "127.0.{}.1".format(random.randint(0, 255))
ip_list.append(value)
schema_obj.ip_list.append(value)
log("schema list %s" % schema_obj.ip_list)
assert schema_obj.list_ip == ["127.0.0.1", "192.168.1.1"]
def test_009_validate_list_of_ipranges():
"""
SCM-030
*Test case for validating list of ipranges *
**Test Scenario:**
#. Create schema with list of ipranges parameter, should succeed.
#. Try to set parameter with non iprange type, should fail.
#. Try to set parameter with iprange type, should succeed.
"""
log("Create schema with list of ipranges parameter, should succeed.")
scm = """
@url = test.schema
range_list = (Liprange)
list_ranges = ['127.0.0.1/24', "192.168.1.1/16"] (Liprange)
"""
schema_new = schema(scm)
schema_obj = schema_new.new()
log("Try to set parameter with non iprange type, should fail.")
with T.assertRaises(Exception):
schema_obj.range_list = [random_string(), "10.15.{}.1/24".format(random.randint(0, 255))]
with T.assertRaises(Exception):
schema_obj.range_list.append(random.uniform(1, 100))
log("Try to set parameter with iprange type, should succeed.")
range_list = ["10.15.{}.1/24".format(random.randint(0, 255)), "10.15.{}.1/24".format(random.randint(0, 255))]
schema_obj.range_list = range_list
assert schema_obj.range_list == range_list
value = "127.0.{}.1/16".format(random.randint(0, 255))
range_list.append(value)
schema_obj.range_list.append(value)
assert schema_obj.range_list == range_list
log("schema list %s" % schema_obj.list_ranges)
assert schema_obj.list_ranges == ["127.0.0.1/24", "192.168.1.1/16"]
def test_010_validate_list_of_dates():
"""
SCM-031
*Test case for validating list of dates *
**Test Scenario:**
#. Create schema with list of dates parameter, should succeed.
#. Try to set parameter with non date type, should fail.
#. Try to set parameter with date type, should succeed.
"""
# Copyright (c) 2015, University of Memphis, MD2K Center of Excellence
# - <NAME> <<EMAIL>>
# - <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
from collections import Sized
from pprint import pprint
import numpy as np
from pathlib import Path
from sklearn import svm, metrics, preprocessing
from sklearn.base import clone, is_classifier
from sklearn.cross_validation import LabelKFold
from sklearn.cross_validation import check_cv
from sklearn.externals.joblib import Parallel, delayed
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV, ParameterSampler, ParameterGrid
from sklearn.utils.validation import _num_samples, indexable
# Command line parameter configuration
parser = argparse.ArgumentParser(description='Train and evaluate the cStress model')
parser.add_argument('--featureFolder', dest='featureFolder', required=True,
help='Directory containing feature files')
parser.add_argument('--scorer', type=str, required=True, dest='scorer',
help='Specify which scorer function to use (f1 or twobias)')
parser.add_argument('--whichsearch', type=str, required=True, dest='whichsearch',
help='Specify which search function to use (GridSearch or RandomizedSearch)')
parser.add_argument('--n_iter', type=int, required=False, dest='n_iter',
help='If Randomized Search is used, how many iterations to use')
parser.add_argument('--modelOutput', type=str, required=True, dest='modelOutput',
help='Model file to write')
parser.add_argument('--featureFile', type=str, required=True, dest='featureFile',
help='Feature vector file name')
parser.add_argument('--puffGroundtruth', type=str, required=True, dest='puffGroundtruth',
help='puffMarker ground truth filename')
args = parser.parse_args()
def cv_fit_and_score(estimator, X, y, scorer, parameters, cv):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
parameters : dict or None
Parameters to be set on the estimator.
cv : cross-validation fold indices
Returns
-------
score : float
CV score on whole set.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
estimator.set_params(**parameters)
cv_probs_ = cross_val_probs(estimator, X, y, cv)
score = scorer(cv_probs_, y)
return [score, parameters]
class ModifiedGridSearchCV(GridSearchCV):
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(ModifiedGridSearchCV, self).__init__(
estimator, param_grid, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
def fit(self, X, y):
"""Actual fitting, performing the search over parameters."""
parameter_iterable = ParameterGrid(self.param_grid)
estimator = self.estimator
cv = self.cv
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(cv_fit_and_score)(clone(base_estimator), X, y, self.scoring,
parameters, cv=cv)
for parameters in parameter_iterable)
best = sorted(out, reverse=True)[0]
self.best_params_ = best[1]
self.best_score_ = best[0]
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best[1])
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class ModifiedRandomizedSearchCV(RandomizedSearchCV):
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
super(ModifiedRandomizedSearchCV, self).__init__(estimator=estimator, param_distributions=param_distributions,
n_iter=n_iter, scoring=scoring, random_state=random_state,
fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit,
cv=cv, verbose=verbose, pre_dispatch=pre_dispatch,
error_score=error_score)
def fit(self, X, y):
"""Actual fitting, performing the search over parameters."""
parameter_iterable = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
estimator = self.estimator
cv = self.cv
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(cv_fit_and_score)(clone(base_estimator), X, y, self.scoring,
parameters, cv=cv)
for parameters in parameter_iterable)
best = sorted(out, reverse=True)[0]
self.best_params_ = best[1]
self.best_score_ = best[0]
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best[1])
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
def readFeatures(folder, filename):
features = []
path = Path(folder)
files = list(path.glob('p*/s*/' + filename))
for f in files:
participantID = int(f.parent.parent.name[1:])
# if participantID > 2:
with f.open() as file:
for line in file.readlines():
parts = [x.strip() for x in line.split(',')]
featureVector = [participantID, int(parts[0]), int(parts[0]) + int(float(parts[24]))]
featureVector.extend([float(p) for p in parts[1:]])
features.append(featureVector)
return features
def readPuffMarkerGroundtruth(folder, filename):
features = []
path = Path(folder)
files = list(path.glob('p*/s*/' + filename))
for f in files:
participantID = int(f.parent.parent.name[1:])
with f.open() as file:
for line in file.readlines():
parts = [x.strip() for x in line.split(',')]
features.append([participantID, int(float(parts[0]))])
return features
def readSmokingEpisodeStartEndTIme(folder, filename):
epiStartTime = []
epiEndTime = []
path = Path(folder)
files = list(path.glob('p*/s*/' + filename))
for f in files:
participantID = int(f.parent.parent.name[1:])
with f.open() as file:
for line in file.readlines():
parts = [x.strip() for x in line.split(',')]
epiStartTime.append(int(float(parts[0])))
epiEndTime.append(int(float(parts[1])))
# features.append([participantID, int(float(parts[0]))])
return epiStartTime, epiEndTime
# analyze_events_with_features_filter_episode(features, groundtruth, epiStartTime, epiEndTime)
def analyze_events_with_features_filter_episode(features, puff_marks, epiStartTime, epiEndTime):
featureLabels = []
finalFeatures = []
subjects = []
cnt01 = 0
for line in features:
id = line[0]
starttime = line[1]
endtime = line[2]
f = line[3:]
found = 0
for puffID, puffTS in puff_marks:
if puffTS >= starttime and puffTS <= endtime:
found = 1
break
if found == 0:
inside = 0
for i in range(0, len(epiStartTime)):
if starttime >= epiStartTime[i] and starttime <= epiEndTime[i]:
inside = 1
break
if inside == 1:
continue
cnt01 = cnt01 + 1
featureLabels.append(found)
finalFeatures.append(f)
subjects.append(id)
return finalFeatures, featureLabels, subjects
def analyze_events_with_features(features, puff_marks):
featureLabels = []
finalFeatures = []
subjects = []
for line in features:
id = line[0]
starttime = line[1]
endtime = line[2]
f = line[3:]
found = 0
for puffID, puffTS in puff_marks:
if puffTS >= starttime and puffTS <= endtime:
found = 1
break
featureLabels.append(found)
finalFeatures.append(f)
subjects.append(id)
return finalFeatures, featureLabels, subjects
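# The labeling rule above, illustrated with hypothetical numbers: a feature
# window with (starttime, endtime) = (100, 160) gets label 1 when some puff
# ground-truth timestamp, e.g. 130, satisfies 100 <= 130 <= 160; otherwise 0.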
def get_svmdataset(traindata, trainlabels):
input = []
output = []
foldinds = []
for i in range(len(trainlabels)):
if trainlabels[i] == 1:
foldinds.append(i)
if trainlabels[i] == 0:
foldinds.append(i)
input = np.array(input, dtype='float64')
return output, input, foldinds
def reduceData(data, r):
result = []
for d in data:
result.append([d[i] for i in r])
return result
def f1Bias_scorer(estimator, X, y, ret_bias=False):
probas_ = estimator.predict_proba(X)
precision, recall, thresholds = metrics.precision_recall_curve(y, probas_[:, 1])
f1 = 0.0
bias = 0.0  # ensure bias is defined even if no threshold improves on f1 == 0.0
for i in range(0, len(thresholds)):
if not (precision[i] == 0 and recall[i] == 0):
f = 2 * (precision[i] * recall[i]) / (precision[i] + recall[i])
if f > f1:
f1 = f
bias = thresholds[i]
if ret_bias:
return f1, bias
else:
return f1
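# Worked example for the scan above (hypothetical values): at a threshold with
# precision = 0.8 and recall = 0.5, f = 2 * (0.8 * 0.5) / (0.8 + 0.5) ~= 0.615;
# the threshold achieving the largest such f over the curve is kept as the bias.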
def Twobias_scorer_CV(probs, y, ret_bias=False):
db = np.transpose(np.vstack([probs, y]))
db = db[np.argsort(db[:, 0]), :]
pos = np.sum(y == 1)
n = len(y)
neg = n - pos
tp, tn = pos, 0
lost = 0
optbias = []
minloss = 1
for i in range(n):
# pyxrf/model/load_data_from_db.py
from __future__ import absolute_import, division, print_function, unicode_literals
import h5py
import numpy as np
import os
import json
import multiprocessing
import pandas as pd
import platform
import math
import time as ttime
import copy
from distutils.version import LooseVersion
import logging
import warnings
try:
import databroker
except ImportError:
pass
from ..core.utils import convert_time_to_nexus_string
from .scan_metadata import ScanMetadataXRF
import pyxrf
pyxrf_version = pyxrf.__version__
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore")
sep_v = os.sep
try:
beamline_name = None
# Attempt to find the configuration file first
config_path = "/etc/pyxrf/pyxrf.json"
if os.path.isfile(config_path):
try:
with open(config_path, "r") as beamline_pyxrf:
beamline_config_pyxrf = json.load(beamline_pyxrf)
beamline_name = beamline_config_pyxrf["beamline_name"]
except Exception as ex:
raise IOError(f"Error while opening configuration file {config_path!r}") from ex
else:
# Otherwise try to identify the beamline using host name
hostname = platform.node()
beamline_names = {
"xf03id": "HXN",
"xf05id": "SRX",
"xf08bm": "TES",
"xf04bm": "XFM",
}
for k, v in beamline_names.items():
if hostname.startswith(k):
beamline_name = v
if beamline_name is None:
raise Exception("Beamline is not identified")
if beamline_name == "HXN":
from pyxrf.db_config.hxn_db_config import db
elif beamline_name == "SRX":
from pyxrf.db_config.srx_db_config import db
elif beamline_name == "XFM":
from pyxrf.db_config.xfm_db_config import db
elif beamline_name == "TES":
from pyxrf.db_config.tes_db_config import db
else:
db = None
db_analysis = None
print(f"Beamline Database is not used in pyxrf: unknown beamline {beamline_name!r}")
except Exception as ex:
db = None
print(f"Beamline Database is not used in pyxrf: {ex}")
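# A sketch of the configuration file read above; only the "beamline_name" key
# is used, and its value must be one of the names handled in this block, e.g.:
#
#   {"beamline_name": "SRX"}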
def flip_data(input_data, subscan_dims=None):
"""
Flip 2D or 3D array. The flip happens on the second index of shape.
.. warning:: This function mutates the input values.
Parameters
----------
input_data : 2D or 3D array
subscan_dims : sequence of (nx, ny) pairs, optional
Dimensions of each subscan when the scan consists of several blocks;
only ``ny`` (the number of rows per block) is used here.
Returns
-------
flipped data
"""
new_data = np.asarray(input_data)
data_shape = input_data.shape
if len(data_shape) == 2:
if subscan_dims is None:
new_data[1::2, :] = new_data[1::2, ::-1]
else:
i = 0
for nx, ny in subscan_dims:
start = i + 1
end = i + ny
new_data[start:end:2, :] = new_data[start:end:2, ::-1]
i += ny
if len(data_shape) == 3:
if subscan_dims is None:
new_data[1::2, :, :] = new_data[1::2, ::-1, :]
else:
i = 0
for nx, ny in subscan_dims:
start = i + 1
end = i + ny
new_data[start:end:2, :, :] = new_data[start:end:2, ::-1, :]
i += ny
return new_data
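# A minimal sketch of the flip (hypothetical values): in a serpentine scan,
# every other row is recorded in reverse, and flip_data restores a uniform
# orientation:
#
#   >>> flip_data(np.array([[1, 2, 3],
#   ...                     [6, 5, 4]]))
#   array([[1, 2, 3],
#          [4, 5, 6]])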
def fetch_run_info(run_id_uid):
"""
Fetches key data from start document of the selected run
Parameters
----------
run_id_uid: int or str
Run ID (positive or negative int) or UID (str, full or short) of the run.
Returns
-------
int or str
Run ID (always positive int) or Run UID (str, always full UID). Returns
`run_id=-1` and `run_uid=""` in case of failure.
Raises
------
RuntimeError
failed to fetch the run from Databroker
"""
try:
hdr = db[run_id_uid]
run_id = hdr.start["scan_id"]
run_uid = hdr.start["uid"]
except Exception:
if isinstance(run_id_uid, int):
msg = f"ID {run_id_uid}"
else:
msg = f"UID '{run_id_uid}'"
raise RuntimeError(f"Failed to find run with {msg}.")
return run_id, run_uid
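# Usage sketch (hypothetical identifiers); both forms return (run_id, run_uid):
#
#   fetch_run_info(1000)      # by scan ID
#   fetch_run_info("c7a0d1")  # by short (or full) run UID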
def fetch_data_from_db(
run_id_uid,
fpath=None,
create_each_det=False,
fname_add_version=False,
completed_scans_only=False,
file_overwrite_existing=False,
output_to_file=False,
save_scaler=True,
num_end_lines_excluded=None,
):
"""
Read data from databroker.
This is the place where new beamlines can be easily added
to pyxrf GUI.
Save the data from databroker to hdf file if needed.
.. note:: Requires the databroker package from NSLS2
Parameters
----------
run_id_uid : int or str
Run ID (positive or negative int) or UID (str, full or short) of the run
fpath: str, optional
path to save hdf file
create_each_det: bool, optional
If False, do not create a separate dataset for each detector when the data
size is large; creating per-detector datasets slows down writing of hdf
files with large data size. srx beamline only.
fname_add_version : bool
True: if file already exists, then file version is added to the file name
so that it becomes unique in the current directory. The version is
added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
False: then conversion fails.
completed_scans_only : bool
True: process only completed scans (for which ``stop`` document exists in
the database). Failed scan for which ``stop`` document exists are considered
completed even if not the whole image was scanned. If incomplete scan is
encountered, an exception is thrown.
False: the feature is disabled, incomplete scan will be processed.
file_overwrite_existing : bool, keyword parameter
This option should be used if the existing file should be deleted and replaced
with the new file with the same name. This option should be used with caution,
since the existing file may contain processed data, which will be permanently deleted.
True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,
then new versions of the existing file will always be created.
False: do not overwrite existing files. If the file already exists, then the exception
will be raised (loading the single scan) or the scan will be skipped (loading the range
of scans).
output_to_file : bool, optional
save data to hdf5 file if True
save_scaler : bool, optional
choose whether to save scaler data for the srx beamline; test purpose only.
num_end_lines_excluded : int, optional
remove the last few bad lines
Returns
-------
dict of data in 2D format matching x,y scanning positions
"""
hdr = db[-1]
print("Loading data from database.")
if hdr.start.beamline_id == "HXN":
data = map_data2D_hxn(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
)
elif hdr.start.beamline_id == "xf05id" or hdr.start.beamline_id == "SRX":
data = map_data2D_srx(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
save_scaler=save_scaler,
num_end_lines_excluded=num_end_lines_excluded,
)
elif hdr.start.beamline_id == "XFM":
data = map_data2D_xfm(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
)
elif hdr.start.beamline_id == "TES":
data = map_data2D_tes(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
)
else:
print("Databroker is not setup for this beamline")
return
free_memory_from_handler()
return data
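# Usage sketch (hypothetical run ID and file path):
#
#   data = fetch_data_from_db(1000, fpath="scan2D_1000.h5", output_to_file=True)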
def make_hdf(
start,
end=None,
*,
fname=None,
wd=None,
fname_add_version=False,
completed_scans_only=False,
file_overwrite_existing=False,
prefix="scan2D_",
create_each_det=False,
save_scaler=True,
num_end_lines_excluded=None,
):
"""
Load data from database and save it in HDF5 files.
Parameters
----------
start : int
Run ID (positive or negative int) or of the first scan to convert or Run UID
(str, full or short). If `start` is UID, then `end` must not be provided or set to None.
end : int, optional
scan ID of the last scan to convert. If ``end`` is not specified or None, then
only the scan with ID ``start`` is converted and an exception is raised if an
error occurs during the conversion. If ``end`` is specified, then scans in the
range ``scan``..``end`` are converted and a scan in the sequence is skipped
if there is an issue during the conversion. For example:
.. code-block:: python
make_hdf(2342)
will process scan #2342 and throw an exception if error occurs. On the other hand
.. code-block:: python
make_hdf(2342, 2342)
will process scan #2342 and write data to file if conversion is successful, otherwise
no file will be created. The scans with IDs in the range 2342..2441 can be processed by
calling
.. code-block:: python
make_hdf(2342, 2441)
Scans with IDs in specified range, but not existing in the database, or scans causing errors
during conversion will be skipped.
fname : string, optional keyword parameter
path to save data file when ``end`` is ``None`` (only one scan is processed).
File name is created automatically if ``fname`` is not specified.
wd : str
working directory, the file(s) will be created in this directory. The directory
will be created if it does not exist. If ``wd`` is not specified, then the file(s)
will be saved to the current directory.
fname_add_version : bool, keyword parameter
True: if file already exists, then file version is added to the file name
so that it becomes unique in the current directory. The version is
added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
False: then conversion fails. If ``end`` is ``None``, then
the exception is raised. If ``end`` is specified, the scan is skipped
and the next scan in the range is processed.
completed_scans_only : bool, keyword parameter
True: process only completed scans (for which ``stop`` document exists in
the database). Failed scan for which ``stop`` document exists are considered
completed even if not the whole image was scanned. If incomplete scan is
encountered: an exception is thrown (``end`` is not specified) or the scan
is skipped (``end`` is specified). This feature allows using
``make_hdf`` as
Passed to `ax.plot`
# """
# top, bottom = self.get_border()
# color = kwargs.pop("color", "cyan")
# label = kwargs.pop("label", None)
# etop = self._plot_one_edge(
# ax, top, smooth, sg_kwargs, color=color, label=label, **kwargs
# )
# ebottom = self._plot_one_edge(
# ax, bottom, smooth, sg_kwargs, color=color, **kwargs
# )
# return etop, ebottom
# def _get_contour_levels(self, levels):
# if (levels is not None) or (self.axnorm is None):
# pass
# elif (levels is None) and (self.axnorm == "t"):
# levels = [0.01, 0.1, 0.3, 0.7, 0.99]
# elif (levels is None) and (self.axnorm == "d"):
# levels = [3e-5, 1e-4, 3e-4, 1e-3, 1.7e-3, 2.3e-3]
# elif (levels is None) and (self.axnorm in ["r", "c"]):
# levels = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
# else:
# raise ValueError(
# f"Unrecognized axis normalization {self.axnorm} for default levels."
# )
# return levels
# def _verify_contour_passthrough_kwargs(
# self, ax, clabel_kwargs, edges_kwargs, cbar_kwargs
# ):
# if clabel_kwargs is None:
# clabel_kwargs = dict()
# if edges_kwargs is None:
# edges_kwargs = dict()
# if cbar_kwargs is None:
# cbar_kwargs = dict()
# if "cax" not in cbar_kwargs.keys() and "ax" not in cbar_kwargs.keys():
# cbar_kwargs["ax"] = ax
# return clabel_kwargs, edges_kwargs, cbar_kwargs
# def plot_contours(
# self,
# ax=None,
# label_levels=True,
# cbar=True,
# limit_color_norm=False,
# cbar_kwargs=None,
# fcn=None,
# plot_edges=True,
# edges_kwargs=None,
# clabel_kwargs=None,
# skip_max_clbl=True,
# use_contourf=False,
# gaussian_filter_std=0,
# gaussian_filter_kwargs=None,
# **kwargs,
# ):
# f"""Make a contour plot on `ax` using `ax.contour`.
# Parameters
# ----------
# ax: mpl.axes.Axes, None
# If None, create an `Axes` instance from `plt.subplots`.
# label_levels: bool
# If True, add labels to contours with `ax.clabel`.
# cbar: bool
# If True, create color bar with `labels.z`.
# limit_color_norm: bool
# If True, limit the color range to 0.001 and 0.999 percentile range
# of the z-value, count or otherwise.
# cbar_kwargs: dict, None
# If not None, kwargs passed to `self._make_cbar`.
# fcn: FunctionType, None
# Aggregation function. If None, automatically select in :py:meth:`agg`.
# plot_edges: bool
# If True, plot the smoothed, extreme edges of the 2D histogram.
# edges_kwargs: None, dict
# Passed to {self.plot_edges!s}.
# clabel_kwargs: None, dict
# If not None, dictionary of kwargs passed to `ax.clabel`.
# skip_max_clbl: bool
# If True, don't label the maximum contour. Primarily used when the maximum
# contour is, effectively, a point.
# maximum_color:
# The color for the maximum of the PDF.
# use_contourf: bool
# If True, use `ax.contourf`. Else use `ax.contour`.
# gaussian_filter_std: int
# If > 0, apply `scipy.ndimage.gaussian_filter` to the z-values using the
# standard deviation specified by `gaussian_filter_std`.
# gaussian_filter_kwargs: None, dict
# If not None and gaussian_filter_std > 0, passed to :py:meth:`scipy.ndimage.gaussian_filter`
# kwargs:
# Passed to :py:meth:`ax.pcolormesh`.
# If row or column normalized data, `norm` defaults to `mpl.colors.Normalize(0, 1)`.
# """
# levels = kwargs.pop("levels", None)
# cmap = kwargs.pop("cmap", None)
# norm = kwargs.pop(
# "norm",
# mpl.colors.BoundaryNorm(np.linspace(0, 1, 11), 256, clip=True)
# if self.axnorm in ("c", "r")
# else None,
# )
# linestyles = kwargs.pop(
# "linestyles",
# [
# "-",
# ":",
# "--",
# (0, (7, 3, 1, 3, 1, 3, 1, 3, 1, 3)),
# "--",
# ":",
# "-",
# (0, (7, 3, 1, 3, 1, 3)),
# ],
# )
# if ax is None:
# fig, ax = plt.subplots()
# clabel_kwargs, edges_kwargs, cbar_kwargs = self._verify_contour_passthrough_kwargs(
# ax, clabel_kwargs, edges_kwargs, cbar_kwargs
# )
# inline = clabel_kwargs.pop("inline", True)
# inline_spacing = clabel_kwargs.pop("inline_spacing", -3)
# fmt = clabel_kwargs.pop("fmt", "%s")
# agg = self.agg(fcn=fcn).unstack("x")
# x = self.intervals["x"].mid
# y = self.intervals["y"].mid
# # assert x.size == agg.shape[1]
# # assert y.size == agg.shape[0]
# # HACK: Works around `gb.agg(observed=False)` pandas bug. (GH32381)
# if x.size != agg.shape[1]:
# # agg = agg.reindex(columns=self.intervals["x"])
# agg = agg.reindex(columns=self.categoricals["x"])
# if y.size != agg.shape[0]:
# # agg = agg.reindex(index=self.intervals["y"])
# agg = agg.reindex(index=self.categoricals["y"])
# x, y = self._maybe_convert_to_log_scale(x, y)
# XX, YY = np.meshgrid(x, y)
# C = agg.values
# if gaussian_filter_std:
# from scipy.ndimage import gaussian_filter
# if gaussian_filter_kwargs is None:
# gaussian_filter_kwargs = dict()
# C = gaussian_filter(C, gaussian_filter_std, **gaussian_filter_kwargs)
# C = np.ma.masked_invalid(C)
# assert XX.shape == C.shape
# assert YY.shape == C.shape
# class nf(float):
# # Source: https://matplotlib.org/3.1.0/gallery/images_contours_and_fields/contour_label_demo.html
# # Define a class that forces representation of float to look a certain way
# This removes the trailing zero so '1.0' becomes '1'
# def __repr__(self):
# return str(self).rstrip("0")
# levels = self._get_contour_levels(levels)
# contour_fcn = ax.contour
# if use_contourf:
# contour_fcn = ax.contourf
# if levels is None:
# args = [XX, YY, C]
# else:
# args = [XX, YY, C, levels]
# qset = contour_fcn(*args, linestyles=linestyles, cmap=cmap, norm=norm, **kwargs)
# try:
# args = (qset, levels[:-1] if skip_max_clbl else levels)
# except TypeError:
# # None can't be subscripted.
# args = (qset,)
# lbls = None
# if label_levels:
# qset.levels = [nf(level) for level in qset.levels]
# lbls = ax.clabel(
# *args, inline=inline, inline_spacing=inline_spacing, fmt=fmt
# )
# if plot_edges:
# etop, ebottom = self.plot_edges(ax, **edges_kwargs)
# cbar_or_mappable = qset
# if cbar:
# # Pass `norm` to `self._make_cbar` so that we can choose the ticks to use.
# cbar = self._make_cbar(qset, norm=norm, **cbar_kwargs)
# cbar_or_mappable = cbar
# self._format_axis(ax)
# return ax, lbls, cbar_or_mappable, qset
# def project_1d(self, axis, only_plotted=True, project_counts=False, **kwargs):
# f"""Make a `Hist1D` from the data stored in this `Hist2D`.
# Parameters
# ----------
# axis: str
# "x" or "y", specifying the axis to project into 1D.
# only_plotted: bool
# If True, only pass data that appears in the {self.__class__.__name__} plot
# to the :py:class:`Hist1D`.
# project_counts: bool
# If True, only send the variable plotted along `axis` to :py:class:`Hist1D`.
# Otherwise, send both axes (but not z-values).
# kwargs:
# Passed to `Hist1D`. Primarily to allow specifying `bin_precision`.
# Returns
# -------
# h1: :py:class:`Hist1D`
# """
# axis = axis.lower()
# assert axis in ("x", "y")
# data = self.data
# if data.loc[:, "z"].unique().size >= 2:
# # Either all 1 or 1 and NaN.
# other = "z"
# else:
# possible_axes = {"x", "y"}
# possible_axes.remove(axis)
# other = possible_axes.pop()
# logx = self.log._asdict()[axis]
# x = self.data.loc[:, axis]
# if logx:
# # Need to convert back to regular from log-space for data setting.
# x = 10.0 ** x
# y = self.data.loc[:, other] if not project_counts else None
# logy = False # Defined b/c project_counts option.
# if y is not None:
# # Only select y-values plotted.
# logy = self.log._asdict()[other]
# yedges = self.edges[other].values
# y = y.where((yedges[0] <= y) & (y <= yedges[-1]))
# if logy:
# y = 10.0 ** y
# if only_plotted:
# tk = self.get_plotted_data_boolean_series()
# x = x.loc[tk]
# if y is not None:
# y = y.loc[tk]
# h1 = Hist1D(
# x,
# y=y,
# logx=logx,
# clip_data=False, # Any clipping will be addressed by bins.
# nbins=self.edges[axis].values,
# **kwargs,
# )
# h1.set_log(y=logy) # Need to propagate logy.
# h1.set_labels(x=self.labels._asdict()[axis])
# if not project_counts:
# h1.set_labels(y=self.labels._asdict()[other])
# h1.set_path("auto")
# return h1
# class GridHist2D(object):
# r"""A grid of 2D heatmaps separating the data based on a categorical value.
# Properties
# ----------
# data: pd.DataFrame
# axnorm: str or None
# Specify if column, row, total, or density normalization should be used.
# log: namedtuple
# Contains booleans identifying axes to log-scale.
# nbins: int or str
# Pass to `np.histogram_bin_edges` or `astropy.stats.knuth_bin_width`
# depending on the input.
# labels: namedtuple
# Contains axis labels. Recommend using `labels.TeXlabel`.
# grouped: pd.GroupBy
# The data grouped by the categorical.
# hist2ds: pd.Series
# The `Hist2D` objects created for each axis. Index is the unique
# categorical values.
# fig: mpl.figure.Figure
# The figure upon which the axes are placed.
# axes: pd.Series
# Contains the mpl axes upon which plots are drawn. Index should be
# identical to `hist2ds`.
# cbars: pd.Series
# Contains the colorbar instances. Similar to `hist2ds` and `axes`.
# cnorms: mpl.color.Normalize or pd.Series
# mpl.colors.Normalize instance or a pd.Series of them with one for
# each unique categorical value.
# use_gs: bool
# A draft of this code is written but not enabled, because some minor
# details still need to be worked out. Ideally, if True, use a single
# colorbar for the entire grid.
# Methods
# -------
# set_<>: setters
# For data, nbins, axnorm, log, labels, cnorms.
# make_h2ds:
# Make the `Hist2D` objects.
# make_plots:
# Make the `Hist2D` plots.
# """
# def __init__(self, x, y, cat, z=None):
# r"""Create 2D heatmaps of x, y, and optional z data in a grid for which
# each unique element in `cat` specifies one plot.
# Parameters
# ----------
# x, y, z: pd.Series or np.array
# The data to aggregate. pd.Series is preferred.
# cat: pd.Categorical
# The categorical series used to create subsets of the data for each
# grid element.
# """
# self.set_nbins(101)
# self.set_axnorm(None)
# self.set_log(x=False, y=False)
# self.set_data(x, y, cat, z)
# self._labels = base.AxesLabels("x", "y") # Unsure how else to set defaults.
# self.set_cnorms(None)
# @property
# def data(self):
# return self._data
# @property
# def axnorm(self):
# r"""Axis normalization."""
# return self._axnorm
# @property
# def logger(self):
# return self._log
# @property
# def nbins(self):
# return self._nbins
# @property
# def log(self):
# r"""LogAxes booleans.
# """
# return self._log
# @property
# def labels(self):
# return self._labels
# @property
# def grouped(self):
# return self.data.groupby("cat")
# @property
# def hist2ds(self):
# try:
# return self._h2ds
# except AttributeError:
# return self.make_h2ds()
# @property
# def fig(self):
# try:
# return self._fig
# except AttributeError:
# return self.init_fig()[0]
# @property
# def axes(self):
#
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.6 or newer
import os
import struct
import json
import errno
import time
from math import isnan
from itertools import izip
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
TIMESTAMP_FORMAT = "!L"
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_PERMS = 0644
DIR_PERMS = 0755
class CeresTree:
"""Represents a tree of Ceres metrics contained within a single path on disk
This is the primary Ceres API.
:param root: The directory root of the Ceres tree
See :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
"""
def __init__(self, root):
if isdir(root):
self.root = abspath(root)
else:
raise ValueError("Invalid root directory '%s'" % root)
self.nodeCache = {}
def __repr__(self):
return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
__str__ = __repr__
@classmethod
def createTree(cls, root, **props):
"""Create and returns a new Ceres tree with the given properties
:param root: The root directory of the new Ceres tree
:keyword \*\*props: Arbitrary key-value properties to store as tree metadata
:returns: :class:`CeresTree`
"""
ceresDir = join(root, '.ceres-tree')
if not isdir(ceresDir):
os.makedirs(ceresDir, DIR_PERMS)
for prop,value in props.items():
propFile = join(ceresDir, prop)
fh = open(propFile, 'w')
fh.write(str(value))
fh.close()
return cls(root)
def walk(self, **kwargs):
"""Iterate through the nodes contained in this :class:`CeresTree`
:keyword \*\*kwargs: Options to pass to `os.walk`
:returns: An iterator yielding :class:`CeresNode` objects
"""
for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
yield CeresNode(self, nodePath, fsPath)
def getFilesystemPath(self, nodePath):
"""Get the on-disk path of a Ceres node given a metric name"""
return join(self.root, nodePath.replace('.', os.sep))
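# For example (hypothetical root): with root '/opt/ceres', the metric
# 'servers.web01.cpu' maps to '/opt/ceres/servers/web01/cpu'.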
def getNodePath(self, fsPath):
"""Get the metric name of a Ceres node given the on-disk path"""
fsPath = abspath(fsPath)
if not fsPath.startswith(self.root):
raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))
nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
return nodePath
def hasNode(self, nodePath):
"""Returns whether the Ceres tree contains the given metric"""
return isdir(self.getFilesystemPath(nodePath))
def getNode(self, nodePath):
"""Returns a Ceres node given a metric name
:param nodePath: A metric name
:returns: :class:`CeresNode` or `None`
"""
if nodePath not in self.nodeCache:
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
else:
return None
return self.nodeCache[nodePath]
def find(self, nodePattern, fromTime=None, untilTime=None):
"""Find nodes which match a wildcard pattern, optionally filtering on
a time range
:keyword nodePattern: A glob-style metric wildcard
:keyword fromTime: Optional interval start time in unix-epoch.
:keyword untilTime: Optional interval end time in unix-epoch.
:returns: An iterator yielding :class:`CeresNode` objects
"""
for fsPath in glob(self.getFilesystemPath(nodePattern)):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
node = self.getNode(nodePath)
if fromTime is None and untilTime is None:
yield node
elif node.hasDataForInterval(fromTime, untilTime):
yield node
def createNode(self, nodePath, **properties):
"""Creates a new metric given a new metric name and optional per-node metadata
:keyword nodePath: The new metric name.
:keyword \*\*properties: Arbitrary key-value properties to store as metric metadata.
:returns: :class:`CeresNode`
"""
return CeresNode.create(self, nodePath, **properties)
def store(self, nodePath, datapoints):
"""Store a list of datapoints associated with a metric
:keyword nodePath: The metric name to write to
:keyword datapoints: A list of datapoint tuples: (timestamp, value)
"""
node = self.getNode(nodePath)
if node is None:
raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)
node.write(datapoints)
def fetch(self, nodePath, fromTime, untilTime):
"""Fetch data within a given interval from the given metric
:keyword nodePath: The metric name to fetch from
:keyword fromTime: Requested interval start time in unix-epoch.
:keyword untilTime: Requested interval end time in unix-epoch.
:returns: :class:`TimeSeriesData`
:raises: :class:`NodeNotFound`, :class:`InvalidRequest`, :class:`NoData`
"""
node = self.getNode(nodePath)
if not node:
raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
return node.read(fromTime, untilTime)
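# A usage sketch of the CeresTree API above (directory and metric names are
# hypothetical):
#
#   tree = CeresTree.createTree('/opt/ceres')
#   tree.createNode('servers.web01.cpu', timeStep=60)
#   tree.store('servers.web01.cpu', [(int(time.time()), 42.0)])
#   series = tree.fetch('servers.web01.cpu', fromTime, untilTime)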
class CeresNode(object):
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep',
'sliceCache', 'sliceCachingBehavior')
def __init__(self, tree, nodePath, fsPath):
self.tree = tree
self.nodePath = nodePath
self.fsPath = fsPath
self.metadataFile = join(fsPath, '.ceres-node')
self.timeStep = None
self.sliceCache = None
self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
__str__ = __repr__
@classmethod
def create(cls, tree, nodePath, **properties):
# Create the node directory
fsPath = tree.getFilesystemPath(nodePath)
os.makedirs(fsPath, DIR_PERMS)
# Create the initial metadata
timeStep = properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)
node = cls(tree, nodePath, fsPath)
node.writeMetadata(properties)
# Create the initial data file
#now = int( time.time() )
#baseTime = now - (now % timeStep)
#slice = CeresSlice.create(node, baseTime, timeStep)
return node
@staticmethod
def isNodeDir(path):
return isdir(path) and exists(join(path, '.ceres-node'))
@classmethod
def fromFilesystemPath(cls, fsPath):
dirPath = dirname(fsPath)
while True:
ceresDir = join(dirPath, '.ceres-tree')
if isdir(ceresDir):
tree = CeresTree(dirPath)
nodePath = tree.getNodePath(fsPath)
return cls(tree, nodePath, fsPath)
dirPath = dirname(dirPath)
if dirPath == '/':
raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
@property
def slice_info(self):
return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
def readMetadata(self):
metadata = json.load(open(self.metadataFile, 'r'))
self.timeStep = int(metadata['timeStep'])
return metadata
def writeMetadata(self, metadata):
self.timeStep = int(metadata['timeStep'])
f = open(self.metadataFile, 'w')
json.dump(metadata, f)
f.close()
@property
def slices(self):
if self.sliceCache:
if self.sliceCachingBehavior == 'all':
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
yield self.sliceCache
infos = self.readSlices()
for info in infos[1:]:
yield CeresSlice(self, *info)
else:
if self.sliceCachingBehavior == 'all':
self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
infos = self.readSlices()
if infos:
self.sliceCache = CeresSlice(self, *infos[0])
yield self.sliceCache
for info in infos[1:]:
yield CeresSlice(self, *info)
elif self.sliceCachingBehavior == 'none':
for info in self.readSlices():
yield CeresSlice(self, *info)
else:
raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
def readSlices(self):
if not exists(self.fsPath):
raise NodeDeleted()
slice_info = []
for filename in os.listdir(self.fsPath):
if filename.endswith('.slice'):
startTime, timeStep = filename[:-6].split('@')
slice_info.append((int(startTime), int(timeStep)))
slice_info.sort(reverse=True)
return slice_info
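# Slice files are named '<startTime>@<timeStep>.slice' (parsed above); e.g. a
# slice starting at unix time 1300000000 with a 60-second step would be
# '1300000000@60.slice' (timestamps here are hypothetical).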
def setSliceCachingBehavior(self, behavior):
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.sliceCachingBehavior = behavior
self.sliceCache = None
def clearSliceCache(self):
self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
slices = list(self.slices)
if not slices:
return False
earliestData = slices[-1].startTime
latestData = slices[0].endTime
return ((fromTime is None) or (fromTime < latestData)) and \
((untilTime is None) or (untilTime > earliestData))
def read(self, fromTime, untilTime):
# get biggest timeStep
if self.timeStep is None:
self.readMetadata()
# Normalize the timestamps to fit proper intervals
fromTime = int(fromTime - (fromTime % self.timeStep) + self.timeStep)
untilTime = int(untilTime - (untilTime % self.timeStep) + self.timeStep)
sliceBoundary = None # to know when to split up queries across slices
resultValues = []
earliestData = None
# calculate biggest timeStep in slices with data in requested period
biggest_timeStep = 1
slices_map = {}
for slice_tmp in self.slices:
slices_map[slice_tmp.fsPath] = [slice_tmp.startTime, slice_tmp.endTime, slice_tmp.timeStep]
if fromTime >= slice_tmp.startTime:
if biggest_timeStep < slice_tmp.timeStep: biggest_timeStep = slice_tmp.timeStep
break
elif untilTime >= slice_tmp.startTime:
if biggest_timeStep < slice_tmp.timeStep: biggest_timeStep = slice_tmp.timeStep
slices_arr = []
for slice_tmp in self.slices:
bogus = 0
for item in slices_map.values():
if slice_tmp.startTime > item[0] and slice_tmp.endTime < item[1]:
bogus = 1
if not bogus: slices_arr.append(slice_tmp)
for slice in slices_arr:
# if the requested interval starts after the start of this slice
if fromTime >= slice.startTime:
try:
series = slice.read(fromTime, untilTime)
if slice.timeStep < biggest_timeStep:
series.values = recalculateSeries(series.values, slice.timeStep, biggest_timeStep)
except NoData:
break
earliestData = series.startTime
rightMissing = (untilTime - series.endTime) / biggest_timeStep
rightNulls = [None for i in range(rightMissing - len(resultValues))]
resultValues = series.values + rightNulls + resultValues
break
# or if slice contains data for part of the requested interval
elif untilTime >= slice.startTime:
# Split the request up if it straddles a slice boundary
if (sliceBoundary is not None) and untilTime > sliceBoundary:
requestUntilTime = sliceBoundary
else:
requestUntilTime = untilTime
try:
series = slice.read(slice.startTime, requestUntilTime)
if slice.timeStep < biggest_timeStep:
series.values = recalculateSeries(series.values, slice.timeStep, biggest_timeStep)
"""
The `ModelSerializer` and `HyperlinkedModelSerializer` classes are essentially
shortcuts for automatically creating serializers based on a given model class.
These tests deal with ensuring that we correctly map the model fields onto
an appropriate set of serializer fields for each case.
"""
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import MaxValueValidator, MinValueValidator, MinLengthValidator
from django.db import models
from django.test import TestCase
from django.utils import six
from rest_framework import serializers
from rest_framework.compat import unicode_repr
def dedent(blocktext):
return '\n'.join([line[12:] for line in blocktext.splitlines()[1:-1]])
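# dedent() strips the 12-space indentation used by the triple-quoted expected
# reprs in this module; e.g. dedent('\n            TestSerializer():\n            ')
# evaluates to 'TestSerializer():'.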
# Tests for regular field mappings.
# ---------------------------------
class CustomField(models.Field):
"""
A custom model field simply for testing purposes.
"""
pass
class OneFieldModel(models.Model):
char_field = models.CharField(max_length=100)
class RegularFieldsModel(models.Model):
"""
A model class for testing regular flat fields.
"""
auto_field = models.AutoField(primary_key=True)
big_integer_field = models.BigIntegerField()
boolean_field = models.BooleanField(default=False)
char_field = models.CharField(max_length=100)
comma_separated_integer_field = models.CommaSeparatedIntegerField(max_length=100)
date_field = models.DateField()
datetime_field = models.DateTimeField()
decimal_field = models.DecimalField(max_digits=3, decimal_places=1)
email_field = models.EmailField(max_length=100)
float_field = models.FloatField()
integer_field = models.IntegerField()
null_boolean_field = models.NullBooleanField()
positive_integer_field = models.PositiveIntegerField()
positive_small_integer_field = models.PositiveSmallIntegerField()
slug_field = models.SlugField(max_length=100)
small_integer_field = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
url_field = models.URLField(max_length=100)
custom_field = CustomField()
def method(self):
return 'method'
COLOR_CHOICES = (('red', 'Red'), ('blue', 'Blue'), ('green', 'Green'))
class FieldOptionsModel(models.Model):
value_limit_field = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
length_limit_field = models.CharField(validators=[MinLengthValidator(3)], max_length=12)
blank_field = models.CharField(blank=True, max_length=10)
null_field = models.IntegerField(null=True)
default_field = models.IntegerField(default=0)
descriptive_field = models.IntegerField(help_text='Some help text', verbose_name='A label')
choices_field = models.CharField(max_length=100, choices=COLOR_CHOICES)
class TestModelSerializer(TestCase):
def test_create_method(self):
class TestSerializer(serializers.ModelSerializer):
non_model_field = serializers.CharField()
class Meta:
model = OneFieldModel
fields = ('char_field', 'non_model_field')
serializer = TestSerializer(data={
'char_field': 'foo',
'non_model_field': 'bar',
})
serializer.is_valid()
with self.assertRaises(TypeError) as excinfo:
serializer.save()
msginitial = 'Got a `TypeError` when calling `OneFieldModel.objects.create()`.'
assert str(excinfo.exception).startswith(msginitial)
class TestRegularFieldMappings(TestCase):
def test_regular_fields(self):
"""
Model fields should map to their equivalent serializer fields.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
expected = dedent("""
TestSerializer():
auto_field = IntegerField(read_only=True)
big_integer_field = IntegerField()
boolean_field = BooleanField(required=False)
char_field = CharField(max_length=100)
comma_separated_integer_field = CharField(max_length=100, validators=[<django.core.validators.RegexValidator object>])
date_field = DateField()
datetime_field = DateTimeField()
decimal_field = DecimalField(decimal_places=1, max_digits=3)
email_field = EmailField(max_length=100)
float_field = FloatField()
integer_field = IntegerField()
null_boolean_field = NullBooleanField(required=False)
positive_integer_field = IntegerField()
positive_small_integer_field = IntegerField()
slug_field = SlugField(max_length=100)
small_integer_field = IntegerField()
text_field = CharField(style={'base_template': 'textarea.html'})
time_field = TimeField()
url_field = URLField(max_length=100)
custom_field = ModelField(model_field=<tests.test_model_serializer.CustomField: custom_field>)
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_field_options(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = FieldOptionsModel
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
value_limit_field = IntegerField(max_value=10, min_value=1)
length_limit_field = CharField(max_length=12, min_length=3)
blank_field = CharField(allow_blank=True, max_length=10, required=False)
null_field = IntegerField(allow_null=True, required=False)
default_field = IntegerField(required=False)
descriptive_field = IntegerField(help_text='Some help text', label='A label')
choices_field = ChoiceField(choices=[('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')])
""")
if six.PY2:
# This particular case is too awkward to resolve fully across
# both py2 and py3.
expected = expected.replace(
"('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')",
"(u'red', u'Red'), (u'blue', u'Blue'), (u'green', u'Green')"
)
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_method_field(self):
"""
Properties and methods on the model should be allowed as `Meta.fields`
values, and should map to `ReadOnlyField`.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field', 'method')
expected = dedent("""
TestSerializer():
auto_field = IntegerField(read_only=True)
method = ReadOnlyField()
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_pk_fields(self):
"""
Both `pk` and the actual primary key name are valid in `Meta.fields`.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('pk', 'auto_field')
expected = dedent("""
TestSerializer():
pk = IntegerField(label='Auto field', read_only=True)
auto_field = IntegerField(read_only=True)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_extra_field_kwargs(self):
"""
Ensure `extra_kwargs` are passed to generated fields.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field', 'char_field')
extra_kwargs = {'char_field': {'default': 'extra'}}
expected = dedent("""
TestSerializer():
auto_field = IntegerField(read_only=True)
char_field = CharField(default='extra', max_length=100)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_invalid_field(self):
"""
Field names that do not map to a model field or relationship should
raise a configuration error.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field', 'invalid')
with self.assertRaises(ImproperlyConfigured) as excinfo:
TestSerializer().fields
expected = 'Field name `invalid` is not valid for model `RegularFieldsModel`.'
assert str(excinfo.exception) == expected
def test_missing_field(self):
"""
Fields that have been declared on the serializer class must be included
in the `Meta.fields` if it exists.
"""
class TestSerializer(serializers.ModelSerializer):
missing = serializers.ReadOnlyField()
class Meta:
model = RegularFieldsModel
fields = ('auto_field',)
with self.assertRaises(AssertionError) as excinfo:
TestSerializer().fields
expected = (
"The field 'missing' was declared on serializer TestSerializer, "
"but has not been included in the 'fields' option."
)
assert str(excinfo.exception) == expected
def test_missing_superclass_field(self):
"""
Fields that have been declared on a parent of the serializer class may
be excluded from the `Meta.fields` option.
"""
class TestSerializer(serializers.ModelSerializer):
missing = serializers.ReadOnlyField()
class Meta:
model = RegularFieldsModel
class ChildSerializer(TestSerializer):
missing = serializers.ReadOnlyField()
class Meta:
model = RegularFieldsModel
fields = ('auto_field',)
ChildSerializer().fields
# Tests for relational field mappings.
# ------------------------------------
class ForeignKeyTargetModel(models.Model):
name = models.CharField(max_length=100)
class ManyToManyTargetModel(models.Model):
name = models.CharField(max_length=100)
class OneToOneTargetModel(models.Model):
name = models.CharField(max_length=100)
class ThroughTargetModel(models.Model):
name = models.CharField(max_length=100)
class Supplementary(models.Model):
extra = models.IntegerField()
forwards = models.ForeignKey('ThroughTargetModel')
backwards = models.ForeignKey('RelationalModel')
class RelationalModel(models.Model):
foreign_key = models.ForeignKey(ForeignKeyTargetModel, related_name='reverse_foreign_key')
many_to_many = models.ManyToManyField(ManyToManyTargetModel, related_name='reverse_many_to_many')
one_to_one = models.OneToOneField(OneToOneTargetModel, related_name='reverse_one_to_one')
through = models.ManyToManyField(ThroughTargetModel, through=Supplementary, related_name='reverse_through')
class TestRelationalFieldMappings(TestCase):
def test_pk_relations(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
foreign_key = PrimaryKeyRelatedField(queryset=ForeignKeyTargetModel.objects.all())
one_to_one = PrimaryKeyRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>])
many_to_many = PrimaryKeyRelatedField(many=True, queryset=ManyToManyTargetModel.objects.all())
through = PrimaryKeyRelatedField(many=True, read_only=True)
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_nested_relations(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
depth = 1
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
foreign_key = NestedSerializer(read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
one_to_one = NestedSerializer(read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
many_to_many = NestedSerializer(many=True, read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
through = NestedSerializer(many=True, read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_hyperlinked_relations(self):
class TestSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = RelationalModel
expected = dedent("""
TestSerializer():
url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
foreign_key = HyperlinkedRelatedField(queryset=ForeignKeyTargetModel.objects.all(), view_name='foreignkeytargetmodel-detail')
one_to_one = HyperlinkedRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>], view_name='onetoonetargetmodel-detail')
many_to_many = HyperlinkedRelatedField(many=True, queryset=ManyToManyTargetModel.objects.all(), view_name='manytomanytargetmodel-detail')
through = HyperlinkedRelatedField(many=True, read_only=True, view_name='throughtargetmodel-detail')
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_nested_hyperlinked_relations(self):
class TestSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = RelationalModel
depth = 1
expected = dedent("""
TestSerializer():
url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
foreign_key = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
name = CharField(max_length=100)
one_to_one = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
name = CharField(max_length=100)
many_to_many = NestedSerializer(many=True, read_only=True):
url = HyperlinkedIdentityField(view_name='manytomanytargetmodel-detail')
name = CharField(max_length=100)
through = NestedSerializer(many=True, read_only=True):
url = HyperlinkedIdentityField(view_name='throughtargetmodel-detail')
name = CharField(max_length=100)
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_pk_reverse_foreign_key(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeyTargetModel
fields = ('id', 'name', 'reverse_foreign_key')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_foreign_key = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_pk_reverse_one_to_one(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = OneToOneTargetModel
fields = ('id', 'name', 'reverse_one_to_one')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_one_to_one = PrimaryKeyRelatedField(queryset=RelationalModel.objects.all())
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_pk_reverse_many_to_many(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManyTargetModel
fields = ('id', 'name', 'reverse_many_to_many')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_many_to_many = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_pk_reverse_through(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = ThroughTargetModel
fields = ('id', 'name', 'reverse_through')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_through = PrimaryKeyRelatedField(many=True, read_only=True)
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
class TestIntegration(TestCase):
def setUp(self):
self.foreign_key_target = ForeignKeyTargetModel.objects.create(
name='foreign_key'
)
self.one_to_one_target = OneToOneTargetModel.objects.create(
name='one_to_one'
)
self.many_to_many_targets = [
ManyToManyTargetModel.objects.create(
name='many_to_many (%d)' % idx
) for idx in range(3)
]
self.instance = RelationalModel.objects.create(
foreign_key=self.foreign_key_target,
one_to_one=self.one_to_one_target,
)
self.instance.many_to_many = self.many_to_many_targets
self.instance.save()
def test_pk_retrieval(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
serializer = TestSerializer(self.instance)
expected = {
'id': self.instance.pk,
'foreign_key': self.foreign_key_target.pk,
'one_to_one': self.one_to_one_target.pk,
'many_to_many': [item.pk for item in self.many_to_many_targets],
'through': []
}
self.assertEqual(serializer.data, expected)
def test_pk_create(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
new_foreign_key = ForeignKeyTargetModel.objects.create(
name='foreign_key'
)
new_one_to_one = OneToOneTargetModel.objects.create(
name='one_to_one'
)
new_many_to_many = [
ManyToManyTargetModel.objects.create(
name='new many_to_many (%d)' % idx
) for idx in range(3)
]
data = {
'foreign_key': new_foreign_key.pk,
'one_to_one': new_one_to_one.pk,
'many_to_many': [item.pk for item in new_many_to_many],
}
# Serializer should validate okay.
serializer = TestSerializer(data=data)
assert serializer.is_valid()
# Creating the instance, relationship attributes should be set.
instance = serializer.save()
assert instance.foreign_key.pk == new_foreign_key.pk
assert instance.one_to_one.pk == new_one_to_one.pk
assert [
item.pk for item in instance.many_to_many.all()
] == [
item.pk for item in new_many_to_many
]
assert list(instance.through.all()) == []
# Representation should be correct.
expected = {
'id': instance.pk,
'foreign_key': new_foreign_key.pk,
'one_to_one': new_one_to_one.pk,
'many_to_many': [item.pk for item in new_many_to_many],
'through': []
}
self.assertEqual(serializer.data, expected)
from collections import namedtuple
import dgl
import numpy as np
import scipy.sparse
import torch
from .utils import SizeMismatchException, NodeNotFoundException, EdgeNotFoundException
from .utils import entail_zero_padding, slice_to_list
from .views import NodeView, NodeFeatView, EdgeView
EdgeIndex = namedtuple('EdgeIndex', ['src', 'tgt'])
node_feat_factory = dict
node_attr_factory = dict
single_node_attr_factory = dict
res_init_node_attr = {'node_attr': None}
res_init_node_features = {'node_feat': None, 'node_emb': None}
eid_nids_mapping_factory = dict
nids_eid_mapping_factory = dict
edge_feature_factory = dict
edge_attribute_factory = dict
single_edge_attr_factory = dict
res_init_edge_features = {'edge_feat': None, 'edge_emb': None}
res_init_edge_attributes = {'edge_attr': None}
graph_data_factory = dict
class GraphData(object):
"""
Represent a single graph with additional attributes.
"""
def __init__(self):
self._node_attributes = node_attr_factory()
self._node_features = node_feat_factory(res_init_node_features)
self._edge_indices = EdgeIndex(src=[], tgt=[])
self._eid_nids_mapping = eid_nids_mapping_factory()
self._nids_eid_mapping = nids_eid_mapping_factory()
self._edge_features = edge_feature_factory(res_init_edge_features)
self._edge_attributes = edge_attribute_factory()
self.graph_attributes = graph_data_factory()
# # Graph level data
# @property
# def graph_attributes(self):
# return self._graph_attributes
# Node operations
@property
def nodes(self) -> NodeView:
"""
Return a node view through which the user can access the features and attributes
Returns
-------
node: NodeView
The node view
"""
return NodeView(self)
def get_node_num(self) -> int:
"""
Get the number of nodes in the graph.
Returns
-------
num_nodes: int
The number of nodes in the graph.
"""
return len(self._node_attributes)
def add_nodes(self, node_num: int) -> None:
"""
Add a number of nodes to the graph.
Parameters
----------
node_num: int
The number of nodes to be added
"""
current_num_nodes = self.get_node_num()
# Create placeholders in the node attribute dictionary
for new_node_idx in range(current_num_nodes, current_num_nodes + node_num):
self._node_attributes[new_node_idx] = single_node_attr_factory(**res_init_node_attr)
# Do padding in the node feature dictionary
for key in self._node_features.keys():
self._node_features[key] = entail_zero_padding(self._node_features[key], node_num)
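# Usage sketch (illustrative, not part of the original module): adding nodes
# creates attribute placeholders and zero-pads any existing feature tensors.
# >>> g = GraphData()
# >>> g.add_nodes(2)
# >>> g.get_node_num()
# 2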
# Node feature operations
@property
def node_features(self) -> NodeFeatView:
"""
Access and modify node feature vectors (tensor).
This property can be accessed in a dict-of-dict fashion, with the order being [name][index].
'name' indicates the name of the feature vector. 'index' selects the specific nodes to be accessed.
When accessed independently, returns the feature dictionary with the format {name: tensor}
Examples
--------
>>> g = GraphData()
>>> g.add_nodes(10)
>>> import torch
>>> g.node_features['x'] = torch.rand((10, 10))
>>> g.node_features['x'][0]
torch.Tensor([0.1036, 0.6757, 0.4702, 0.8938, 0.6337, 0.3290, 0.6739, 0.1091, 0.7996, 0.0586])
Returns
-------
NodeFeatView
"""
return self.nodes[:].features
def get_node_features(self, nodes: int or slice) -> dict:
"""
Get the node feature dictionary of the `nodes`
Parameters
----------
nodes: int or slice
The nodes to be accessed
Returns
-------
node_features: dict
The reference dict of the actual tensor
"""
ret = dict()
for key in self._node_features.keys():
if self._node_features[key] is None:
ret[key] = None
else:
ret[key] = self._node_features[key][nodes]
return ret
def get_node_feature_names(self):
return self._node_features.keys()
def set_node_features(self, nodes: int or slice, new_data: dict) -> None:
"""
Set the features of the `nodes` with the given `new_data`.
Parameters
----------
nodes: int or slice
The nodes involved
new_data: dict
The new data to write. Key indicates feature name and value indicates the actual value
Raises
----------
SizeMismatchException
If the size of the new features does not match the node number
"""
# Consistency check
for key in new_data.keys():
if key not in self._node_features or self._node_features[key] is None: # A new feature is added
# If the shape of the new feature does not match the number of existing nodes, then error occurs
if (not isinstance(nodes, slice)) or (
len(slice_to_list(nodes, self.get_node_num())) != self.get_node_num()):
raise SizeMismatchException(
'The new feature `{}\' should cover all existing {} nodes!'.format(key, self.get_node_num()))
# Modification
for key, value in new_data.items():
assert isinstance(value, torch.Tensor), "`{}' is not a tensor. Node features are expected to be tensors.".format(key)
if key not in self._node_features or self._node_features[key] is None:
self._node_features[key] = value
else:
self._node_features[key][nodes] = value
# Node attribute operations
@property
def node_attributes(self) -> dict:
"""
Access node attribute dictionary
Returns
-------
node_attribute_dict: dict
The dict of node attributes
"""
return self._node_attributes
def get_node_attrs(self, nodes: int or slice):
"""
Get the attributes of the given `nodes`.
Parameters
----------
nodes: int or slice
The given node index
Returns
-------
dict
The node attribute dictionary.
"""
if isinstance(nodes, slice):
node_idx = slice_to_list(nodes, self.get_node_num())
else:
node_idx = [nodes]
ret = {}
for idx in node_idx:
ret[idx] = self._node_attributes[idx]
return ret
# Edge views and operations
@property
def edges(self):
"""
Return an edge view of the edges and the corresponding data
Returns
-------
edges: EdgeView
"""
return EdgeView(self)
def get_edge_num(self) -> int:
"""
Get the number of edges in the graph
Returns
-------
num_edges: int
The number of edges
"""
return len(self._edge_indices.src)
def add_edge(self, src: int, tgt: int):
"""
Add one edge.
Parameters
----------
src: int
Source node index
tgt: int
Target node index
"""
# Consistency check
if (src not in range(self.get_node_num())) or (tgt not in range(self.get_node_num())):
raise NodeNotFoundException('Endpoint not in the graph.')
# Append to the mapping list
endpoint_tuple = (src, tgt)
eid = self.get_edge_num()
self._eid_nids_mapping[eid] = endpoint_tuple
self._nids_eid_mapping[endpoint_tuple] = eid
# Add edge
self._edge_indices.src.append(src)
self._edge_indices.tgt.append(tgt)
# Initialize edge feature and attribute
# 1. create placeholder in edge attribute dictionary
self._edge_attributes[eid] = single_edge_attr_factory(res_init_edge_attributes)
# 2. perform zero padding
for key in self._edge_features.keys():
self._edge_features[key] = entail_zero_padding(self._edge_features[key], 1)
def add_edges(self, src: list, tgt: list):
"""
Add a bunch of edges to the graph.
Parameters
----------
src: list of int
Source node indices
tgt: list of int
Target node indices
Raises
------
SizeMismatchException
If the lengths of `src` and `tgt` don't match or one of the lists is empty.
"""
if len(src) == 0:
raise SizeMismatchException('No endpoint in `src`.')
elif len(tgt) == 0:
raise SizeMismatchException('No endpoint in `tgt`.')
else:
if len(src) != len(tgt) and len(src) > 1 and len(tgt) > 1:
raise SizeMismatchException('The numbers of nodes in `src` and `tgt` don\'t match.')
if len(src) == 1:
src = [src[0]] * len(tgt)
elif len(tgt) == 1:
tgt = [tgt[0]] * len(src)
for src_idx, tgt_idx in zip(src, tgt):
self.add_edge(src_idx, tgt_idx)
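# Usage sketch (illustrative): a length-1 endpoint list is broadcast against
# the other list, so these two calls add the same star around node 0:
# >>> g.add_edges([0], [1, 2, 3])
# >>> g.add_edges([0, 0, 0], [1, 2, 3])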
def edge_ids(self, src: int or list, tgt: int or list) -> list:
"""
Convert the given endpoints to edge indices.
Parameters
----------
src: int or list
The index of source node(s).
tgt: int or list
The index of target node(s).
Returns
-------
list
The index of corresponding edges.
"""
if isinstance(src, int):
if isinstance(tgt, int):
try:
return [self._nids_eid_mapping[(src, tgt)]]
except KeyError:
raise EdgeNotFoundException('Edge {} does not exist!'.format((src, tgt)))
elif isinstance(tgt, list):
eid_list = []
try:
for tgt_idx in tgt:
eid_list.append(self._nids_eid_mapping[(src, tgt_idx)])
except KeyError:
raise EdgeNotFoundException('Edge {} does not exist!'.format((src, tgt)))
return eid_list
else:
raise AssertionError("`tgt' must be int or list!")
elif isinstance(src, list):
if isinstance(tgt, int):
eid_list = []
try:
for src_idx in src:
eid_list.append(self._nids_eid_mapping[(src_idx, tgt)])
except KeyError:
raise EdgeNotFoundException('Edge {} does not exist!'.format((src, tgt)))
return eid_list
elif isinstance(tgt, list):
if not len(src) == len(tgt):
raise SizeMismatchException("The length of `src' and `tgt' don't match!")
eid_list = []
try:
for src_idx, tgt_idx in zip(src, tgt):
eid_list.append(self._nids_eid_mapping[(src_idx, tgt_idx)])
except KeyError:
raise EdgeNotFoundException('Edge {} does not exist!'.format((src, tgt)))
return eid_list
else:
raise AssertionError("`tgt' must be int or list!")
else:
raise AssertionError("`src' must be int or list!")
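# Usage sketch (illustrative): all accepted input forms return a list of edge ids.
# >>> g.edge_ids(0, 1)            # single edge -> [eid of (0, 1)]
# >>> g.edge_ids(0, [1, 2])       # one-to-many -> [eid of (0, 1), eid of (0, 2)]
# >>> g.edge_ids([0, 1], [1, 2])  # pairwise; the lists must be equal length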
def get_all_edges(self):
"""
Get all the edges in the graph
Returns
-------
edges: list
List of edges
"""
edges = list()
for i in range(self.get_edge_num()):
edges.append((self._edge_indices.src[i], self._edge_indices.tgt[i]))
return edges
# Edge feature operations
@property
def edge_features(self):
"""Access and modify edge feature tensors, analogous to ``node_features``."""
return self.edges[:].features
def get_edge_feature(self, edges: list):
"""
Get the feature of the given edges.
Parameters
----------
edges: list
Edge indices
Returns
-------
dict
The dictionary containing all relevant features.
"""
ret = {}
for key in self._edge_features.keys():
if self._edge_features[key] is None:
ret[key] = None
else:
ret[key] = self._edge_features[key][edges]
return ret
def get_edge_feature_names(self):
return self._edge_features.keys()
def set_edge_feature(self, edges: int or slice or list, new_data: dict):
"""
Set edge feature
Parameters
----------
edges: list
Edge indices
new_data: dict
New data
Raises
----------
SizeMismatchException
If the size of the new features does not match the node number
"""
# Consistency check
for key in new_data.keys():
if key not in self._edge_features or self._edge_features[key] is None: # A new feature is added
# If the shape of the new feature does not match the number of existing nodes, then error occurs
if (not isinstance(edges, slice)) or (
len(slice_to_list(edges, self.get_edge_num())) != self.get_edge_num()):
raise SizeMismatchException(
'The new feature `{}\' should cover all existing {} edges!'.format(key, self.get_edge_num()))
#!/usr/bin/python
# -------------------------------------------------------------------
# Import statements
# -------------------------------------------------------------------
import math
import os
import re
import sys
from decimal import *
from operator import *
import marvin.db.models.SampleModelClasses as sampledb
import numpy as np
from astropy.io import fits
from flask_login import UserMixin
from marvin.core.caching_query import RelationshipCache
from marvin.db.ArrayUtils import ARRAY_D
from marvin.db.database import db
from sqlalchemy import and_, func, select # for aggregate, other functions
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.engine import reflection
from sqlalchemy.ext.hybrid import hybrid_method, hybrid_property
from sqlalchemy.orm import configure_mappers, deferred, relationship
from sqlalchemy.orm.session import Session
from sqlalchemy.schema import Column
from sqlalchemy.sql import column
from sqlalchemy.types import JSON, Float, Integer, String
from sqlalchemy_utils import Timestamp
from werkzeug.security import check_password_hash, generate_password_hash
try:
from sdss_access.path import Path
except ImportError as e:
Path = None
# ========================
# Define database classes
# ========================
Base = db.Base
class ArrayOps(object):
''' this class adds array functionality '''
__tablename__ = 'arrayops'
__table_args__ = {'extend_existing': True}
@property
def cols(self):
return list(self.__table__.columns._data.keys())
@property
def collist(self):
return ['wavelength', 'flux', 'ivar', 'mask', 'xpos', 'ypos', 'specres']
def getTableName(self):
return self.__table__.name
def matchIndex(self, name=None):
# Get index of correct column
incols = [x for x in self.cols if x in self.collist]
if not any(incols):
return None
elif len(incols) == 1:
idx = self.cols.index(incols[0])
else:
if not name:
print('Multiple columns found. Column name must be specified!')
return None
elif name in self.collist:
idx = self.cols.index(name)
else:
return None
return idx
def filter(self, start, end, name=None):
# Check input types or map string operators
startnum = type(start) == int or type(start) == float
endnum = type(end) == int or type(end) == float
opdict = {'=': eq, '<': lt, '<=': le, '>': gt, '>=': ge, '!=': ne}
if start in opdict.keys() or end in opdict.keys():
opind = list(opdict.keys()).index(start) if start in opdict.keys() else list(opdict.keys()).index(end)
if start in opdict.keys():
start = opdict[list(opdict.keys())[opind]]
if end in opdict.keys():
end = opdict[list(opdict.keys())[opind]]
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
if startnum and endnum:
arr = [x for x in data if x >= start and x <= end]
elif not startnum and endnum:
arr = [x for x in data if start(x, end)]
elif startnum and not endnum:
arr = [x for x in data if end(x, start)]
elif start == eq or end == eq:
arr = [x for x in data if start(x, end)] if start == eq else [x for x in data if end(x, start)]
return arr
else:
return None
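# Usage sketch (illustrative, assuming the Spaxel/Wavelength classes below):
# numeric bounds select a closed interval, while a string operator is mapped
# through `opdict`:
# >>> spaxel.filter('>', 1.0, name='flux')   # flux values x with x > 1.0
# >>> wavelength.filter(3600, 10300)         # values with 3600 <= x <= 10300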
def equal(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x == num]
return arr
else:
return None
def less(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x <= num]
return arr
else:
return None
def greater(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x >= num]
return arr
else:
return None
def getIndices(self, arr):
if self.idx:
indices = [self.__getattribute__(self.cols[self.idx]).index(a) for a in arr]
else:
return None
return indices
class Cube(Base, ArrayOps):
__tablename__ = 'cube'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
specres = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
specresd = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
prespecres = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
prespecresd = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Cube (pk={0}, plate={1}, ifudesign={2}, tag={3})>'.format(self.pk, self.plate, self.ifu.name, self.pipelineInfo.version.version)
@property
def header(self):
'''Returns an astropy header'''
session = Session.object_session(self)
data = session.query(FitsHeaderKeyword.label, FitsHeaderValue.value,
FitsHeaderValue.comment).join(FitsHeaderValue).filter(
FitsHeaderValue.cube == self).all()
hdr = fits.Header(data)
return hdr
@property
def name(self):
return 'manga-{0}-{1}-LOGCUBE.fits.gz'.format(self.plate, self.ifu.name)
@property
def default_mapsname(self):
return 'mangadap-{0}-{1}-default.fits.gz'.format(self.plate, self.ifu.name)
def getPath(self):
sasurl = os.getenv('SAS_URL')
if sasurl:
sasredux = os.path.join(sasurl, 'sas/mangawork/manga/spectro/redux')
path = sasredux
else:
redux = os.getenv('MANGA_SPECTRO_REDUX')
path = redux
version = self.pipelineInfo.version.version
cubepath = os.path.join(path, version, str(self.plate), 'stack')
return cubepath
@property
def location(self):
name = self.name
path = self.getPath()
loc = os.path.join(path, name)
return loc
@property
def image(self):
ifu = '{0}.png'.format(self.ifu.name)
path = self.getPath()
imageloc = os.path.join(path, 'images', ifu)
return imageloc
def header_to_dict(self):
'''Returns a simple python dictionary header'''
values = self.headervals
hdrdict = {str(val.keyword.label): val.value for val in values}
return hdrdict
@property
def plateclass(self):
'''Returns a plate class'''
plate = Plate(self)
return plate
def testhead(self, key):
''' Test existence of header keyword'''
try:
if self.header_to_dict()[key]:
return True
except:
return False
def getFlags(self, bits, name):
session = Session.object_session(self)
# if bits not a digit, return None
if not str(bits).isdigit():
return 'NULL'
else:
bits = int(bits)
# Convert the integer value to list of bits
bitlist = [int(i) for i in '{0:08b}'.format(bits)]
bitlist.reverse()
indices = [i for i, bit in enumerate(bitlist) if bit]
labels = []
for i in indices:
maskbit = session.query(MaskBit).filter_by(flag=name, bit=i).one()
labels.append(maskbit.label)
return labels
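# Decoding sketch (illustrative): for bits=10, '{0:08b}'.format(10) gives
# '00001010'; reversed, the set bits sit at indices [1, 3], and the MaskBit
# rows with those bit numbers supply the returned labels.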
def getQualBits(self, stage='3d'):
''' get quality flags '''
col = 'DRP2QUAL' if stage == '2d' else 'DRP3QUAL'
hdr = self.header_to_dict()
bits = hdr.get(col, None)
return bits
def getQualFlags(self, stage='3d'):
''' get quality flags '''
name = 'MANGA_DRP2QUAL' if stage == '2d' else 'MANGA_DRP3QUAL'
bits = self.getQualBits(stage=stage)
if bits:
return self.getFlags(bits, name)
else:
return None
def getTargFlags(self, targtype=1):
''' get target flags '''
name = 'MANGA_TARGET1' if targtype == 1 else 'MANGA_TARGET2' if targtype == 2 else 'MANGA_TARGET3'
bits = self.getTargBits(targtype=targtype)
if bits:
return self.getFlags(bits, name)
else:
return None
def getTargBits(self, targtype=1):
''' get target bits '''
assert targtype in [1, 2, 3], 'target type can only be 1, 2 or 3'
hdr = self.header_to_dict()
newcol = 'MNGTARG{0}'.format(targtype)
oldcol = 'MNGTRG{0}'.format(targtype)
bits = hdr.get(newcol, hdr.get(oldcol, None))
return bits
def get3DCube(self, extension='flux'):
"""Returns a 3D array of ``extension`` from the cube spaxels.
For example, ``cube.get3DCube('flux')`` will return the original
flux cube with the same ordering as the FITS data cube.
Note that this method seems to be really slow retrieving arrays (this
is especially serious for large IFUs).
"""
session = Session.object_session(self)
spaxels = session.query(getattr(Spaxel, extension)).filter(
Spaxel.cube_pk == self.pk).order_by(Spaxel.x, Spaxel.y).all()
# Assumes cubes are always square (!)
nx = ny = int(np.sqrt(len(spaxels)))
nwave = len(spaxels[0][0])
spArray = np.array(spaxels)
return spArray.transpose().reshape((nwave, ny, nx)).transpose(0, 2, 1)
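# Shape sketch (illustrative): for an IFU with N spaxels (assumed square, so
# n = sqrt(N)) and nwave wavelength samples, the returned array has shape
# (nwave, n, n), i.e. wavelength first, as in the FITS data cube.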
@hybrid_property
def plateifu(self):
'''Returns parameter plate-ifu'''
return '{0}-{1}'.format(self.plate, self.ifu.name)
@plateifu.expression
def plateifu(cls):
return func.concat(Cube.plate, '-', IFUDesign.name)
@hybrid_property
def restwave(self):
if self.target:
redshift = self.target.NSA_objects[0].z
wave = np.array(self.wavelength.wavelength)
restwave = wave / (1 + redshift)
return restwave
else:
return None
@restwave.expression
def restwave(cls):
restw = (func.rest_wavelength(sampledb.NSA.z))
return restw
def has_modelspaxels(self, name=None):
if not name:
name = '(SPX|HYB)'
has_ms = False
model_cubes = [f.modelcube for f in self.dapfiles if re.search('LOGCUBE-{0}'.format(name), f.filename)]
if model_cubes:
mc = sum(model_cubes, [])
if mc:
from marvin.db.models.DapModelClasses import ModelSpaxel
session = Session.object_session(mc[0])
ms = session.query(ModelSpaxel).filter_by(modelcube_pk=mc[0].pk).first()
has_ms = True if ms else False
return has_ms
def has_spaxels(self):
if len(self.spaxels) > 0:
return True
else:
return False
def has_fibers(self):
if len(self.fibers) > 0:
return True
else:
return False
def set_quality(stage):
''' produces cube quality flag '''
col = 'DRP2QUAL' if stage == '2d' else 'DRP3QUAL'
label = 'cubequal{0}'.format(stage)
kwarg = 'DRP{0}QUAL'.format(stage[0])
@hybrid_property
def quality(self):
bits = self.getQualBits(stage=stage)
return int(bits)
@quality.expression
def quality(cls):
return select([FitsHeaderValue.value.cast(Integer)]).\
where(and_(FitsHeaderKeyword.pk==FitsHeaderValue.fits_header_keyword_pk,
FitsHeaderKeyword.label.ilike(kwarg),
FitsHeaderValue.cube_pk==cls.pk)).\
label(label)
return quality
def set_manga_target(targtype):
''' produces manga_target flags '''
label = 'mngtrg{0}'.format(targtype)
kwarg = 'MNGT%RG{0}'.format(targtype)
@hybrid_property
def target(self):
bits = self.getTargBits(targtype=targtype)
return int(bits)
@target.expression
def target(cls):
return select([FitsHeaderValue.value.cast(Integer)]).\
where(and_(FitsHeaderKeyword.pk==FitsHeaderValue.fits_header_keyword_pk,
FitsHeaderKeyword.label.ilike(kwarg),
FitsHeaderValue.cube_pk==cls.pk)).\
label(label)
return target
setattr(Cube, 'manga_target1', set_manga_target(1))
setattr(Cube, 'manga_target2', set_manga_target(2))
setattr(Cube, 'manga_target3', set_manga_target(3))
setattr(Cube, 'quality', set_quality('3d'))
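# Usage sketch (illustrative): the setattr calls above attach hybrid properties,
# so the same attribute works on instances and inside SQL expressions:
# >>> cube.manga_target1                            # int decoded from the header
# >>> session.query(Cube).filter(Cube.quality > 0)  # subselect on FitsHeaderValue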
class Wavelength(Base, ArrayOps):
__tablename__ = 'wavelength'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
wavelength = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Wavelength (pk={0})>'.format(self.pk)
class Spaxel(Base, ArrayOps):
__tablename__ = 'spaxel'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
flux = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
ivar = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
mask = deferred(Column(ARRAY_D(Integer, zero_indexes=True)))
disp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
predisp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Spaxel (pk={0}, x={1}, y={2})'.format(self.pk, self.x, self.y)
@hybrid_method
def sum(self, name=None):
total = sum(self.flux)
return total
@sum.expression
def sum(cls):
# return select(func.sum(func.unnest(cls.flux))).select_from(func.unnest(cls.flux)).label('totalflux')
return select([func.sum(column('totalflux'))]).select_from(func.unnest(cls.flux).alias('totalflux'))
class RssFiber(Base, ArrayOps):
__tablename__ = 'rssfiber'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
1 0 1 0 1 1 2 2
V 2 -1 0 1 0 0 0 2 1 2 0 1 0 1 1 1 1 2
W 0 -1 -1 -1 0 -1 -1 0 -1 0 -1 -1 -1 -1 0 0 -1 0 2
Y 0 -1 0 0 2 0 0 0 0 0 0 0 -1 0 0 0 0 0 0 2
A C D E F G H I K L M N P Q R S T V W Y""",
)
mat = SubsMat.SeqMat(MatrixInfo.structure)
self.assertEqual(len(mat), 210)
self.checkMatrix(
mat,
"""\
A 4
C -2 11
D -1 -7 6
E 0 -3 2 5
F -3 -2 -5 -4 7
G 0 -6 -1 -2 -6 5
H -2 -6 0 -2 -2 -3 8
I -2 -4 -3 -3 1 -5 -5 6
K -1 -4 -1 1 -3 -3 0 -3 5
L -2 -6 -6 -4 2 -5 -3 2 -2 5
M 0 -5 -4 -2 0 -4 -2 1 -1 3 8
N -1 -6 2 0 -3 -1 2 -3 0 -3 -2 5
P -1 -8 -1 -1 -5 -2 -3 -4 -1 -3 -6 -2 7
Q 0 -3 0 2 -4 -2 0 -5 1 -3 1 0 -2 6
R -1 -2 -2 0 -4 -2 0 -3 2 -3 -4 -1 -2 1 7
S 0 -4 0 -1 -3 -1 -2 -3 -1 -4 -4 0 -1 -1 0 4
T -1 -5 -1 0 -3 -3 -2 -2 0 -3 -2 0 -1 0 -1 1 5
V 0 -4 -4 -2 -1 -4 -2 2 -3 1 0 -4 -4 -2 -3 -3 -1 5
W -3 -6 -6 -6 2 -4 -3 -2 -3 -1 -2 -5 -4 -5 -2 -5 -5 -4 10
Y -3 -6 -3 -2 3 -3 0 -1 -2 -2 -1 -1 -6 -3 -1 -2 -2 -1 2 7
A C D E F G H I K L M N P Q R S T V W Y""",
)
def test_sequence_matrix_make_entropy(self):
blosum30 = SubsMat.SeqMat(MatrixInfo.blosum30)
blosum30.make_entropy()
self.assertAlmostEqual(blosum30.entropy, -644.0119, places=4)
def test_sequence_matrix_str(self):
blosum30 = SubsMat.SeqMat(MatrixInfo.blosum30)
self.assertEqual(
str(blosum30),
"""\
A 4
B 0 5
C -3 -2 17
D 0 5 -3 9
E 0 0 1 1 6
F -2 -3 -3 -5 -4 10
G 0 0 -4 -1 -2 -3 8
H -2 -2 -5 -2 0 -3 -3 14
I 0 -2 -2 -4 -3 0 -1 -2 6
K 0 0 -3 0 2 -1 -1 -2 -2 4
L -1 -1 0 -1 -1 2 -2 -1 2 -2 4
M 1 -2 -2 -3 -1 -2 -2 2 1 2 2 6
N 0 4 -1 1 -1 -1 0 -1 0 0 -2 0 8
P -1 -2 -3 -1 1 -4 -1 1 -3 1 -3 -4 -3 11
Q 1 -1 -2 -1 2 -3 -2 0 -2 0 -2 -1 -1 0 8
R -1 -2 -2 -1 -1 -1 -2 -1 -3 1 -2 0 -2 -1 3 8
S 1 0 -2 0 0 -1 0 -1 -1 0 -2 -2 0 -1 -1 -1 4
T 1 0 -2 -1 -2 -2 -2 -2 0 -1 0 0 1 0 0 -3 2 5
V 1 -2 -2 -2 -3 1 -3 -3 4 -2 1 0 -2 -4 -3 -1 -1 1 5
W -5 -5 -2 -4 -1 1 1 -5 -3 -2 -2 -3 -7 -3 -1 0 -3 -5 -3 20
X 0 -1 -2 -1 -1 -1 -1 -1 0 0 0 0 0 -1 0 -1 0 0 0 -2 -1
Y -4 -3 -6 -1 -2 3 -3 0 -1 -1 3 -1 -4 -2 -1 0 -2 -1 1 5 -1 9
Z 0 0 0 0 5 -4 -2 0 -3 1 -1 -1 -1 0 4 0 -1 -1 -3 -1 0 -2 4
A B C D E F G H I K L M N P Q R S T V W X Y Z""",
)
def test_sequence_matrix_operations(self):
blosum90 = SubsMat.SeqMat(MatrixInfo.blosum90)
blosum30 = SubsMat.SeqMat(MatrixInfo.blosum30)
# subtraction
seqmatdiff = blosum90 - blosum30
self.assertEqual(seqmatdiff, -321)
# multiplication
seqmatmul = blosum90 * blosum30
self.assertEqual(
str(seqmatmul),
"""\
A 20
B 0 20
C 3 8 153
D 0 20 15 63
E 0 0 -6 1 36
F 6 12 9 25 20 70
G 0 0 16 2 6 15 48
H 4 2 25 4 0 6 9 112
I 0 10 4 20 12 0 5 8 30
K 0 0 12 0 0 4 2 2 8 24
L 2 5 0 5 4 0 10 4 2 6 20
M -2 8 4 12 3 2 8 -6 1 -4 4 42
N 0 16 4 1 1 4 0 0 0 0 8 0 56
P 1 6 12 3 -2 16 3 -3 12 -2 12 12 9 88
Q -1 1 8 1 4 12 6 0 8 0 6 0 0 0 56
R 2 4 10 3 1 4 6 0 12 2 6 0 2 3 3 48
S 1 0 4 0 0 3 0 2 3 0 6 4 0 2 1 1 20
T 0 0 4 2 2 6 6 4 0 1 0 0 0 0 0 6 2 30
V -1 8 4 10 9 -2 15 12 12 6 0 0 8 12 9 3 2 -1 25
W 20 30 8 24 5 0 -4 15 12 10 6 6 35 15 3 0 12 20 9 220
X 0 2 6 2 2 2 2 2 0 0 0 0 0 2 0 2 0 0 0 6 2
Y 12 12 24 4 8 9 15 0 2 3 -6 2 12 8 3 0 6 2 -3 10 2 72
Z 0 0 0 0 20 16 6 0 12 1 4 2 1 0 16 0 1 1 9 4 0 6 16
A B C D E F G H I K L M N P Q R S T V W X Y Z""",
)
# Addition
seqmatadd = blosum90 + blosum30
self.assertEqual(
str(seqmatadd),
"""\
A 9
B -2 9
C -4 -6 26
D -3 9 -8 16
E -1 0 -5 2 12
F -5 -7 -6 -10 -9 17
G 0 -2 -8 -3 -5 -8 14
H -4 -3 -10 -4 -1 -5 -6 22
I -2 -7 -4 -9 -7 -1 -6 -6 11
K -1 -1 -7 -1 2 -5 -3 -3 -6 10
L -3 -6 -2 -6 -5 2 -7 -5 3 -5 9
M -1 -6 -4 -7 -4 -3 -6 -1 2 0 4 13
N -2 8 -5 2 -2 -5 -1 -1 -4 0 -6 -3 15
P -2 -5 -7 -4 -1 -8 -4 -2 -7 -1 -7 -7 -6 19
Q 0 -2 -6 -2 4 -7 -5 1 -6 1 -5 -1 -1 -2 15
R -3 -4 -7 -4 -2 -5 -5 -1 -7 3 -5 -2 -3 -4 4 14
S 2 0 -4 -1 -1 -4 -1 -3 -4 -1 -5 -4 0 -3 -2 -2 9
T 1 -1 -4 -3 -3 -5 -5 -4 -1 -2 -2 -1 1 -2 -1 -5 3 11
V 0 -6 -4 -7 -6 -1 -8 -7 7 -5 1 0 -6 -7 -6 -4 -3 0 10
W -9 -11 -6 -10 -6 1 -3 -8 -7 -7 -5 -5 -12 -8 -4 -4 -7 -9 -6 31
X -1 -3 -5 -3 -3 -3 -3 -3 -2 -1 -2 -1 -2 -3 -1 -3 -1 -1 -2 -5 -3
Y -7 -7 -10 -5 -6 6 -8 1 -3 -4 1 -3 -7 -6 -4 -3 -5 -3 -2 7 -3 17
Z -1 0 -5 0 9 -8 -5 0 -7 2 -5 -3 -2 -2 8 0 -2 -2 -6 -5 -1 -5 8
A B C D E F G H I K L M N P Q R S T V W X Y Z""",
)
{
'project_id': 'path',
'location_id': 'path',
'replica_id': 'path',
'service_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__compute_project_replica_service_get
)
def __compute_project_replica_service_list(
self,
project_id,
location_id,
replica_id,
**kwargs
):
"""List compute/replica.service # noqa: E501
List compute/replica.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.compute_project_replica_service_list(project_id, location_id, replica_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
replica_id (str): Replica Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[ResourceService]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['replica_id'] = \
replica_id
return self.call_with_http_info(**kwargs)
self.compute_project_replica_service_list = _Endpoint(
settings={
'response_type': ([ResourceService],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/compute/{locationId}/project/{projectId}/replica/{replicaId}/service',
'operation_id': 'compute_project_replica_service_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'replica_id',
],
'required': [
'project_id',
'location_id',
'replica_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'replica_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'replica_id': 'replicaId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'replica_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__compute_project_replica_service_list
)
def __compute_project_replica_tag_create(
self,
project_id,
location_id,
replica_id,
tag,
**kwargs
):
"""Create compute/replica.tag # noqa: E501
Create compute/replica.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.compute_project_replica_tag_create(project_id, location_id, replica_id, tag, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
replica_id (str): Replica Id
tag (Tag):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['replica_id'] = \
replica_id
kwargs['tag'] = \
tag
return self.call_with_http_info(**kwargs)
self.compute_project_replica_tag_create = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/compute/{locationId}/project/{projectId}/replica/{replicaId}/tag',
'operation_id': 'compute_project_replica_tag_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'replica_id',
'tag',
],
'required': [
'project_id',
'location_id',
'replica_id',
'tag',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'replica_id':
(str,),
'tag':
(Tag,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'replica_id': 'replicaId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'replica_id': 'path',
'tag': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__compute_project_replica_tag_create
)
def __compute_project_replica_tag_delete(
self,
project_id,
location_id,
replica_id,
tag_id,
**kwargs
):
"""Delete compute/replica.tag # noqa: E501
Delete compute/replica.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.compute_project_replica_tag_delete(project_id, location_id, replica_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
replica_id (str): Replica Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['replica_id'] = \
replica_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.compute_project_replica_tag_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/compute/{locationId}/project/{projectId}/replica/{replicaId}/tag/{tagId}',
'operation_id': 'compute_project_replica_tag_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'replica_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'replica_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'replica_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'replica_id': 'replicaId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'replica_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__compute_project_replica_tag_delete
)
def __compute_project_replica_tag_get(
self,
project_id,
location_id,
replica_id,
tag_id,
**kwargs
):
"""Get compute/replica.tag # noqa: E501
Get compute/replica.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.compute_project_replica_tag_get(project_id, location_id, replica_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
replica_id (str): Replica Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
# coding=utf-8
# Copyright 2021-2022 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team and & DALL·E Mini team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DalleBart model. """
import math
from functools import partial
from typing import Any, Dict, Optional, Tuple
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from einops import rearrange
from flax.core.frozen_dict import unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen import partitioning as nn_partitioning
from flax.linen.linear import PrecisionLike
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import custom_jvp, lax
from jax.random import PRNGKey
from transformers.generation_flax_utils import FlaxSampleOutput
from transformers.modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxSeq2SeqLMOutput,
)
from transformers.modeling_flax_utils import ACT2FN
from transformers.models.bart.modeling_flax_bart import (
FlaxBartAttention,
FlaxBartForConditionalGeneration,
FlaxBartForConditionalGenerationModule,
FlaxBartModule,
)
from transformers.utils import logging
from .configuration import DalleBartConfig
from .utils import PretrainedFromWandbMixin
logger = logging.get_logger(__name__)
remat = nn_partitioning.remat
def smelu(beta: Any = 1.0):
"""
Implementation of "Real World Large Scale Recommendation Systems Reproducibility and Smooth Activations"
https://arxiv.org/abs/2202.06499
"""
@custom_jvp
@jax.jit
def _smelu(x: Any) -> Any:
x = jnp.where(x <= -beta, 0.0, x)
return jnp.where(x >= beta, x, jnp.square(x + beta) / (4 * beta))
_smelu.defjvps(
lambda g, ans, x: lax.select(
x == -beta,
lax.full_like(g, 0),
lax.select(x == beta, lax.full_like(g, 1), g),
)
)
return _smelu
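# Sanity sketch (illustrative, not part of the original module): with beta=1,
# smelu is 0 for x <= -1, x for x >= 1, and (x + 1)^2 / 4 in between.
# >>> act = smelu(beta=1.0)
# >>> act(jnp.array([-2.0, 0.0, 2.0]))   # -> [0.0, 0.25, 2.0]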
ACT2FN.update({"smelu": smelu()})
# deepnet initialization
def deepnet_init(gain=1):
init = jax.nn.initializers.glorot_normal()
def _init(*args, **kwargs):
return gain * init(*args, **kwargs)
return _init
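# Usage sketch (illustrative): deepnet_init simply rescales glorot_normal
# samples by a constant gain.
# >>> init = deepnet_init(gain=2.0)
# >>> w = init(jax.random.PRNGKey(0), (4, 4))   # a glorot_normal draw * 2.0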
# deepnet gain
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
class RMSNorm(nn.Module):
"""
From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467
Adapted from flax.linen.LayerNorm
"""
epsilon: float = 1e-6
dtype: Any = jnp.float32
param_dtype: Any = jnp.float32
use_scale: bool = True
scale_init: Any = jax.nn.initializers.ones
@nn.compact
def __call__(self, x):
reduction_axes = (-1,)
feature_axes = (-1,)
rms_sq = self._compute_rms_sq(x, reduction_axes)
return self._normalize(
self,
x,
rms_sq,
reduction_axes,
feature_axes,
self.dtype,
self.param_dtype,
self.epsilon,
self.use_scale,
self.scale_init,
)
def _compute_rms_sq(self, x, axes):
x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
rms_sq = jnp.mean(jax.lax.square(x), axes)
return rms_sq
def _normalize(
self,
mdl,
x,
rms_sq,
reduction_axes,
feature_axes,
dtype,
param_dtype,
epsilon,
use_scale,
scale_init,
):
reduction_axes = nn.normalization._canonicalize_axes(x.ndim, reduction_axes)
feature_axes = nn.normalization._canonicalize_axes(x.ndim, feature_axes)
stats_shape = list(x.shape)
for axis in reduction_axes:
stats_shape[axis] = 1
rms_sq = rms_sq.reshape(stats_shape)
feature_shape = [1] * x.ndim
reduced_feature_shape = []
for ax in feature_axes:
feature_shape[ax] = x.shape[ax]
reduced_feature_shape.append(x.shape[ax])
mul = lax.rsqrt(rms_sq + epsilon)
if use_scale:
scale = mdl.param(
"scale", scale_init, reduced_feature_shape, param_dtype
).reshape(feature_shape)
mul *= scale
y = mul * x
return jnp.asarray(y, dtype)
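# Usage sketch (illustrative): RMSNorm divides by the root-mean-square over the
# last axis (no mean subtraction), then applies an optional learned scale.
# >>> layer = RMSNorm()
# >>> params = layer.init(jax.random.PRNGKey(0), jnp.ones((2, 8)))
# >>> y = layer.apply(params, jnp.ones((2, 8)))   # ~all-ones output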
def norm(type, *args, **kwargs):
if type == "rmsnorm":
return RMSNorm(*args, **kwargs)
elif type == "layernorm":
return nn.LayerNorm(*args, **kwargs)
else:
raise ValueError(f"Unknown norm type {type}")
def dot_product_attention_weights(
query: Any,
key: Any,
bias: Optional[Any] = None,
mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
):
"""
Computes dot-product attention weights given query and key.
mask is included into the bias.
Adapted from flax.linen.attention.dot_product_attention_weights"
"""
assert query.ndim == key.ndim, "q, k must have same rank."
assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
assert query.shape[-1] == key.shape[-1], "q, k depths must match."
# calculate attention matrix
depth = query.shape[-1]
query = query / jnp.sqrt(depth).astype(dtype)
# attn weight shape is (batch..., num_heads, q_length, kv_length)
attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
# apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias
# add relative position
if embed_pos is not None:
attn_weights = attn_weights + embed_pos
# normalize the attention weights
if not is_encoder or sinkhorn_iters == 1:
# sinkhorn does not work for causal (leaks info of future tokens into past)
attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
else:
# adapted from https://github.com/lucidrains/sinkhorn-transformer
for i in range(sinkhorn_iters):
# when causal, some attn_weights have been set to -inf through bias
if i % 2 == 0:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True)
else:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True)
if mask is not None:
attn_weights = jnp.where(mask, attn_weights, -jnp.inf)
attn_weights = jnp.exp(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
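# Shape sketch (illustrative): with query (..., q_length, num_heads, depth) and
# key (..., kv_length, num_heads, depth), the einsum above yields weights of
# shape (..., num_heads, q_length, kv_length). When sinkhorn_iters > 1 (encoder
# only), normalization alternates between the key and query axes instead of a
# single softmax.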
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
q_length: int = None
k_length: int = None
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
)
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.out_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.config.use_head_scale:
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
self.tau = self.param(
"tau",
jax.nn.initializers.constant(self.config.tau_init),
(1, self.num_heads, 1, 1),
)
if self.config.use_swin_position_embeddings:
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
embedding_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size = hidden_states.shape[0]
# get query proj
query_states = self.q_proj(hidden_states)
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self.k_proj(key_value_states)
value_states = self.v_proj(key_value_states)
else:
# self_attention
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = self._split_heads(query_states)
key_states = self._split_heads(key_states)
value_states = self._split_heads(value_states)
# handle cache prepare causal attention mask
if self.causal:
query_length, key_length = query_states.shape[1], key_states.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask,
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
[ 227, 137, 157 ],
[ 227, 137, 158 ],
[ 227, 137, 159 ],
[ 227, 138, 128 ],
[ 227, 138, 129 ],
[ 227, 138, 130 ],
[ 227, 138, 131 ],
[ 227, 138, 132 ],
[ 227, 138, 133 ],
[ 227, 138, 134 ],
[ 227, 138, 135 ],
[ 227, 138, 136 ],
[ 227, 138, 137 ],
[ 227, 138, 177 ],
[ 227, 138, 178 ],
[ 227, 138, 179 ],
[ 227, 138, 180 ],
[ 227, 138, 181 ],
[ 227, 138, 182 ],
[ 227, 138, 183 ],
[ 227, 138, 184 ],
[ 227, 138, 185 ],
[ 227, 138, 186 ],
[ 227, 138, 187 ],
[ 227, 138, 188 ],
[ 227, 138, 189 ],
[ 227, 138, 190 ],
[ 227, 138, 191 ],
[ 234, 160, 176 ],
[ 234, 160, 177 ],
[ 234, 160, 178 ],
[ 234, 160, 179 ],
[ 234, 160, 180 ],
[ 234, 160, 181 ],
[ 240, 144, 132, 135 ],
[ 240, 144, 132, 136 ],
[ 240, 144, 132, 137 ],
[ 240, 144, 132, 138 ],
[ 240, 144, 132, 139 ],
[ 240, 144, 132, 140 ],
[ 240, 144, 132, 141 ],
[ 240, 144, 132, 142 ],
[ 240, 144, 132, 143 ],
[ 240, 144, 132, 144 ],
[ 240, 144, 132, 145 ],
[ 240, 144, 132, 146 ],
[ 240, 144, 132, 147 ],
[ 240, 144, 132, 148 ],
[ 240, 144, 132, 149 ],
[ 240, 144, 132, 150 ],
[ 240, 144, 132, 151 ],
[ 240, 144, 132, 152 ],
[ 240, 144, 132, 153 ],
[ 240, 144, 132, 154 ],
[ 240, 144, 132, 155 ],
[ 240, 144, 132, 156 ],
[ 240, 144, 132, 157 ],
[ 240, 144, 132, 158 ],
[ 240, 144, 132, 159 ],
[ 240, 144, 132, 160 ],
[ 240, 144, 132, 161 ],
[ 240, 144, 132, 162 ],
[ 240, 144, 132, 163 ],
[ 240, 144, 132, 164 ],
[ 240, 144, 132, 165 ],
[ 240, 144, 132, 166 ],
[ 240, 144, 132, 167 ],
[ 240, 144, 132, 168 ],
[ 240, 144, 132, 169 ],
[ 240, 144, 132, 170 ],
[ 240, 144, 132, 171 ],
[ 240, 144, 132, 172 ],
[ 240, 144, 132, 173 ],
[ 240, 144, 132, 174 ],
[ 240, 144, 132, 175 ],
[ 240, 144, 132, 176 ],
[ 240, 144, 132, 177 ],
[ 240, 144, 132, 178 ],
[ 240, 144, 132, 179 ],
[ 240, 144, 133, 181 ],
[ 240, 144, 133, 182 ],
[ 240, 144, 133, 183 ],
[ 240, 144, 133, 184 ],
[ 240, 144, 134, 138 ],
[ 240, 144, 134, 139 ],
[ 240, 144, 139, 161 ],
[ 240, 144, 139, 162 ],
[ 240, 144, 139, 163 ],
[ 240, 144, 139, 164 ],
[ 240, 144, 139, 165 ],
[ 240, 144, 139, 166 ],
[ 240, 144, 139, 167 ],
[ 240, 144, 139, 168 ],
[ 240, 144, 139, 169 ],
[ 240, 144, 139, 170 ],
[ 240, 144, 139, 171 ],
[ 240, 144, 139, 172 ],
[ 240, 144, 139, 173 ],
[ 240, 144, 139, 174 ],
[ 240, 144, 139, 175 ],
[ 240, 144, 139, 176 ],
[ 240, 144, 139, 177 ],
[ 240, 144, 139, 178 ],
[ 240, 144, 139, 179 ],
[ 240, 144, 139, 180 ],
[ 240, 144, 139, 181 ],
[ 240, 144, 139, 182 ],
[ 240, 144, 139, 183 ],
[ 240, 144, 139, 184 ],
[ 240, 144, 139, 185 ],
[ 240, 144, 139, 186 ],
[ 240, 144, 139, 187 ],
[ 240, 144, 140, 160 ],
[ 240, 144, 140, 161 ],
[ 240, 144, 140, 162 ],
[ 240, 144, 140, 163 ],
[ 240, 144, 161, 152 ],
[ 240, 144, 161, 153 ],
[ 240, 144, 161, 154 ],
[ 240, 144, 161, 155 ],
[ 240, 144, 161, 156 ],
[ 240, 144, 161, 157 ],
[ 240, 144, 161, 158 ],
[ 240, 144, 161, 159 ],
[ 240, 144, 161, 185 ],
[ 240, 144, 161, 186 ],
[ 240, 144, 161, 187 ],
[ 240, 144, 161, 188 ],
[ 240, 144, 161, 189 ],
[ 240, 144, 161, 190 ],
[ 240, 144, 161, 191 ],
[ 240, 144, 162, 167 ],
[ 240, 144, 162, 168 ],
[ 240, 144, 162, 169 ],
[ 240, 144, 162, 170 ],
[ 240, 144, 162, 171 ],
[ 240, 144, 162, 172 ],
[ 240, 144, 162, 173 ],
[ 240, 144, 162, 174 ],
[ 240, 144, 162, 175 ],
[ 240, 144, 163, 187 ],
[ 240, 144, 163, 188 ],
[ 240, 144, 163, 189 ],
[ 240, 144, 163, 190 ],
[ 240, 144, 163, 191 ],
[ 240, 144, 164, 150 ],
[ 240, 144, 164, 151 ],
[ 240, 144, 164, 152 ],
[ 240, 144, 164, 153 ],
[ 240, 144, 164, 154 ],
[ 240, 144, 164, 155 ],
[ 240, 144, 166, 188 ],
[ 240, 144, 166, 189 ],
[ 240, 144, 167, 128 ],
[ 240, 144, 167, 129 ],
[ 240, 144, 167, 130 ],
[ 240, 144, 167, 131 ],
[ 240, 144, 167, 132 ],
[ 240, 144, 167, 133 ],
[ 240, 144, 167, 134 ],
[ 240, 144, 167, 135 ],
[ 240, 144, 167, 136 ],
[ 240, 144, 167, 137 ],
[ 240, 144, 167, 138 ],
[ 240, 144, 167, 139 ],
[ 240, 144, 167, 140 ],
[ 240, 144, 167, 141 ],
[ 240, 144, 167, 142 ],
[ 240, 144, 167, 143 ],
[ 240, 144, 167, 146 ],
[ 240, 144, 167, 147 ],
[ 240, 144, 167, 148 ],
[ 240, 144, 167, 149 ],
[ 240, 144, 167, 150 ],
[ 240, 144, 167, 151 ],
[ 240, 144, 167, 152 ],
[ 240, 144, 167, 153 ],
[ 240, 144, 167, 154 ],
[ 240, 144, 167, 155 ],
[ 240, 144, 167, 156 ],
[ 240, 144, 167, 157 ],
[ 240, 144, 167, 158 ],
[ 240, 144, 167, 159 ],
[ 240, 144, 167, 160 ],
[ 240, 144, 167, 161 ],
[ 240, 144, 167, 162 ],
[ 240, 144, 167, 163 ],
[ 240, 144, 167, 164 ],
[ 240, 144, 167, 165 ],
[ 240, 144, 167, 166 ],
[ 240, 144, 167, 167 ],
[ 240, 144, 167, 168 ],
[ 240, 144, 167, 169 ],
[ 240, 144, 167, 170 ],
[ 240, 144, 167, 171 ],
[ 240, 144, 167, 172 ],
[ 240, 144, 167, 173 ],
[ 240, 144, 167, 174 ],
[ 240, 144, 167, 175 ],
[ 240, 144, 167, 176 ],
[ 240, 144, 167, 177 ],
[ 240, 144, 167, 178 ],
[ 240, 144, 167, 179 ],
[ 240, 144, 167, 180 ],
[ 240, 144, 167, 181 ],
[ 240, 144, 167, 182 ],
[ 240, 144, 167, 183 ],
[ 240, 144, 167, 184 ],
[ 240, 144, 167, 185 ],
[ 240, 144, 167, 186 ],
[ 240, 144, 167, 187 ],
[ 240, 144, 167, 188 ],
[ 240, 144, 167, 189 ],
[ 240, 144, 167, 190 ],
[ 240, 144, 167, 191 ],
[ 240, 144, 169, 128 ],
[ 240, 144, 169, 129 ],
[ 240, 144, 169, 130 ],
[ 240, 144, 169, 131 ],
[ 240, 144, | |
fieldidx = 0
for field in insertRow:
if field is None:
InsertQuery += "''"
else:
InsertQuery += "'" + field + "'"
if fieldidx < len(insertRow)-1:
InsertQuery += ","
fieldidx += 1
InsertQuery += ");"
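# NOTE: building SQL by string concatenation is fragile (quoting) and open to
# SQL injection. A safer sketch with sqlite3 parameter substitution, assuming
# the insert targets ProcessContentsTable with the same column order:
#   placeholders = ",".join("?" * len(insertRow))
#   cursor.execute("insert into ProcessContentsTable values ({0})".format(placeholders),
#                  ["" if f is None else f for f in insertRow])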
SelectQuery = "select ParentID, UserContentsLocation from ProcessContentsTable where ContentsID = '" + self.GetItem(self.SelectedIndex, 3).GetText() + "'"
cursor.execute( SelectQuery )
UserRow = cursor.fetchone()
ParentID = UserRow[0]
UpdateQuery = "Update ProcessContentsTable set isDeleted = 'y' where ContentsID = '" + self.GetItem(self.SelectedIndex, 3).GetText() + "'"
cursor.execute(UpdateQuery)
con.commit()
con = sqlite3.connect( self.Public_Process_SQLite )
cursor = con.cursor()
SelectQuery = "select Sequence from ProcessContentsTable where ContentsID = '" + UserRow[1] + "'"
cursor.execute( SelectQuery )
UserRow = cursor.fetchone()
try:
Sequence = UserRow[0]
except (TypeError, IndexError):
# no matching row; fall back so the new entry sorts first
Sequence = '-1'
SelectQuery = "select Sequence, ContentsID from ProcessContentsTable where cast(Sequence as integer) >= " + str(int(Sequence)+1) + " and ParentID = '" + ParentID + "'"
cursor.execute( SelectQuery )
ResultList = cursor.fetchall()
for Row in ResultList:
UpdateQuery = "update ProcessContentsTable set Sequence = '" + str(int(Row[0]) + 1) +"' where ContentsID = '" + Row[1] + "'"
cursor.execute( UpdateQuery )
con.commit()
#print InsertQuery
cursor.execute( InsertQuery )
con.commit()
SelectQuery = "select LastContentsID, NextContentsID from ContentsIDTable where IDType = 'Local'"
cursor.execute( SelectQuery )
ResultContentsID = cursor.fetchone()
LastContentsID = int(ResultContentsID[0])
NextContentsID = int(ResultContentsID[1])
UpdateQuery = "Update ProcessContentsTable set isDeleted = 'n', UserContentsLocation = 'public', Sequence = '" + str(int(Sequence)+1) + "' ,ContentsID = '" + str(NextContentsID) + "' where ContentsID = '" + self.GetItem(self.SelectedIndex, 3).GetText() + "'"
cursor.execute(UpdateQuery)
con.commit()
self.SetStringItem(self.SelectedIndex, 3, str(NextContentsID))
LastContentsID += 1
NextContentsID += 1
UpdateQuery = "update ContentsIDTable set LastContentsID = '" + str(LastContentsID) + "', NextContentsID = '" + str(NextContentsID) + "' where IDType = 'Local'"
cursor.execute( UpdateQuery )
con.commit()
self.SetItemBackgroundColour(self.SelectedIndex, '#ffffff')
return
def OnSize(self, event):
size = self.parent.GetSize()
self.SetColumnWidth(0, 20)
self.SetColumnWidth(1, 250)
self.SetColumnWidth(2, 110)
self.SetColumnWidth(3, 160)
self.SetColumnWidth(4, 160)
self.SetColumnWidth(5, 160)
self.SetColumnWidth(6, 160)
self.SetColumnWidth(7, 120)
self.SetColumnWidth(8, 300)
self.SetColumnWidth(9, 0)
self.SetColumnWidth(10, 0)
event.Skip()
return
def ThreadActivation(self):
window0 = self.MainFrame.FindWindowByName('AnalysisCategoryOnList')
window1 = self.MainFrame.FindWindowByName('AnalysisPointOnList')
window2 = self.MainFrame.FindWindowByName('VestigeLocationOnList')
window3 = self.MainFrame.FindWindowByName('RelatedToolsForAcquisitionOnList')
window4 = self.MainFrame.FindWindowByName('AnalysisDescriptionOnList')
Modulewindow = self.MainFrame.FindWindowByName('ModuleListOnList')
#window5 = self.FindWindowByName('RelatedToolsForAnalysisOnList')
window2.ActivationFlag = True
if self.Type == "File":
#Check Disk space
#################
ResultLines = []
cmdARGS = ["fsutil", "volume", "diskfree", "q:"]
self.pipe = subprocess.Popen(cmdARGS, shell = True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
self.pipe.stdin.close()
self.NowTerminalProcessPid = self.pipe.pid
while self.pipe.poll() is None:
result = self.pipe.stdout.readline().decode('cp949')
if result.strip() != "":
ResultLines.append(result.strip())
result = self.pipe.stdout.readline().decode('cp949')
if result.strip() != "":
ResultLines.append(result.strip())
#print ResultLines
#wx.MessageBox("File size : " + str(int(Size)/1024) + " KB \nResidual space : \t" + str(ResultLines[0].split(":")[1].strip()) + " KB \nDisk space : \t\t" + str(ResultLines[1].split(":")[1].strip()) + " KB")
if int(self.FileSize)/1024 <= int(ResultLines[0].split(":")[1].strip()):
TempPath = os.path.abspath(".") + "\\Temp\\"
if self.MainFrame.isCaseSet == True:
TempPath = self.MainFrame.CasePath + "\\Temp\\"
else:
TempPath = os.path.abspath(".") + "\\Temp\\"
dlg = wx.MessageDialog(None, "Are you sure to continue? \n\nFile size : " + str(int(self.FileSize)/1024) + " KB \nResidual space : \t" + str(ResultLines[0].split(":")[1].strip()) + " KB \nDisk space : \t\t" + str(ResultLines[1].split(":")[1].strip()) + " KB\nTemp path : " + TempPath, 'Info', wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
if result == wx.ID_OK:
#def RawCopy_by_Tsk(self, inode_in, Path, OutputPath):
Tempfile = TempPath + str(time.time()) + "_" +self.Name.replace(":", "_")
#self.MainFrame.RawHandlerClass.RawCopy_by_Tsk(int(inode), Path + "\\" + Name, Tempfile)
threads = []
th = threading.Thread(target=self.MainFrame.RawHandlerClass.RawCopy_by_Tsk, args=(int(self.inode), self.Path + "\\" + self.Name, Tempfile))
#th = threading.Thread(target=self.MainFrame.RawHandlerClass.RawCopy_by_Tsk, args=(None, self.Path + "\\" + self.Name, Tempfile))
th.start()
threads.append(th)
progressMax = 100
dialog = wx.ProgressDialog("File extracting progress", "Please wait..", progressMax, style=wx.PD_ELAPSED_TIME )
while th.is_alive():
wx.Sleep(1)
dialog.Pulse()
dialog.Destroy()
#change filetime
######################
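# win32file.SetFileTime takes (handle, CreationTime, LastAccessTime,
# LastWriteTime); given the index order below, self.timeset appears to hold
# "write:access:create" epoch values joined by ':'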
hFile = win32file.CreateFile(Tempfile.decode("cp949"), win32con.GENERIC_WRITE, win32con.FILE_SHARE_READ, None, win32con.OPEN_EXISTING, win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_BACKUP_SEMANTICS,None)
try:
win32file.SetFileTime(hFile, int(self.timeset.split(":")[2]), int(self.timeset.split(":")[1]), int(self.timeset.split(":")[0]))
finally:
win32api.CloseHandle(hFile)
Modulewindow.FileSelect(Tempfile)
window2.ActivationFlag = False
return
else :
wx.MessageBox("There is not enough disk space for temp directory. \n\nFile size : " + str(int(self.FileSize)/1024) + " KB \nResidual space : \t" + str(ResultLines[0].split(":")[1].strip()) + " KB \nDisk space : \t\t" + str(ResultLines[1].split(":")[1].strip()) + " KB\nTemp path : " + os.path.abspath(".") + "\\Temp")
window2.ActivationFlag = False
return
elif self.Type == "Dir":
comboPath = self.parent.GetParent().GetParent().GetParent().combo.GetValue()
if self.Name == "..":
ComboText = self.parent.GetParent().GetParent().GetParent().combo.GetValue()
self.parent.GetParent().GetParent().GetParent().OriginalCombo = ComboText
window2.NowComboSelected = os.path.split(comboPath)[0]
self.parent.GetParent().GetParent().GetParent().combo.SetValue(os.path.split(comboPath)[0])
else:
ComboText = self.parent.GetParent().GetParent().GetParent().combo.GetValue()
self.parent.GetParent().GetParent().GetParent().OriginalCombo = ComboText
NewCombo = ""
basePath = comboPath.split("<")[0].strip()
if not basePath.endswith("\\"):
NewCombo = window2.NowComboSelected = basePath + "\\" + self.Name
else:
NewCombo = window2.NowComboSelected = basePath + self.Name
self.parent.GetParent().GetParent().GetParent().combo.SetValue(NewCombo)
threads = []
th = threading.Thread(target=window2.SetFileSystemTreeAndList, args=())
th.start()
threads.append(th)
progressMax = 100
dialog = wx.ProgressDialog("Filesystem Lookup progress", "Please wait..", progressMax, style=wx.PD_ELAPSED_TIME )
while th.is_alive():
wx.Sleep(0.1)
dialog.Pulse()
dialog.Destroy()
size = self.parent.GetSize()
self.SetColumnWidth(0, 20)
window2.ActivationFlag = False
return
window2.ActivationFlag = False
return
def OnActivated(self, event):
window = self.MainFrame.FindWindowByName('VestigeLocationOnList')
if window.ActivationFlag:
wx.MessageBox("Please wait. other process is running")
else:
self.Type = self.GetItem(event.GetIndex(),0).GetText()
self.Name = self.GetItem(event.GetIndex(),1).GetText()
self.inode = self.GetItem(event.GetIndex(),2).GetText()
self.mtime = self.GetItem(event.GetIndex(),3).GetText()
self.atime = self.GetItem(event.GetIndex(),4).GetText()
self.ctime = self.GetItem(event.GetIndex(),5).GetText()
self.FileSize = self.GetItem(event.GetIndex(),7).GetText()
self.Path = self.GetItem(event.GetIndex(),8).GetText()
self.timeset = self.GetItem(event.GetIndex(),9).GetText()
threads = []
th = threading.Thread(target=self.ThreadActivation, args=())
th.start()
threads.append(th)
return
class FileSystemMetaTree(CT.CustomTreeCtrl):
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.SUNKEN_BORDER|wx.WANTS_CHARS,
agwStyle=CT.TR_HAS_BUTTONS|CT.TR_HAS_VARIABLE_ROW_HEIGHT|CT.TR_ROW_LINES|CT.TR_TWIST_BUTTONS,
log=None):
CT.CustomTreeCtrl.__init__(self, parent, id, pos, size, style, agwStyle)
self.parent = parent
self.MainFrame = self.parent.GetParent().GetParent().GetParent().GetParent().GetParent().GetParent().GetParent().GetParent()
self.Public_Process_SQLite = "./PFPModule/PFPLib/PublicPFPList/public.process.sqlite"
self.User_Process_SQLite = "./UserModule/userdefine.process.sqlite"
#self.MainFrame = self.parent.GetGrandParent()
self.DBPath = ""
self.PreSelectedID = ""
alldata = dir(CT)
treestyles = []
events = []
for data in alldata:
if data.startswith("TR_"):
treestyles.append(data)
elif data.startswith("EVT_"):
events.append(data)
self.events = events
self.styles = treestyles
self.item = None
il = wx.ImageList(16, 16)
for items in ArtIDs[1:-1]:
bmp = wx.ArtProvider_GetBitmap(eval(items), wx.ART_TOOLBAR, (16, 16))
il.Add(bmp)
self.folder_close_idx = il.Add(bitmap=wx.Bitmap('PFPModule/PFPLib/InternalModules/pfp_sdk/icons/folder_uncheck_16_16.png'))
self.folder_open_idx = il.Add(bitmap=wx.Bitmap('PFPModule/PFPLib/InternalModules/pfp_sdk/icons/folder_check_16_16.png'))
numicons = il.GetImageCount()
self.AssignImageList(il)
self.count = 0
self.log = log
# NOTE: For some reason tree items have to have a data object in
# order to be sorted. Since our compare just uses the labels
# we don't need any real data, so we'll just use None below for
# the item data.
self.root = self.AddRoot("FileSystem Meta Lookup Result")
if not(self.GetAGWWindowStyleFlag() & CT.TR_HIDE_ROOT):
self.SetItemImage(self.root, self.folder_close_idx, wx.TreeItemIcon_Normal)
self.SetItemImage(self.root, self.folder_open_idx, wx.TreeItemIcon_Expanded)
self.PreSelectedItem = self.root
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.eventdict = {'EVT_TREE_BEGIN_DRAG': self.OnBeginDrag, 'EVT_TREE_BEGIN_LABEL_EDIT': self.OnBeginEdit,
'EVT_TREE_BEGIN_RDRAG': self.OnBeginRDrag, 'EVT_TREE_DELETE_ITEM': self.OnDeleteItem,
'EVT_TREE_END_DRAG': self.OnEndDrag, 'EVT_TREE_END_LABEL_EDIT': self.OnEndEdit,
'EVT_TREE_ITEM_ACTIVATED': self.OnActivate, 'EVT_TREE_ITEM_CHECKED': self.OnItemCheck,
'EVT_TREE_ITEM_CHECKING': self.OnItemChecking, 'EVT_TREE_ITEM_COLLAPSED': self.OnItemCollapsed,
'EVT_TREE_ITEM_COLLAPSING': self.OnItemCollapsing, 'EVT_TREE_ITEM_EXPANDED': self.OnItemExpanded,
'EVT_TREE_ITEM_EXPANDING': self.OnItemExpanding, 'EVT_TREE_ITEM_GETTOOLTIP': self.OnToolTip,
'EVT_TREE_ITEM_MENU': self.OnItemMenu, 'EVT_TREE_ITEM_RIGHT_CLICK': self.OnRightDown,
'EVT_TREE_KEY_DOWN': self.OnKey, 'EVT_TREE_SEL_CHANGED': self.OnSelChanged,
'EVT_TREE_SEL_CHANGING': self.OnSelChanging, "EVT_TREE_ITEM_HYPERLINK": self.OnHyperLink}
mainframe = wx.GetTopLevelParent(self)
if not hasattr(mainframe, "leftpanel"):
self.Bind(CT.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded)
self.Bind(CT.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed)
self.Bind(CT.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.Bind(CT.EVT_TREE_SEL_CHANGING, self.OnSelChanging)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
else:
for combos in mainframe.treeevents:
self.BindEvents(combos)
if hasattr(mainframe, "leftpanel"):
self.ChangeStyle(mainframe.treestyles)
if not(self.GetAGWWindowStyleFlag() & CT.TR_HIDE_ROOT):
self.SelectItem(self.root)
self.Expand(self.root)
self.DoSelectItem(self.root)
def LoadData(self, ParentID, ParentNode = None):
if ParentNode is not None:
con = sqlite3.connect( self.PublicDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ParentID + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
PublicResultRows = cursor.fetchall()
con = sqlite3.connect( self.UserDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ParentID + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
UserResultRows = cursor.fetchall()
ResultRows = []
for UserRow in UserResultRows:
if "top" in UserRow[5] and UserRow[0] == "ProcessGroup":
ResultRows.append(UserRow)
for PublicRow in PublicResultRows:
if PublicRow[0] == "ProcessGroup":
ResultRows.append(PublicRow)
for UserRow in UserResultRows:
if UserRow[5] == PublicRow[4] and UserRow[0] == "ProcessGroup":
ResultRows.append(UserRow)
for ResultRow in ResultRows:
child = | |
values for dataset: {mat}'.format(
mat=dataset_name, dt=dt)
raise DateValidityError(message)
else:
cost_data = cost_data[cost_data['Metadata', 'Date'] == dt].squeeze()
if cost_data.empty:
raise DateValidityError('No valid cost values found for date: {dt} for dataset: {mat}'.format(
mat=dataset_name, dt=dt))
elif isinstance(cost_data, pd.DataFrame):
raise DateValidityError('Multiple valid cost values found for date: {dt} for dataset: {mat}'.format(
mat=dataset_name, dt=dt))
return cost_data
def __production(self, material: str, weight: float = None,
volume: float = None, area: float = 1, n_units: float = None,
fraction: float = 1, dt: Union[str, float] = 'timeless') -> pd.Series: # Name or DbId
"""
Function to calculate the production costs of materials (OpaqueMaterial, WindowMaterial, ShadingMaterial)
:param material: Name or DbId of the material
:param fraction: in fraction of weight or area or volume to multiply the amount with
:param weight: in kg (needed only if the cost data is in kg reference)
:param volume: in m3 (needed only if the cost data is in m3 reference)
:param area: in m2 if cost data is in m2 reference (this is the default)
:param n_units: number of units (needed only if the cost data is in pcs reference)
:param dt: date validity
:return: production cost
"""
cost_id = self.life_cycle_data.loc[material, 'CostId']
cost_data = self.get_dt_cost_data(cost_id, dt) # pd.Series with MultiIndex
if cost_data['Metadata', 'Unit'] == 'kg':
if weight is None:
raise UnitOfMeasurementError('Please provide weight value for material: {mat}'.format(mat=material))
cost = cost_data['Costs'] * weight * fraction # pd.Series SingleIndex
elif cost_data['Metadata', 'Unit'] == 'm2':
cost = cost_data['Costs'] * area * fraction # pd.Series SingleIndex
elif cost_data['Metadata', 'Unit'] == 'm3':
if volume is None:
raise UnitOfMeasurementError('Please provide volume value for material: {mat}'.format(mat=material))
cost = cost_data['Costs'] * volume * fraction # pd.Series SingleIndex
elif cost_data['Metadata', 'Unit'] == 'pcs': # for HVAC systems
if n_units is None:
raise UnitOfMeasurementError('Please provide number of units for material: {mat}'.format(mat=material))
cost = cost_data['Costs'] * n_units * fraction # pd.Series SingleIndex
else:
message = 'Unit of material in model does not match the unit of material production in cost data:\n'
message += '{mat} - {mat_u} <-> {i_u} - {i}'.format(
mat=material, mat_u='kg, m2 or m3',
i=cost_data['Metadata', 'Name'], i_u=cost_data['Metadata', 'Unit']
)
raise UnitOfMeasurementError(message)
return cost
def __installation(self, material: str, weight: float = None,
volume: float = None, area: float = 1, n_units: float = None,
fraction: float = 1, dt: Union[str, float] = 'timeless') -> pd.Series: # Name or DbId):
"""
Function to calculate the installation costs of materials (OpaqueMaterial, WindowMaterial, ShadingMaterial)
:param material: Name or DbId of the material
:param fraction: in fraction of weight or area or volume to multiply the amount with
:param weight: in kg (needed only if the cost data is in kg reference)
:param volume: in m3 (needed only if the cost data is in m3 reference)
:param area: in m2 if cost data is in m2 reference (this is the default)
:param n_units: number of units (needed only if the cost data is in pcs reference)
:param dt: date validity
:return: installation cost
"""
cost_id = self.life_cycle_data.loc[material, 'InstallationId']
cost_data = self.get_dt_cost_data(cost_id, dt) # pd.Series with MultiIndex
if cost_data['Metadata', 'Unit'] == 'kg':
if weight is None:
raise UnitOfMeasurementError('Please provide weight value for material: {mat}'.format(mat=material))
cost = cost_data['Costs'] * weight * fraction # pd.Series SingleIndex
elif cost_data['Metadata', 'Unit'] == 'm2':
cost = cost_data['Costs'] * area * fraction # pd.Series SingleIndex
elif cost_data['Metadata', 'Unit'] == 'm3':
if volume is None:
raise UnitOfMeasurementError('Please provide volume value for material: {mat}'.format(mat=material))
cost = cost_data['Costs'] * volume * fraction # pd.Series SingleIndex
elif cost_data['Metadata', 'Unit'] == 'pcs': # for HVAC systems
if n_units is None:
raise UnitOfMeasurementError('Please provide number of units for material: {mat}'.format(mat=material))
cost = cost_data['Costs'] * n_units * fraction # pd.Series SingleIndex
else:
message = 'Unit of material in model does not match the unit of material installation in cost data:\n'
message += '{mat} - {mat_u} <-> {i_u} - {i}'.format(
mat=material, mat_u='kg, m2 or m3',
i=cost_data['Metadata', 'Name'], i_u=cost_data['Metadata', 'Unit']
)
raise UnitOfMeasurementError(message)
return cost
def __replacement(self, material: str, weight: float = None,
volume: float = None, area: float = 1, n_units: float = None, fraction: float = 1,
dt: Union[str, float] = 'timeless') -> pd.Series:
"""
Function to calculate a ONE TIME replacement cost of materials (OpaqueMaterial, WindowMaterial, ShadingMaterial)
:param material: Name or DbId of the material
:param fraction: the amount to be replaced in fraction of weight, volume or area
:param weight: in kg (needed only if the cost data is in kg reference)
:param volume: in m3 (needed only if the cost data is in m3 reference)
:param area: in m2 if cost data is in m2 reference (this is the default)
:param n_units: number of units (needed only if the cost data is in pcs reference)
:param dt: date validity
:return: replacement cost
"""
# count of replacements
# replacement_count = (self.rsp - 1) // life_time
# -1 because we want to make sure that if the rsp equals to the lifetime, no replacement is calculated
replacement = self.__production(material=material, weight=weight, volume=volume, area=area, n_units=n_units,
fraction=fraction, dt=dt)
replacement += self.__installation(material=material, weight=weight, volume=volume, area=area, n_units=n_units,
fraction=fraction, dt=dt)
# replacement *= replacement_count
return replacement
def __operation(self, energy_source: str, energy_demand: float, dt: Union[str, float] = 'timeless') -> pd.Series:
"""
Function to calculate the cost of used energy
:param energy_source: Name or DbId of the energy source
:param energy_demand: Calculated energy demand in kWh
:param dt: date validity
:return: operation cost
"""
cost_id = self.life_cycle_data.loc[energy_source, 'CostId']
cost_data = self.get_dt_cost_data(cost_id, dt) # pd.Series with MultiIndex
if cost_data['Metadata', 'Unit'] == 'MJ':
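# 1 kWh = 3.6 MJ: convert the kWh demand to MJ to match the cost data unit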
costs = cost_data['Costs'] * energy_demand * 3.6 # pd.Series with SingleIndex
elif cost_data['Metadata', 'Unit'] == 'kWh':
costs = cost_data['Costs'] * energy_demand
else:
message = 'Unit of energy demand does not match unit of energy production in cost data:\n'
message += '{en} - {en_u} <-> {i_u} - {i}'.format(
en=energy_source, en_u='kWh or MJ',
i=cost_data['Metadata', 'Name'], i_u=cost_data['Metadata', 'Unit']
)
raise UnitOfMeasurementError(message)
return costs
def __opaque_material(self, material: firepy.model.building.OpaqueMaterial,
life_time_overwrites: dict = None) -> CostResult:
"""
Results refer to 1 m2 of material
:param material:
:param life_time_overwrites: if lifetimes are evaluated on construction level this should contain a
dict with the new lifetimes {match_prop: lifetime}
:return: cost result in cost / m2 basis
"""
# initiate the new result
cost_result = CostResult(ref_unit='m2', stages=['Production'], dt=[self.sdt])
mat = getattr(material, self.match_prop) # Name or DbId
weight = material.Thickness * material.Density # in kg (/m2)
volume = material.Thickness # in m3 (/m2)
cutting_waste = self.life_cycle_data.loc[mat, 'CuttingWaste']
# Production
dt_prod = self.sdt
production = self.__production(
material=mat,
weight=weight,
volume=volume,
area=1,
fraction=1 + cutting_waste,
dt=dt_prod
)
cost_result.costs.loc[:, (dt_prod, 'Production')] = production
# Installation
installation = self.__installation(
material=mat,
weight=weight,
volume=volume,
area=1,
fraction=1 + cutting_waste,
dt=dt_prod
)
cost_result.costs.loc[:, (dt_prod, 'Installation')] = installation
# Replacement
if life_time_overwrites is not None and mat in life_time_overwrites:
life_time = life_time_overwrites[mat]
else:
life_time = self.life_cycle_data.loc[mat, 'LifeTime']
dt_rep = self.sdt + life_time
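# schedule one replacement per expired lifetime within the reference study
# period; the strict '<' ensures no replacement is booked when a lifetime
# ends exactly at the end of the period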
while dt_rep < dt_prod + self.rsp:
replacement = self.__replacement(
material=mat,
weight=weight,
volume=volume,
area=1,
fraction=1 + cutting_waste,
dt=dt_rep
)
# here we could specify if only a part of it is replaced by fraction=replace_fraction * (1 + cutting_waste)
cost_result.costs.loc[:, (dt_rep, 'Replacement')] = replacement
dt_rep += life_time
# Add the result to the collection of results
self.cost_results[material.IuId] = cost_result
return cost_result
def __window_material(self, window_material: firepy.model.building.WindowMaterial) -> CostResult:
"""
This should contain both frame and glazing, also cost data should contain the cost of frame and glazing too
Results refer to 1 m2 of material
:param window_material:
:return:
"""
# initiate the new result
cost_result = CostResult(ref_unit='m2', stages=['Production'], dt=[self.sdt])
mat = getattr(window_material, self.match_prop) # Name or DbId
weight = self.life_cycle_data.loc[mat, 'SurfaceWeight'] # kg/m2
cutting_waste = self.life_cycle_data.loc[mat, 'CuttingWaste']
# Production
dt_prod = self.sdt
production = self.__production(
material=mat,
weight=weight, # this is also very unlikely to define cost on weight basis
volume=None, # we cannot have the cost in m3
area=1,
fraction=1 + cutting_waste,
dt=dt_prod
)
cost_result.costs.loc[:, (dt_prod, 'Production')] = production
# Installation
installation = self.__installation(
material=mat,
weight=weight, # this is also very unlikely to define cost on weight basis
volume=None, # we cannot have the cost in m3
area=1,
fraction=1 + cutting_waste,
dt=dt_prod
)
cost_result.costs.loc[:, (dt_prod, 'Installation')] = installation
# Replacement
life_time = self.life_cycle_data.loc[mat, 'LifeTime']
dt_rep = self.sdt + life_time
while dt_rep < dt_prod + self.rsp:
replacement = self.__replacement(
material=mat,
weight=weight,
volume=None,
area=1,
fraction=1 + cutting_waste,
dt=dt_rep
)
# here we could specify if only a part of | |
# imports
import bpy
import os
from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty
from bpy_extras.io_utils import ImportHelper
from bpy.types import Operator
import math
import mathutils
from mathutils import *
from math import *
# BLENDER ADDON INFORMATION
bl_info = {
"name": "LatticePhysics Support",
"author": "<NAME>",
"version": (2, 0, 0),
"blender": (2, 80, 0),
"location": "Import-Export",
"description": "Loading LatticePhysics XML-dumps of lattices",
"category": "Import-Export",
}
# HELPER FUNCTIONS
def find_collection(context, item):
collections = item.users_collection
if len(collections) > 0:
return collections[0]
return context.scene.collection
def make_collection(collection_name, parent_collection):
if collection_name in bpy.data.collections: # Does the collection already exist?
return bpy.data.collections[collection_name]
else:
new_collection = bpy.data.collections.new(collection_name)
parent_collection.children.link(new_collection) # Add the new collection under a parent
return new_collection
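# usage sketch (hypothetical names): nest a collection under the scene root
#   coll = make_collection("Lattice demo", bpy.context.scene.collection)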
##########---------------------------------
# STEP 1 # BLENDER ADDON DEFINITION
##########---------------------------------
class LatticePhysicsBlenderAddon(Operator, ImportHelper):
# id and label
bl_idname = "latticephysics.open_filebrowser"
bl_label = "Import LatticePhysics dump file"
# global filter options
filter_glob: StringProperty(
default='*.lpbd;*.txt;*.xml',
options={'HIDDEN'}
)
# Options for importing
# resolution of sites (passed to icosphere)
resolution_sites: IntProperty(
name = "Site resolution",
default = 2,
min = 1,
max = 16
)
radius_sites: FloatProperty(
name = "Site radius (relative)",
default = 0.2,
min = 0.0,
max = 10.0
)
# number of subdivisions of sites (passed to the subsurf modifier)
subdivision_sites: IntProperty(
name = "Subdivision of sites",
default = 2,
min = 0,
max = 8
)
radius_bonds: FloatProperty(
name = "Bond radius (relative)",
default = 0.08,
min = 0.0,
max = 1.0
)
# number of subdivisions of sites (passed to the subsurf modifier)
subdivision_bonds: IntProperty(
name = "Subdivision of bonds",
default = 2,
min = 0,
max = 8
)
# if there is a subsurf modifier for sites
hook_bonds_to_sites: BoolProperty(
name = "Hook bonds to sites",
default = True
)
##########---------------------------------
# STEP 2 # FUNCTIONS FOR ADDING PARTS
##########---------------------------------
# DEFINE A FUNCTION TO ADD A SPHERE
def addSite(self, site_index, p, radius, lbl):
# set the correct position
x = bpy.context.scene.cursor.location[0] + p[0]
y = bpy.context.scene.cursor.location[1] + p[1]
z = bpy.context.scene.cursor.location[2] + p[2]
# add an ico sphere at that point
bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=self.resolution_sites, radius=radius, location=(x,y,z))
# get the current object
obj = bpy.context.active_object
# set the name
obj.name = "site_{0}".format(site_index)
# add subsurf modifier to make surface look smoother
if self.subdivision_sites > 0:
obj.modifiers.new("site_subsurf", "SUBSURF")
obj.modifiers["site_subsurf"].levels = 1
obj.modifiers["site_subsurf"].render_levels = self.subdivision_sites
# set the material (color)
material_name = "site_material_"+lbl
# get the material
material = bpy.data.materials.get(material_name)
if material is None:
# create material
material = bpy.data.materials.new(name=material_name)
# set the color
material.diffuse_color = (1.0,1.0,1.0,1.0)
# use nodes for rendering
material.use_nodes = True
# set the material
if obj.data.materials:
obj.data.materials[0] = material
else:
obj.data.materials.append(material)
# set smooth shading
bpy.ops.object.shade_smooth()
# deselect the object
obj.select_set(False)
# return the object
return obj
# DEFINE A FUNCTION TO ADD A TUBE
def addBond(self, bond_index, p_from, p_to, radius, lbl, hook_from=-1, hook_to=-1):
# set the correct positions
x_from = bpy.context.scene.cursor.location[0] + p_from[0]
y_from = bpy.context.scene.cursor.location[1] + p_from[1]
z_from = bpy.context.scene.cursor.location[2] + p_from[2]
x_to = bpy.context.scene.cursor.location[0] + p_to[0]
y_to = bpy.context.scene.cursor.location[1] + p_to[1]
z_to = bpy.context.scene.cursor.location[2] + p_to[2]
# adding curve from
# https://blender.stackexchange.com/questions/120074/how-to-make-a-curve-path-from-scratch-given-a-list-of-x-y-z-points
# make a new curve
crv = bpy.data.curves.new("crv_{0}".format(bond_index), 'CURVE')
crv.dimensions = '3D'
# make a new spline in that curve
spline = crv.splines.new(type='NURBS')
# a spline point for each point
spline.points.add(1) # theres already one point by default
# assign the point coordinates to the spline points
spline.points[0].co = (x_from,y_from,z_from,1)
spline.points[1].co = ( x_to, y_to, z_to,1)
# make a new object with the curve
obj = bpy.data.objects.new("bond_{0}".format(bond_index), crv)
# TODO render bevel (Probably change in the future)
# make the NURBS spline pass through both end points
obj.data.splines[0].use_endpoint_u = True
obj.data.splines[0].use_bezier_u = False
# set the bevel so that the bonds are actually tubes
obj.data.bevel_depth = radius
# link it to the scene
bpy.context.scene.collection.objects.link(obj)
# maybe hook the bond to the sites
if self.hook_bonds_to_sites and hook_from>=0 and hook_to>=0:
# add the hook modifiers
obj.modifiers.new("follow_site_{0}".format(hook_from), "HOOK")
obj.modifiers["follow_site_{0}".format(hook_from)].object = bpy.data.objects["site_{0}".format(hook_from)]
obj.modifiers["follow_site_{0}".format(hook_from)].vertex_indices_set([0,])
obj.modifiers.new("follow_site_{0}".format(hook_to), "HOOK")
obj.modifiers["follow_site_{0}".format(hook_to)].object = bpy.data.objects["site_{0}".format(hook_to)]
obj.modifiers["follow_site_{0}".format(hook_to)].vertex_indices_set([1,])
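# with these hooks in place, moving a site object in the viewport drags the
# attached bond end point along with it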
# add subsurf modifier to make surface look smoother
if self.subdivision_bonds > 0:
# then subsurf
obj.modifiers.new("bond_subsurf", "SUBSURF")
obj.modifiers["bond_subsurf"].levels = 1
obj.modifiers["bond_subsurf"].render_levels = self.subdivision_bonds
# set the material (color)
material_name = "bond_material_"+lbl
# get the material
material = bpy.data.materials.get(material_name)
if material is None:
# create material
material = bpy.data.materials.new(name=material_name)
# set the color
material.diffuse_color = (0.5,0.5,0.5,1.0)
# use nodes for rendering
material.use_nodes = True
# set the material
if obj.data.materials:
obj.data.materials[0] = material
else:
obj.data.materials.append(material)
# set smooth shading
bpy.ops.object.shade_smooth()
# select the object
#obj.select_set(True)
# set origin to geometry
#bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
# deselect the object
#obj.select_set(False)
# return the object
return obj
##########---------------------------------
# STEP 3 # FUNCTION FOR LOADING LATTICE
##########---------------------------------
def loadLatticeDump(self, filename):
# read in file
with open(filename) as f:
lines = f.readlines()
# refactor filename
fn = filename.split(os.sep)[-1]
# remove whitespace characters like `\n` at the end of each line
lines = [x.strip() for x in lines]
# set the render engine
bpy.context.scene.render.engine = "CYCLES"
# set the units
bpy.context.scene.unit_settings.system = "METRIC"
bpy.context.scene.unit_settings.system_rotation = "DEGREES"
# try to set to filmic color space
try:
bpy.context.scene.view_settings.view_transform = "Filmic"
except Exception:
print("Filmic color space not available")
# try to obtain lists for sites and bonds
if not lines[0].startswith("<LATTICEGRAPH"):
print("ERROR, not a Lattice Graph file")
return
# make lists for sites and bonds
N_sites = int(lines[0].split(" ")[1].split("=")[1][1:-1])
N_bonds = int(lines[0].split(" ")[2].split("=")[1][1:-1])
print(N_sites, " sites found")
print(N_bonds, " bonds found")
sites_x = [0.0 for s in range(N_sites)]
sites_y = [0.0 for s in range(N_sites)]
sites_z = [0.0 for s in range(N_sites)]
sites_l = ["l" for s in range(N_sites)]
bonds_f = [ 0 for b in range(N_bonds)]
bonds_t = [ 0 for b in range(N_bonds)]
bonds_l = ["l" for b in range(N_bonds)]
# go through all lines and check if a site or bond has to be added
for l in lines:
# line specifying a site
if l.startswith("<SITE"):
# sample line:
# <SITE index="27" label="4" X="2.4748737341529163" Y="-1.0606601717798216" Z="1.7677669529663687" >
# get the data from the line
site_i = int(l.split(" ")[1].split("=")[1][1:-1]) - 1
site_l = l.split(" ")[2].split("=")[1][1:-1]
site_x = float(l.split(" ")[3].split("=")[1][1:-1])
site_y = float(l.split(" ")[4].split("=")[1][1:-1])
site_z = float(l.split(" ")[5].split("=")[1][1:-1])
# add the site information to the lists
sites_l[site_i] = site_l
sites_x[site_i] = site_x
sites_y[site_i] = site_y
sites_z[site_i] = site_z
#print("site found, index=", site_i, ", label is \"", sites_l[site_i],"\"")
# line specifying a bond
if l.startswith("<BOND"):
# sample line:
# <BOND index="1" label="1" from="1" to="2" >
# get the data from the line
bond_i = int(l.split(" ")[1].split("=")[1][1:-1]) - 1
bond_l = l.split(" ")[2].split("=")[1][1:-1]
bond_f = int(l.split(" ")[3].split("=")[1][1:-1]) - 1
bond_t = int(l.split(" ")[4].split("=")[1][1:-1]) - 1
# add the site information to the lists
bonds_l[bond_i] = bond_l
bonds_f[bond_i] = bond_f
bonds_t[bond_i] = bond_t
#print("bond found, index=", bond_i, ", label is \"", bonds_l[bond_i],"\"")
# get the data from the line
# add the bond
#self.addTube(int(bond_data[0]),bond_data[1],bond_data[2],bond_data[3],bond_data[4],bond_data[5],bond_data[6],bond_data[7],l.split("\t")[2])
# center all sites
cm_x = sum(sites_x) / len(sites_x)
cm_y = sum(sites_y) / len(sites_y)
cm_z = sum(sites_z) / len(sites_z)
sites_x = [s - cm_x for s in sites_x]
sites_y = [s - cm_y for s in sites_y]
sites_z = [s - cm_z for s in sites_z]
# determine length of all bonds (full 3D distance)
bond_lengths = [sqrt((sites_x[bonds_f[i]]-sites_x[bonds_t[i]])**2 + (sites_y[bonds_f[i]]-sites_y[bonds_t[i]])**2 + (sites_z[bonds_f[i]]-sites_z[bonds_t[i]])**2) for i in range(N_bonds)]
min_bond_length = min(bond_lengths)
# deselect all objects
bpy.ops.object.select_all(action='DESELECT')
# check if lattice already added
if "Lattice {0}".format(fn) in bpy.data.collections: # Does the top level collection already exist?
print("ERROR: Lattice already loaded")
return
# add all sites and collect objects into list
site_objects = [ self.addSite(s+1, [sites_x[s],sites_y[s],sites_z[s]], self.radius_sites * min_bond_length, sites_l[s]) for s in range(N_sites) ]
# add all bonds and collect objects into list
bond_objects = [ self.addBond(b+1, [sites_x[bonds_f[b]],sites_y[bonds_f[b]],sites_z[bonds_f[b]]], [sites_x[bonds_t[b]],sites_y[bonds_t[b]],sites_z[bonds_t[b]]], self.radius_bonds * min_bond_length, bonds_l[b], bonds_f[b]+1, bonds_t[b]+1) for b in range(N_bonds) ]
# make the collections right
general_collection = bpy.data.collections.new("Lattice {0}".format(fn))
bpy.context.scene.collection.children.link(general_collection)
# General site and bond collections
site_collection = make_collection("Sites ({0})".format(fn), general_collection)
bond_collection = make_collection("Bonds ({0})".format(fn), general_collection)
site_collections = dict()
bond_collections = dict()
# make new collections for the respective labels
for s in sites_l:
site_collections[s] = make_collection("Sites (label={0}) ({1})".format(s, fn), site_collection)
for b in bonds_l:
bond_collections[b] = make_collection("Bonds (label={0}) ({1})".format(b, fn), bond_collection)
"""Spectral Projection gurgle tools"""
from functools import wraps, cached_property
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import IncrementalPCA, PCA
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.base import TransformerMixin, BaseEstimator
from gurgle.base import CallableModel, TransparentModel
def instantiate_if_type(obj):
if isinstance(obj, type):
return obj()
else:
return obj
def orth(A):
"""
Orthonormalize the matrix A.
Note: Code is more complicated than the purest mathematical need because of numerical problem mitigation.
:param A: a n-by-k array
:return: a n-by-k array whose row spans the same space as the rows of A and with the normal rows
>>> M = np.array([[1,2,3], [3,4,5]])
>>> O = orth(M)
>>> # checking the product of O and its transpose is the identity
>>> np.allclose(np.dot(O, O.T), np.eye(2))
True
"""
u, s, vh = np.linalg.svd(A.T, full_matrices=False)
N, M = A.shape
eps = np.finfo(float).eps
tol = max(M, N) * np.amax(s) * eps
num = np.sum(s > tol, dtype=int)
Q = u[:, :num]
return Q.T
def space_residue(self, X):
return X - np.dot(X, self.orth_projection)
def sub_space_residue(self, X):
return np.dot(self.space_residue(X), self.orth_scaling)
class ScaledPCA(PCA):
scaler_cls = StandardScaler
scaler_ = None
@wraps(PCA.fit)
def fit(self, X, y=None, *args, **kwarg):
self.scaler_ = self.scaler_cls().fit(X)
super().fit(self.scaler_.transform(X), y=None, *args, **kwarg)
return self
@wraps(PCA.transform)
def transform(self, X):
return super().transform(self.scaler_.transform(X))
@wraps(PCA.inverse_transform)
def inverse_transform(self, X):
return self.scaler_.inverse_transform(super().inverse_transform(X))
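# usage sketch: standardize, project, then undo both transforms
#   p = ScaledPCA(n_components=2).fit(X)
#   Z = p.transform(X)
#   X_approx = p.inverse_transform(Z)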
class ProjectorResiduesOnCall(CallableModel):
def __init__(
self, model=ScaledPCA, call_method='transform', score_scaler=TransparentModel
):
super().__init__(model, call_method)
self.score_scaler = instantiate_if_type(score_scaler)
self._x_buffer = list()
# @cached_property
# def projection(self):
# return np.dot(self.scalings_, self.scalings_.T)
#
# @cached_property
# def orth_projection(self):
# return orth(self)
def is_fit(self):
return hasattr(self.model, 'components_') and self.model.components_ is not None
def decision_function(self, X):
"""Predict raw anomaly score of X using the fitted detector."""
# TODO: Make this independent of self.model.components_ (should only use X and self.model.transform(X))
# so that ScaledPCA can work as well.
if hasattr(
self.model, 'scaler_'
): # TODO: Get rid of this hack by learning some damn linear algebra!
return np.sum(
cdist(self.model.scaler_.transform(X), self.model.components_), axis=1
).ravel()
else:
return np.sum(cdist(X, self.model.components_), axis=1).ravel()
def fit(self, X, y=None, *args, **kwargs):
super().fit(X, y, *args, **kwargs)
scores = self.decision_function(X)
self.score_scaler.fit(scores.reshape((-1, 1)))
return self
def postproc(self, X, model_output):
fv, scaled_score = None, None
if self.is_fit():
scores = self.decision_function(X)
scaled_scores = self.score_scaler.transform(scores.reshape((-1, 1)))
fv, scaled_score = X[0], scaled_scores[0]
return fv, scaled_score
class IncrementalProjectorResiduesOnCall(ProjectorResiduesOnCall):
def __init__(
self,
model=IncrementalPCA,
call_method='transform',
score_scaler=StandardScaler,
learn_batch_size=1,
):
super().__init__(model, call_method)
self.score_scaler = instantiate_if_type(score_scaler)
self.learn_batch_size = learn_batch_size
self._x_buffer = list()
def preproc(self, x):
self._x_buffer.append(x)
return np.array([x])
def postproc(self, X, model_output):
fv, scaled_score = super().postproc(X, model_output)
if len(self._x_buffer) >= self.learn_batch_size:
self.model.partial_fit(np.array(self._x_buffer)) # learn a batch
self._x_buffer = list() # reset buffer
return fv, scaled_score
def partial_fittable_instance(obj, name=None, **kwargs):
"""Asserts that obj has a partial_fit, and instantiates it with **kwargs if obj is given as a type"""
if name is None:
if isinstance(obj, type):
name = obj.__name__
else:
name = type(obj).__name__
assert hasattr(obj, 'partial_fit'), f"{name} doesn't have a partial_fit: {obj} "
if isinstance(obj, type):
obj = obj(**kwargs)
return obj
class ScaledIncrementalPCA(IncrementalPCA, ScaledPCA):
scaler_cls = StandardScaler
scaler_ = None
def is_fit(self):
return hasattr(self, 'components_') and self.components_ is not None
@wraps(IncrementalPCA.partial_fit)
def partial_fit(self, X, y=None, check_input=True):
if self.scaler_ is None:
self.scaler_ = self.scaler_cls()
if not self.is_fit():
# then this is the first training data piece we see, so we need to make sure we have enough to start fitting
nrows, fv_size = X.shape
if self.n_components is None:
self.n_components = fv_size
X = complete_with_fake_data_for_warmup(
minimum_n_rows_to_fit=self.n_components, X=X
)
self.scaler_.partial_fit(X)
scaled_X = self.scaler_.transform(X)
super().partial_fit(scaled_X, y, check_input=check_input)
return self
class ResidueGurgle(BaseEstimator):
learn_batch_size = None
def __init__(self, projector=ScaledIncrementalPCA, score_scaler=StandardScaler):
# assert n_components is not None, "You need to specify an actual number of components. No None allowed here."
self.projector = partial_fittable_instance(projector, 'projector')
self.score_scaler = partial_fittable_instance(score_scaler, 'score_scaler')
self._x_buffer = list()
self.learn_batch_size = self.learn_batch_size or (self.n_components + 1)
@property
def n_components(self):
return self.projector.n_components
def is_fit(self):
return (
hasattr(self.projector, 'components_')
and self.projector.components_ is not None
)
def decision_function(self, X):
"""Predict raw anomaly score of X using the fitted detector.
"""
# check_is_fitted(self, ['components_'])
# X = check_array(X)
# return np.sum(cdist(self.projector, self.projector.components_), axis=1).ravel()
# TODO: Make this independent of self.model.components_ (should only use X and self.model.transform(X))
# so that ScaledPCA can work as well.
if hasattr(
self.projector, 'scaler_'
): # TODO: Get rid of this hack by learning some damn linear algebra!
return np.sum(
cdist(self.projector.scaler_.transform(X), self.projector.components_),
axis=1,
).ravel()
else:
return np.sum(cdist(X, self.projector.components_), axis=1).ravel()
def partial_fit(self, X, y=None):
if not self.is_fit():
# then this is the first training data piece we see, so we need to make sure we have enough to start fitting
nrows, fv_size = X.shape
if self.n_components is None:
self.projector.n_components = fv_size
X = complete_with_fake_data_for_warmup(
minimum_n_rows_to_fit=self.n_components, X=X
)
self.projector.partial_fit(X, y)
scores = self.decision_function(X)
self.score_scaler.partial_fit(scores.reshape((-1, 1)))
return self
def __call__(self, x):
fv, scaled_score = None, None
X = np.array([x])
self._x_buffer.extend(X.tolist())
if self.is_fit():
fvs = self.projector.transform(X)
scores = self.decision_function(X)
scaled_scores = self.score_scaler.transform(scores.reshape((-1, 1)))
fv, scaled_score = fvs[0], scaled_scores[0]
if len(self._x_buffer) >= (self.learn_batch_size or len(x)):
self.partial_fit(np.array(self._x_buffer)) # learn a batch
self._x_buffer = list() # reset buffer
return fv, scaled_score
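# usage sketch (hypothetical stream `xs` of vectors with >= 4 features):
#   gurgle = ResidueGurgle(ScaledIncrementalPCA(n_components=4))
#   for x in xs:
#       fv, score = gurgle(x)  # both stay None until the first batch is fit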
#
# def __call__(self, x):
# x = self.preproc(x)
# model_output = self.call_method(x)
# post_output = self.postproc(x, model_output)
# return post_output
def complete_with_fake_data_for_warmup(minimum_n_rows_to_fit, X=None, fv_size=None):
"""Makes fake data to warmup a partial fit process.
If no X is given, will return a random minimum_n_rows_to_fit x fv_size matrix (with values between 0 and 1)
If X is given, will repeat the rows in a cycle until the minimum_n_rows_to_fit is reached
>>> X = complete_with_fake_data_for_warmup(3, fv_size=2);
>>> X.shape
(3, 2)
>>> import numpy as np
>>> complete_with_fake_data_for_warmup(5, X=np.array([[1,2,3], [4,5,6]]))
array([[1, 2, 3],
[4, 5, 6],
[1, 2, 3],
[4, 5, 6],
[1, 2, 3]])
"""
if X is None:
assert fv_size is not None, 'You need to have some data, or specify an fv_size'
return np.random.rand(minimum_n_rows_to_fit, fv_size)
else:
nrows, fv_size = X.shape
missing_n_rows = max(0, minimum_n_rows_to_fit - nrows)
if missing_n_rows > 0:
return np.array(X.tolist() * int(1 + np.ceil(missing_n_rows / nrows)))[
:minimum_n_rows_to_fit
]
else:
return X
############## Not used at the moment ##################################################################################
class GurgleWrap(BaseEstimator):
learn_batch_size = None
def __init__(self, model, score_scaler=StandardScaler, learn_batch_size=1):
# assert n_components is not None, "You need to specify an actual number of components. No None allowed here."
self.model = partial_fittable_instance(model, 'model')
self.score_scaler = partial_fittable_instance(score_scaler, 'score_scaler')
self._x_buffer = list()
self.learn_batch_size = learn_batch_size
def is_fit(self):
return hasattr(self.model, 'components_') and self.model.components_ is not None
def decision_function(self, X):
"""Predict raw anomaly score of X using the fitted detector."""
X = check_array(X)
return np.sum(cdist(X, self.model.components_), axis=1).ravel()
def partial_fit(self, X, y=None):
if not self.is_fit():
# then this is the first training data piece we see, so we need to make sure we have enough to start fitting
nrows, fv_size = X.shape
if getattr(self.model, 'n_components', None) is None:
self.model.n_components = fv_size
X = complete_with_fake_data_for_warmup(
minimum_n_rows_to_fit=self.model.n_components, X=X
)
self.model.partial_fit(X, y)
scores = self.decision_function(X)
self.score_scaler.partial_fit(scores.reshape((-1, 1)))
return self
def __call__(self, x):
fv, scaled_score = None, None
X = np.array([x])
self._x_buffer.extend(X.tolist())
if self.is_fit():
fvs = self.model.transform(X)
scores = self.decision_function(X)
scaled_scores = self.score_scaler.transform(scores.reshape((-1, 1)))
fv, scaled_score = fvs[0], scaled_scores[0]
if len(self._x_buffer) >= (self.learn_batch_size or len(x)):
self.partial_fit(np.array(self._x_buffer)) # learn a batch
self._x_buffer = list() # reset buffer
return fv, scaled_score
class PartialFitPipeline:
def __init__(self, *steps):
self.steps = steps
self.last_step = steps[-1]
def __getattr__(self, attr):
"""Delegate method to wrapped store if not part of wrapper store methods"""
return getattr(self.last_step, attr)
def transform_until_last_step(self, X):
for step in self.steps[:-1]:
X = step.transform(X)
return X
def partial_fit_transform(self, X, y=None, **kwargs):
for step in self.steps:
X = step.partial_fit(X, y, **kwargs).transform(X)
return X
def partial_fit(self, X, y=None, **kwargs):
self.partial_fit_transform(X, y, **kwargs)
return self
def fit(self, X, y=None, **kwargs):
for step in self.steps[:-1]:
X = step.fit(X, y, **kwargs).transform(X)
self.last_step.fit(X)
return self
def transform(self, X):
return self.last_step.transform(self.transform_until_last_step(X))
def predict_proba(self, X):
return self.last_step.predict_proba(self.transform_until_last_step(X))
def predict(self, X):
return self.last_step.predict(self.transform_until_last_step(X))
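# usage sketch: every step, including the last, must expose .transform for
# partial_fit to chain batches through the pipeline
#   pipe = PartialFitPipeline(StandardScaler(), IncrementalPCA(n_components=2))
#   pipe.partial_fit(X_batch)
#   Z = pipe.transform(X_batch)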
class ProjectionDecisionFuncMixin:
components_ = None # lint appeaser
def decision_function(self, X):
"""Predict raw anomaly score of X using the fitted detector.
The anomaly score of an input sample is computed based on different
detector algorithms. For consistency, outliers are assigned with
larger anomaly scores.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only
if they are supported by the base estimator.
Returns
-------
anomaly_scores : numpy | |
"
oooo0OOo = ""
for III1Iiii1I11 in lisp . lisp_get_all_addresses ( ) :
i1IIii1iiIi += "{} or " . format ( III1Iiii1I11 )
oooo0OOo += "{} or " . format ( III1Iiii1I11 )
i1IIii1iiIi = i1IIii1iiIi [ 0 : - 4 ]
i1IIii1iiIi += ") and ((udp dst port 4341 or 8472 or 4789) or "
i1IIii1iiIi += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
oooo0OOo = oooo0OOo [ 0 : - 4 ]
i1IIii1iiIi += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( oooo0OOo )
lisp . lprint ( "Capturing packets for: '{}'" . format ( i1IIii1iiIi ) )
Ooo . filter = i1IIii1iiIi
Ooo . loop ( - 1 , OO0OO00oo0 , [ ooo0oooo0 , lisp_thread ] )
return
def oo0OoOooo ( ) :
lisp . lisp_set_exception ( )
for O0ooO0Oo00o in lisp . lisp_crypto_keys_by_nonce . values ( ) :
for iIii in O0ooO0Oo00o : del ( iIii )
lisp . lisp_crypto_keys_by_nonce . clear ( )
lisp . lisp_crypto_keys_by_nonce = { }
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
lisp . lisp_rtr_nat_trace_cache . clear ( )
lisp . lisp_rtr_nat_trace_cache = { }
Oooo0000 = threading . Timer ( 60 , oo0OoOooo , [ ] )
Oooo0000 . start ( )
return
def OOooo00 ( ) :
global Oo0oO0oo0oO00 , II1iII1i , II1Ii1iI1i
global OOo , Ii1IIii11 , I11
global i111I , oO0oIIII
lisp . lisp_i_am ( "rtr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "RTR starting up" )
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
oOo = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
II1Ii1iI1i = lisp . lisp_open_listen_socket ( oOo ,
str ( iiI1iIiI ) )
Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lisp-rtr" )
i111I = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
II1iII1i [ 0 ] = II1Ii1iI1i
II1iII1i [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
II1iII1i [ 2 ] = Oo0oO0oo0oO00
OOo = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
OOo . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
II1iII1i . append ( OOo )
oO0oIIII = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( lisp . LISP_TRACE_PORT ) )
if ( lisp . lisp_is_raspbian ( ) == False ) :
Ii1IIii11 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
i1iI1 = os . getenv ( "LISP_PCAP_THREADS" )
i1iI1 = 1 if ( i1iI1 == None ) else int ( i1iI1 )
IIi11i1II = os . getenv ( "LISP_WORKER_THREADS" )
IIi11i1II = 0 if ( IIi11i1II == None ) else int ( IIi11i1II )
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
for oOo0OooOo in range ( i1iI1 ) :
o0iIiiIiiIi = | |
# -*- coding: utf-8 -*-
# Created by makepy.py version 0.5.00
# By python version 2.6.6 |EPD 6.3-2 (32-bit)| (r266:84292, Sep 20 2010, 11:26:16) [MSC v.1500 32 bit (Intel)]
# From type library 'TTankInterfaces.ocx'
# On Thu Dec 02 21:00:50 2010
"""TTankInterfaces"""
makepy_version = '0.5.00'
python_version = 0x20606f0
import win32com.client.CLSIDToClass, pythoncom, pywintypes
import win32com.client.util
from pywintypes import IID
from win32com.client import Dispatch
# The following 3 lines may need tweaking for the particular server
# Candidates are pythoncom.Missing, .Empty and .ArgNotFound
defaultNamedOptArg=pythoncom.Empty
defaultNamedNotOptArg=pythoncom.Empty
defaultUnnamedArg=pythoncom.Empty
CLSID = IID('{831D8AF7-7E2B-426B-A430-18E670F56C12}')
MajorVersion = 10
MinorVersion = 9
LibraryFlags = 10
LCID = 0x0
from win32com.client import DispatchBaseClass
class _BlockSelect(DispatchBaseClass):
CLSID = IID('{C2F42E9B-86B7-4F21-889D-8B854B019640}')
coclass_clsid = IID('{CB81F5AF-7625-4F83-B629-54C37B55A203}')
def Refresh(self):
return self._oleobj_.InvokeTypes(1610809385, LCID, 1, (24, 0), (),)
_prop_map_get_ = {
"ActiveBlock": (1745027109, 2, (8, 0), (), "ActiveBlock", None),
"AllowDragDrop": (1745027114, 2, (11, 0), (), "AllowDragDrop", None),
"AllowPopup": (1745027115, 2, (11, 0), (), "AllowPopup", None),
"HideDetails": (1745027110, 2, (11, 0), (), "HideDetails", None),
"ShowDate": (1745027106, 2, (11, 0), (), "ShowDate", None),
"ShowDuration": (1745027103, 2, (11, 0), (), "ShowDuration", None),
"ShowMemo": (1745027101, 2, (11, 0), (), "ShowMemo", None),
"ShowOwner": (1745027102, 2, (11, 0), (), "ShowOwner", None),
"ShowServer": (1745027108, 2, (11, 0), (), "ShowServer", None),
"ShowStart": (1745027105, 2, (11, 0), (), "ShowStart", None),
"ShowStop": (1745027104, 2, (11, 0), (), "ShowStop", None),
"ShowTank": (1745027107, 2, (11, 0), (), "ShowTank", None),
"SingleClickSelect": (1745027100, 2, (11, 0), (), "SingleClickSelect", None),
"UseServer": (1745027112, 2, (8, 0), (), "UseServer", None),
"UseTank": (1745027111, 2, (8, 0), (), "UseTank", None),
}
_prop_map_put_ = {
"ActiveBlock": ((1745027109, LCID, 4, 0),()),
"AllowDragDrop": ((1745027114, LCID, 4, 0),()),
"AllowPopup": ((1745027115, LCID, 4, 0),()),
"HideDetails": ((1745027110, LCID, 4, 0),()),
"ShowDate": ((1745027106, LCID, 4, 0),()),
"ShowDuration": ((1745027103, LCID, 4, 0),()),
"ShowMemo": ((1745027101, LCID, 4, 0),()),
"ShowOwner": ((1745027102, LCID, 4, 0),()),
"ShowServer": ((1745027108, LCID, 4, 0),()),
"ShowStart": ((1745027105, LCID, 4, 0),()),
"ShowStop": ((1745027104, LCID, 4, 0),()),
"ShowTank": ((1745027107, LCID, 4, 0),()),
"SingleClickSelect": ((1745027100, LCID, 4, 0),()),
"UseServer": ((1745027112, LCID, 4, 0),()),
"UseTank": ((1745027111, LCID, 4, 0),()),
}
class _EventSelect(DispatchBaseClass):
CLSID = IID('{CBA421AC-7EB7-40BA-AA2C-81CC652B2EEF}')
coclass_clsid = IID('{01B10737-93FA-4FB2-B1F1-0C59793EBCAA}')
def ClearChecks(self, CheckState=defaultNamedNotOptArg):
return self._ApplyTypes_(1610809454, 1, (24, 0), ((16395, 3),), u'ClearChecks', None,CheckState
)
def GetChecked(self, EvName=defaultNamedNotOptArg):
return self._oleobj_.InvokeTypes(1610809451, LCID, 1, (11, 0), ((8, 1),),EvName
)
def GetEvName(self, Index=defaultNamedNotOptArg):
return self._ApplyTypes_(1610809453, 1, (8, 0), ((16387, 3),), u'GetEvName', None,Index
)
def Refresh(self):
return self._oleobj_.InvokeTypes(1610809449, LCID, 1, (24, 0), (),)
def SetChecked(self, EvName=defaultNamedNotOptArg, CheckState=defaultNamedNotOptArg):
return self._ApplyTypes_(1610809452, 1, (24, 0), ((8, 1), (16395, 3)), u'SetChecked', None,EvName
, CheckState)
_prop_map_get_ = {
"ActiveEvent": (1745027145, 2, (8, 0), (), "ActiveEvent", None),
"AllowActive": (1745027143, 2, (11, 0), (), "AllowActive", None),
"AllowDragDrop": (1745027142, 2, (11, 0), (), "AllowDragDrop", None),
"HideDetails": (1745027158, 2, (11, 0), (), "HideDetails", None),
"ShowBlock": (1745027148, 2, (11, 0), (), "ShowBlock", None),
"ShowChecks": (1745027146, 2, (11, 0), (), "ShowChecks", None),
"ShowDataFormat": (1745027153, 2, (11, 0), (), "ShowDataFormat", None),
"ShowDefaultEvent": (1745027183, 2, (11, 0), (), "ShowDefaultEvent", None),
"ShowFirstTime": (1745027151, 2, (11, 0), (), "ShowFirstTime", None),
"ShowSampleFreq": (1745027152, 2, (11, 0), (), "ShowSampleFreq", None),
"ShowServer": (1745027150, 2, (11, 0), (), "ShowServer", None),
"ShowSize": (1745027147, 2, (11, 0), (), "ShowSize", None),
"ShowStrbOff": (1745027184, 2, (11, 0), (), "ShowStrbOff", None),
"ShowTank": (1745027149, 2, (11, 0), (), "ShowTank", None),
"ShowType": (1745027154, 2, (11, 0), (), "ShowType", None),
"SingleClickSelect": (1745027144, 2, (11, 0), (), "SingleClickSelect", None),
"UseBlock": (1745027155, 2, (8, 0), (), "UseBlock", None),
"UseServer": (1745027157, 2, (8, 0), (), "UseServer", None),
"UseTank": (1745027156, 2, (8, 0), (), "UseTank", None),
}
_prop_map_put_ = {
"ActiveEvent": ((1745027145, LCID, 4, 0),()),
"AllowActive": ((1745027143, LCID, 4, 0),()),
"AllowDragDrop": ((1745027142, LCID, 4, 0),()),
"HideDetails": ((1745027158, LCID, 4, 0),()),
"ShowBlock": ((1745027148, LCID, 4, 0),()),
"ShowChecks": ((1745027146, LCID, 4, 0),()),
"ShowDataFormat": ((1745027153, LCID, 4, 0),()),
"ShowDefaultEvent": ((1745027183, LCID, 4, 0),()),
"ShowFirstTime": ((1745027151, LCID, 4, 0),()),
"ShowSampleFreq": ((1745027152, LCID, 4, 0),()),
"ShowServer": ((1745027150, LCID, 4, 0),()),
"ShowSize": ((1745027147, LCID, 4, 0),()),
"ShowStrbOff": ((1745027184, LCID, 4, 0),()),
"ShowTank": ((1745027149, LCID, 4, 0),()),
"ShowType": ((1745027154, LCID, 4, 0),()),
"SingleClickSelect": ((1745027144, LCID, 4, 0),()),
"UseBlock": ((1745027155, LCID, 4, 0),()),
"UseServer": ((1745027157, LCID, 4, 0),()),
"UseTank": ((1745027156, LCID, 4, 0),()),
}
class _ServSelProps(DispatchBaseClass):
CLSID = IID('{91124062-FA58-4A43-962A-A3E90676DEC0}')
coclass_clsid = IID('{42EDA46E-842E-4131-9C40-1E47D6B8ABB1}')
_prop_map_get_ = {
}
_prop_map_put_ = {
}
class _ServerSelect(DispatchBaseClass):
CLSID = IID('{7BE3D05E-A964-4EF6-A8E6-1E07AB181A98}')
coclass_clsid = IID('{A16140DD-AAA9-46C3-9565-6F1E8815D90A}')
def AddServer(self, ServName=defaultNamedNotOptArg, IPAddr=defaultNamedNotOptArg, Username=defaultNamedNotOptArg, Domain=defaultNamedNotOptArg
, Password=defaultNamedNotOptArg):
return self._ApplyTypes_(1610809405, 1, (11, 0), ((16392, 3), (16392, 3), (16392, 3), (16392, 3), (16392, 3)), u'AddServer', None,ServName
, IPAddr, Username, Domain, Password)
def DeleteServer(self, ServerName=defaultNamedNotOptArg):
return self._ApplyTypes_(1610809407, 1, (11, 0), ((16392, 3),), u'DeleteServer', None,ServerName
)
def ModifyServer(self, ServName=defaultNamedNotOptArg, IPAddr=defaultNamedNotOptArg, Username=defaultNamedNotOptArg, Domain=defaultNamedNotOptArg
, Password=defaultNamedNotOptArg):
return self._ApplyTypes_(1610809406, 1, (11, 0), ((16392, 3), (16392, 3), (16392, 3), (16392, 3), (16392, 3)), u'ModifyServer', None,ServName
, IPAddr, Username, Domain, Password)
def Refresh(self):
return self._oleobj_.InvokeTypes(1610809419, LCID, 1, (24, 0), (),)
_prop_map_get_ = {
"ActiveServer": (1745027118, 2, (8, 0), (), "ActiveServer", None),
"AllowDragDrop": (1745027148, 2, (11, 0), (), "AllowDragDrop", None),
"AllowEdit": (1745027119, 2, (11, 0), (), "AllowEdit", None),
"HideDetails": (1745027136, 2, (11, 0), (), "HideDetails", None),
}
_prop_map_put_ = {
"ActiveServer": ((1745027118, LCID, 4, 0),()),
"AllowDragDrop": ((1745027148, LCID, 4, 0),()),
"AllowEdit": ((1745027119, LCID, 4, 0),()),
"HideDetails": ((1745027136, LCID, 4, 0),()),
}
class _TankSelect(DispatchBaseClass):
CLSID = IID('{2303C7E3-BC00-4B81-A550-D258167DC1C0}')
coclass_clsid = IID('{6BCC8D27-0166-441E-9441-8F55DB2779FB}')
# NOTE: the third parameter is spelled 'quite' (presumably intended as
# 'quiet') in the type library itself; the generated name is kept so
# keyword calls keep working.
def AddTank(self, path=defaultNamedNotOptArg, name=defaultNamedNotOptArg, quite=defaultNamedNotOptArg):
return self._oleobj_.InvokeTypes(1610809446, LCID, 1, (11, 0), ((8, 1), (8, 1), (11, 1)),path
, name, quite)
def Refresh(self):
return self._oleobj_.InvokeTypes(1610809417, LCID, 1, (24, 0), (),)
_prop_map_get_ = {
"ActiveTank": (1745027119, 2, (8, 0), (), "ActiveTank", None),
"AllowDragDrop": (1745027146, 2, (11, 0), (), "AllowDragDrop", None),
"AllowPopup": (1745027124, 2, (11, 0), (), "AllowPopup", None),
"FileDialogInitPath": (1745027149, 2, (8, 0), (), "FileDialogInitPath", None),
"HideDetails": (1745027121, 2, (11, 0), (), "HideDetails", None),
"ShowOnlyPathString": (1745027147, 2, (8, 0), (), "ShowOnlyPathString", None),
"ShowServer": (1745027122, 2, (11, 0), (), "ShowServer", None),
"ShowSpecPathOnly": (1745027148, 2, (11, 0), (), "ShowSpecPathOnly", None),
"ShowTankNew": (1745027175, 2, (11, 0), (), "ShowTankNew", None),
"SingleClickSelect": (1745027120, 2, (11, 0), (), "SingleClickSelect", None),
"UseServer": (1745027123, 2, (8, 0), (), "UseServer", None),
}
_prop_map_put_ = {
"ActiveTank": ((1745027119, LCID, 4, 0),()),
"AllowDragDrop": ((1745027146, LCID, 4, 0),()),
"AllowPopup": ((1745027124, LCID, 4, 0),()),
"FileDialogInitPath": ((1745027149, LCID, 4, 0),()),
"HideDetails": ((1745027121, LCID, 4, 0),()),
"ShowOnlyPathString": ((1745027147, LCID, 4, 0),()),
"ShowServer": ((1745027122, LCID, 4, 0),()),
"ShowSpecPathOnly": ((1745027148, LCID, 4, 0),()),
"ShowTankNew": ((1745027175, LCID, 4, 0),()),
"SingleClickSelect": ((1745027120, LCID, 4, 0),()),
"UseServer": ((1745027123, LCID, 4, 0),()),
}
class _BlockSelect_:
CLSID = CLSID_Sink = IID('{DC769221-9AD4-4CCD-B51A-FEC47ED63458}')
coclass_clsid = IID('{CB81F5AF-7625-4F83-B629-54C37B55A203}')
_public_methods_ = [] # For COM Server support
_dispid_to_func_ = {
2 : "OnBeginDrag",
1 : "OnBlockChanged",
}
def __init__(self, oobj = None):
if oobj is None:
self._olecp = None
else:
import win32com.server.util
from win32com.server.policy import EventHandlerPolicy
cpc=oobj._oleobj_.QueryInterface(pythoncom.IID_IConnectionPointContainer)
cp=cpc.FindConnectionPoint(self.CLSID_Sink)
cookie=cp.Advise(win32com.server.util.wrap(self, usePolicy=EventHandlerPolicy))
self._olecp,self._olecp_cookie = cp,cookie
def __del__(self):
try:
self.close()
except pythoncom.com_error:
pass
def close(self):
if self._olecp is not None:
cp,cookie,self._olecp,self._olecp_cookie = self._olecp,self._olecp_cookie,None,None
cp.Unadvise(cookie)
def _query_interface_(self, iid):
import win32com.server.util
if iid==self.CLSID_Sink: return win32com.server.util.wrap(self)
# Event Handlers
# If you create handlers, they should have the following prototypes:
# def OnBeginDrag(self, TDD=defaultNamedNotOptArg):
# def OnBlockChanged(self, ActBlock=defaultNamedNotOptArg, ActTank=defaultNamedNotOptArg, ActServer=defaultNamedNotOptArg):
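# Hypothetical sketch (not makepy output): implement one of the prototypes
# above by subclassing the generated sink; method names must match the
# entries in _dispid_to_func_.
class _ExampleBlockSelectEvents(_BlockSelect_):
	def OnBlockChanged(self, ActBlock=defaultNamedNotOptArg, ActTank=defaultNamedNotOptArg, ActServer=defaultNamedNotOptArg):
		print('Active block: %r (tank %r, server %r)' % (ActBlock, ActTank, ActServer))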
class _EventSelect_:
CLSID = CLSID_Sink = IID('{3F098EDA-4EFB-4923-9613-373BF08B3F5C}')
coclass_clsid = IID('{01B10737-93FA-4FB2-B1F1-0C59793EBCAA}')
_public_methods_ = [] # For COM Server support
_dispid_to_func_ = {
2 : "OnBeginDrag",
3 : "OnChangeCheck",
5 : "OnEventDblClicked",
4 : "OnEventClicked",
1 : "OnActEventChanged",
}
def __init__(self, oobj = None):
if oobj is None:
self._olecp = None
else:
import win32com.server.util
from win32com.server.policy import EventHandlerPolicy
cpc=oobj._oleobj_.QueryInterface(pythoncom.IID_IConnectionPointContainer)
cp=cpc.FindConnectionPoint(self.CLSID_Sink)
cookie=cp.Advise(win32com.server.util.wrap(self, usePolicy=EventHandlerPolicy))
self._olecp,self._olecp_cookie = cp,cookie
def __del__(self):
try:
self.close()
except pythoncom.com_error:
pass
def close(self):
if self._olecp is not None:
cp,cookie,self._olecp,self._olecp_cookie = self._olecp,self._olecp_cookie,None,None
cp.Unadvise(cookie)
def _query_interface_(self, iid):
import win32com.server.util
if iid==self.CLSID_Sink: return win32com.server.util.wrap(self)
# Event Handlers
# If you create handlers, they should have the following prototypes:
# def OnBeginDrag(self, TDD=defaultNamedNotOptArg):
# def OnChangeCheck(self, EvName=defaultNamedNotOptArg):
# def OnEventDblClicked(self, EvName=defaultNamedNotOptArg):
# def OnEventClicked(self, EvName=defaultNamedNotOptArg):
# def OnActEventChanged(self, NewActEvent=defaultNamedNotOptArg):
class _ServerSelect_:
CLSID = CLSID_Sink = IID('{75CA8D1D-4078-4EA7-8EC2-E2198C9CFA52}')
coclass_clsid = IID('{A16140DD-AAA9-46C3-9565-6F1E8815D90A}')
_public_methods_ = [] # For COM Server support
_dispid_to_func_ = {
1 : "OnServerChanged",
}
def __init__(self, oobj = None):
if oobj is None:
self._olecp = None
else:
import win32com.server.util
from win32com.server.policy import EventHandlerPolicy
cpc=oobj._oleobj_.QueryInterface(pythoncom.IID_IConnectionPointContainer)
cp=cpc.FindConnectionPoint(self.CLSID_Sink)
cookie=cp.Advise(win32com.server.util.wrap(self, usePolicy=EventHandlerPolicy))
self._olecp,self._olecp_cookie = cp,cookie
def __del__(self):
try:
self.close()
except pythoncom.com_error:
pass
def close(self):
if self._olecp is not None:
cp,cookie,self._olecp,self._olecp_cookie = self._olecp,self._olecp_cookie,None,None
cp.Unadvise(cookie)
def _query_interface_(self, iid):
import win32com.server.util
if iid==self.CLSID_Sink: return win32com.server.util.wrap(self)
# Event Handlers
# If you create handlers, they should have the following prototypes:
# def OnServerChanged(self, NewServer=defaultNamedNotOptArg):
class _TankSelect_:
CLSID = CLSID_Sink = IID('{58277ACF-7979-45F9-BBE7-0FB5D6B416F4}')
coclass_clsid = IID('{6BCC8D27-0166-441E-9441-8F55DB2779FB}')
_public_methods_ = [] # For COM Server support
_dispid_to_func_ = {
1 : "OnTankChanged",
}
def __init__(self, oobj = None):
if oobj is None:
self._olecp = None
else:
import win32com.server.util
from win32com.server.policy import EventHandlerPolicy
cpc=oobj._oleobj_.QueryInterface(pythoncom.IID_IConnectionPointContainer)
cp=cpc.FindConnectionPoint(self.CLSID_Sink)
cookie=cp.Advise(win32com.server.util.wrap(self, usePolicy=EventHandlerPolicy))
self._olecp,self._olecp_cookie = cp,cookie
def __del__(self):
try:
self.close()
except pythoncom.com_error:
pass
def close(self):
if self._olecp is not None:
cp,cookie,self._olecp,self._olecp_cookie = self._olecp,self._olecp_cookie,None,None
cp.Unadvise(cookie)
def _query_interface_(self, iid):
import win32com.server.util
if iid==self.CLSID_Sink: return win32com.server.util.wrap(self)
# Event Handlers
# If you create handlers, they should have the following prototypes:
# def OnTankChanged(self, ActTank=defaultNamedNotOptArg, ActServer=defaultNamedNotOptArg):
from win32com.client import CoClassBaseClass
# This CoClass is known by the name 'TTankInterfaces.BlockSelect'
class BlockSelect(CoClassBaseClass): # A CoClass
CLSID = IID('{CB81F5AF-7625-4F83-B629-54C37B55A203}')
coclass_sources = [
_BlockSelect_,
]
default_source = _BlockSelect_
coclass_interfaces = [
_BlockSelect,
]
default_interface = _BlockSelect
# This CoClass is known by the name 'TTankInterfaces.EventSelect'
class EventSelect(CoClassBaseClass): # A CoClass
CLSID = IID('{01B10737-93FA-4FB2-B1F1-0C59793EBCAA}')
coclass_sources = [
_EventSelect_,
]
default_source = _EventSelect_
coclass_interfaces = [
_EventSelect,
]
default_interface = _EventSelect
class ServSelProps(CoClassBaseClass): # A CoClass
CLSID = IID('{42EDA46E-842E-4131-9C40-1E47D6B8ABB1}')
coclass_sources = [
]
coclass_interfaces = [
_ServSelProps,
]
default_interface = _ServSelProps
# This CoClass is known by the name 'TTankInterfaces.ServerSelect'
class ServerSelect(CoClassBaseClass): # A CoClass
CLSID = IID('{A16140DD-AAA9-46C3-9565-6F1E8815D90A}')
coclass_sources = [
_ServerSelect_,
]
default_source = _ServerSelect_
coclass_interfaces = [
_ServerSelect,
]
default_interface = _ServerSelect
# This CoClass is known by the name 'TTankInterfaces.TankSelect'
class TankSelect(CoClassBaseClass): # A CoClass
CLSID = IID('{6BCC8D27-0166-441E-9441-8F55DB2779FB}')
coclass_sources = [
_TankSelect_,
]
default_source = _TankSelect_
coclass_interfaces = [
_TankSelect,
]
default_interface = _TankSelect
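# Hypothetical usage sketch (assumes the TTankInterfaces OCX is registered
# on this machine): create a coclass instance and advise an event sink
# against its connection point.
def _example_advise_tank_select():
	tank_ui = Dispatch('TTankInterfaces.TankSelect')
	sink = _TankSelect_(tank_ui)  # the sink's __init__ runs Advise()
	return tank_ui, sink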
_BlockSelect_vtables_dispatch_ = 1
_BlockSelect_vtables_ = [
(( u'Refresh' , ), 1610809385, (1610809385, (), [ ], 1 , 1 , 4 , 0 , 1956 , (3, 0, None, None) , 0 , )),
(( u'UseServer' , None , ), 1745027112, (1745027112, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 1960 , (3, 0, None, None) , 0 , )),
(( u'UseServer' , None , ), 1745027112, (1745027112, (), [ (8, 1, None, None) , ],
# coding=utf-8
"""
Module for low level OSM file retrieval.
:copyright: (c) 2013 by <NAME>
:license: GPLv3, see LICENSE for more details.
"""
import hashlib
import time
import os
import re
import sys
import datetime
from subprocess import call
from shutil import copyfile
from reporter.utilities import temp_dir, unique_filename, zip_shp, which
from reporter import config
from reporter import LOGGER
from reporter.queries import (
SQL_QUERY_MAP,
OVERPASS_QUERY_MAP,
OVERPASS_QUERY_MAP_POLYGON
)
from reporter.utilities import (
shapefile_resource_base_path,
overpass_resource_base_path,
generic_shapefile_base_path)
from reporter.exceptions import (
OverpassTimeoutException,
OverpassBadRequestException,
OverpassConcurrentRequestException,
OverpassDoesNotReturnData)
from reporter.metadata import metadata_files
from urllib.request import urlopen
# noinspection PyPep8Naming
from urllib.request import Request
from urllib.parse import quote, urlencode
import requests
# noinspection PyPep8Naming
from urllib.error import HTTPError
query_with_value = (
'('
'way["{key}"~"{value}"]'
'(poly:"{polygon}");'
'relation["{key}"~"{value}"]'
'(poly:"{polygon}");'
');'
'(._;>;);'
'out {print_mode};'
)
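# Illustrative aside (not used by the module): how the template above
# expands. The polygon string uses Overpass's "lat lon lat lon ..." format;
# the key/value/coordinates below are hypothetical.
def _example_query_with_value():
    return query_with_value.format(
        key='building',
        value='yes|residential',
        polygon='50.70 7.10 50.70 7.20 50.80 7.15',
        print_mode='body')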
def get_osm_file(
coordinates,
feature='all',
overpass_verbosity='body',
date_from=None,
date_to=None,
use_polygon=False):
"""Fetch an osm file given a bounding box using the overpass API.
:param coordinates: Coordinates as a list in the form:
[min lat, min lon, max lat, max lon]
:param feature: The type of feature to extract:
buildings, building-points, roads, potential-idp, boundary-[1,11]
:type feature: str
:param overpass_verbosity: Output verbosity in Overpass.
It can be body, skeleton, ids_only or meta.
:type overpass_verbosity: str
:param date_from: First date for date range.
:type date_from: str
:param date_to: Second date for date range.
:type date_to: str
:param use_polygon: Flag if coordinates is polygon or bbox
:type use_polygon: bool
:returns: A file which has been opened on the retrieved OSM dataset.
:rtype: file
Coordinates look like this:
{'NE_lng': 20.444537401199337,
'SW_lat': -34.0460012312071,
'SW_lng': 20.439494848251343,
'NE_lat': -34.044441058971394}
Example overpass API query for buildings (testable at
http://overpass-turbo.eu/)::
(
node
["building"]
["building"!="no"]
({{bbox}});
way
["building"]
["building"!="no"]
({{bbox}});
rel
["building"]
["building"!="no"]
({{bbox}});
<;);out+meta;
Equivalent url (http encoded)::
"""
server_url = 'http://overpass-api.de/api/interpreter?data='
parameters = dict()
parameters['print_mode'] = overpass_verbosity
if use_polygon:
parameters['polygon'] = coordinates
else:
parameters.update(coordinates)
if '=' in feature:
feature_keys = feature.split('=')
parameters['key'] = feature_keys[0]
parameters['value'] = feature_keys[1].replace(',', '|')
query = query_with_value.format(**parameters)
else:
if use_polygon:
query = OVERPASS_QUERY_MAP_POLYGON[feature].format(**parameters)
else:
query = OVERPASS_QUERY_MAP[feature].format(**parameters)
if date_from and date_to:
try:
datetime_from = datetime.datetime.utcfromtimestamp(
float(date_from) / 1000.)
datetime_to = datetime.datetime.utcfromtimestamp(
float(date_to) / 1000.)
date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
diff_query = '[diff:"{date_from}", "{date_to}"];'.format(
date_from=datetime_from.strftime(date_format),
date_to=datetime_to.strftime(date_format)
)
query = diff_query + query
except ValueError as e:
LOGGER.debug(e)
encoded_query = quote(query)
url_path = '%s%s' % (server_url, encoded_query)
safe_name = hashlib.md5(query.encode('utf-8')).hexdigest() + '.osm'
file_path = os.path.join(config.CACHE_DIR, safe_name)
return load_osm_document(file_path, url_path)
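# Illustrative usage sketch (hypothetical coordinates): fetch building
# features inside a bounding box and read back the cached OSM XML.
def _example_get_osm_file():
    bbox = {'SW_lat': -34.046, 'SW_lng': 20.439,
            'NE_lat': -34.044, 'NE_lng': 20.444}
    osm_file = get_osm_file(bbox, feature='buildings')
    try:
        return osm_file.read()
    finally:
        osm_file.close()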
def load_osm_document(file_path, url_path):
"""Load an osm document, refreshing it if the cached copy is stale.
To save bandwidth the file is not downloaded if it is less than 1 hour old.
:type file_path: basestring
:param file_path: The path on the filesystem to which the file should
be saved.
:param url_path: Path (relative to the ftp root) from which the file
should be retrieved.
:type url_path: str
:returns: A file object for the downloaded file.
:rtype: file
Raises:
None
"""
elapsed_seconds = 0
if os.path.exists(file_path):
current_time = time.time() # in unix epoch
file_time = os.path.getmtime(file_path) # in unix epoch
elapsed_seconds = current_time - file_time
if elapsed_seconds > 3600:
os.remove(file_path)
if elapsed_seconds > 3600 or not os.path.exists(file_path):
fetch_osm(file_path, url_path)
message = ('fetched %s' % file_path)
LOGGER.info(message)
file_handle = open(file_path, 'rb')
return file_handle
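# Aside -- the freshness rule above in isolation (illustrative helper, not
# called by this module): a cached copy is reused only while it is less
# than an hour old.
def _is_cache_fresh(path, max_age_seconds=3600):
    return (os.path.exists(path) and
            time.time() - os.path.getmtime(path) < max_age_seconds)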
def fetch_osm_with_post(file_path, url_path, post_data, returns_format='json'):
"""Fetch an osm map and store locally.
:param url_path: The path (relative to the ftp root) from which the
file should be retrieved.
:type url_path: str
:param file_path: The path on the filesystem to which the file should
be saved.
:type file_path: str
:param post_data: Overpass data
:type post_data: str
:param returns_format: Format of the response, could be json or xml
:type returns_format: str
:returns: None. The response body is written to file_path.
"""
headers = {'User-Agent': 'HotOSM'}
try:
    data = requests.post(
        url=url_path,
        data={'data': post_data},
        headers=headers)
    # requests does not raise urllib's HTTPError, so inspect the status
    # code on the response directly.
    if data.status_code == 400:
        LOGGER.error('Bad request to Overpass')
        raise OverpassBadRequestException
    elif data.status_code == 429:
        # Too Many Requests: Overpass's concurrent request limit.
        raise OverpassConcurrentRequestException
    if returns_format != 'xml':
        regex = '<remark> runtime error:'
        if re.search(regex, data.text):
            raise OverpassTimeoutException
        regex = '(elements|meta)'
        if not re.search(regex, data.text):
            raise OverpassDoesNotReturnData
    if os.path.exists(file_path):
        os.remove(file_path)
    file_handle = open(file_path, 'wb')
    file_handle.write(data.text.encode('utf-8'))
    file_handle.close()
except requests.exceptions.RequestException as e:
    LOGGER.exception('Error with Overpass')
    raise e
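# Illustrative call (hypothetical paths; a minimal valid Overpass QL query):
# POST a query and cache the JSON response.
# fetch_osm_with_post(
#     '/tmp/cache/example.json',
#     'http://overpass-api.de/api/interpreter',
#     '[out:json];node(1);out;')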
def fetch_osm(file_path, url_path):
"""Fetch an osm map and store locally.
:param url_path: The path (relative to the ftp root) from which the
file should be retrieved.
:type url_path: str
:param file_path: The path on the filesystem to which the file should
be saved.
:type file_path: str
:returns: The path to the downloaded file.
"""
LOGGER.debug('Getting URL: %s', url_path)
headers = {'User-Agent': 'InaSAFE'}
web_request = Request(url_path, None, headers)
try:
url_handle = urlopen(web_request, timeout=60)
data = url_handle.read().decode('utf-8')
regex = '<remark> runtime error:'
if re.search(regex, data):
raise OverpassTimeoutException
regex = '(elements|meta)'
if not re.search(regex, data):
raise OverpassDoesNotReturnData
if os.path.exists(file_path):
os.remove(file_path)
file_handle = open(file_path, 'wb')
file_handle.write(data.encode('utf-8'))
file_handle.close()
except HTTPError as e:
if e.code == 400:
LOGGER.exception('Bad request to Overpass')
raise OverpassBadRequestException
elif e.code == 429:  # Too Many Requests: concurrent request limit
raise OverpassConcurrentRequestException
LOGGER.exception('Error with Overpass')
raise e
def add_metadata_timestamp(metadata_file_path):
"""Add the current date / time to the metadata file.
:param metadata_file_path: Metadata file path that the timestamp should be
written to.
:type metadata_file_path: str
"""
time_stamp = time.strftime('%d-%m-%Y %H:%M')
extension = os.path.splitext(metadata_file_path)[1]
if extension == '.keywords':  # os.path.splitext keeps the leading dot
keyword_file = open(metadata_file_path, 'ab')
content = 'date: %s' % time_stamp
keyword_file.write(content.encode('utf-8'))
keyword_file.close()
else:
# Need to write this section : write date/time in XML file
# {{ datetime }} -> 18-06-2018 03:23
f = open(metadata_file_path, 'r')
file_data = f.read()
f.close()
new_data = file_data.replace('{{ datetime }}', time_stamp)
f = open(metadata_file_path, 'w')
f.write(new_data)
f.close()
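# Illustrative sketch: exercise the XML branch above against a throwaway
# template file containing the {{ datetime }} token.
def _example_stamp_xml_template():
    import tempfile
    handle, path = tempfile.mkstemp(suffix='.xml')
    with os.fdopen(handle, 'w') as template_file:
        template_file.write('<metadata><date>{{ datetime }}</date></metadata>')
    add_metadata_timestamp(path)
    with open(path) as stamped_file:
        return stamped_file.read()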
def import_and_extract_shapefile(
feature_type,
file_path,
qgis_version=2,
output_prefix='',
inasafe_version=None,
lang='en'):
"""Convert the OSM xml file to a shapefile.
This is a multi-step process:
* Create a temporary postgis database
* Load the osm dataset into POSTGIS with osm2pgsql and our custom
style file.
* Save the data out again to a shapefile
* Zip the shapefile ready for user to download
:param feature_type: The feature to extract.
:type feature_type: str
:param file_path: Path to the OSM file name.
:type file_path: str
:param qgis_version: The QGIS major version. Currently 1 and 2 are
accepted, defaulting to 2; a different qml style file is returned
depending on the version.
:type qgis_version: int
:param output_prefix: Base name for the shape file. Defaults to ''
which will result in an output file of feature_type + '.shp'. Adding a
prefix of e.g. 'test-' would result in a downloaded file name of
'test-buildings.shp'. Allowed characters are [a-zA-Z-_0-9].
:type output_prefix: str
:param inasafe_version: The InaSAFE version, to get correct metadata.
:type inasafe_version: str
:param lang: The language desired for the labels in the legend.
Example : 'en', 'fr', etc. Default is 'en'.
:type lang: str
:returns: Path to zipfile that was created.
:rtype: str
"""
if not check_string(output_prefix):
error = 'Invalid output prefix: %s' % output_prefix
LOGGER.exception(error)
raise Exception(error)
output_prefix += feature_type
work_dir = temp_dir(sub_dir=feature_type)
directory_name = unique_filename(dir=work_dir)
db_name = os.path.basename(directory_name)
import_osm_file(db_name, feature_type, file_path)
zip_file = extract_shapefile(
feature_type,
db_name,
directory_name,
qgis_version,
output_prefix,
inasafe_version,
lang)
drop_database(db_name)
return zip_file
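# Illustrative call (hypothetical path): as documented above, an
# output_prefix of 'test-' yields a zipped shapefile named
# 'test-buildings.shp'.
def _example_extract_buildings(osm_file_path):
    return import_and_extract_shapefile(
        'buildings', osm_file_path, output_prefix='test-')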
def import_osm_file(db_name, feature_type, file_path):
"""Import the OSM xml file into a postgis database.
:param db_name: The database to use.
:type db_name: str
:param feature_type: The feature to import.
:type feature_type: str
:param file_path: Path to the OSM file.
:type file_path: str
"""
overpass_resource_path = overpass_resource_base_path(feature_type)
style_file = '%s.style' % overpass_resource_path
# Used to standardise types while the data is still in PostGIS
transform_path = '%s.sql' % overpass_resource_path
createdb_executable = which('createdb')[0]
createdb_command = '%s -T template_postgis %s' % (
createdb_executable, db_name)
osm2pgsql_executable = which('osm2pgsql')[0]
osm2pgsql_options = config.OSM2PGSQL_OPTIONS
osm2pgsql_command = '%s -S %s -d %s %s %s' % (
osm2pgsql_executable,
style_file,
db_name,
osm2pgsql_options,
file_path)
psql_executable = which('psql')[0]
transform_command = '%s %s -f %s' % (
psql_executable, db_name, transform_path)
LOGGER.info(createdb_command)
call(createdb_command, shell=True)
LOGGER.info(osm2pgsql_command)
call(osm2pgsql_command, shell=True)
LOGGER.info(transform_command)
call(transform_command, shell=True)
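# For orientation (hypothetical names): with db_name='osm_ab12cd' and
# feature_type='buildings', the three calls above run roughly:
#   createdb -T template_postgis osm_ab12cd
#   osm2pgsql -S .../buildings.style -d osm_ab12cd <OSM2PGSQL_OPTIONS> file.osm
#   psql osm_ab12cd -f .../buildings.sql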
def drop_database(db_name):
"""Remove a database.
:param db_name: The database
:type db_name: str
"""
dropdb_executable = which('dropdb')[0]
dropdb_command = '%s %s' % (dropdb_executable, db_name)
LOGGER.info(dropdb_command)
call(dropdb_command, shell=True)
def extract_shapefile(
feature_type,
db_name,
directory_name,
qgis_version=2,
output_prefix='',
inasafe_version=None,
lang='en'):
"""Extract a database to a shapefile.
This is a multi-step process:
* Create a temporary postgis database
* Load the osm dataset into POSTGIS with osm2pgsql and our custom
style file.
* Save the data out again to a shapefile
* Zip the shapefile ready for user to download
from __future__ import absolute_import, unicode_literals
import socket
import sys
from collections import defaultdict
from datetime import datetime, timedelta
import pytest
from case import Mock, call, patch
from kombu import pidbox
from kombu.utils.uuid import uuid
from celery.five import Queue as FastQueue
from celery.utils.collections import AttributeDict
from celery.utils.timer2 import Timer
from celery.worker import WorkController as _WC # noqa
from celery.worker import consumer, control
from celery.worker import state as worker_state
from celery.worker.pidbox import Pidbox, gPidbox
from celery.worker.request import Request
from celery.worker.state import revoked
hostname = socket.gethostname()
class WorkController(object):
autoscaler = None
def stats(self):
return {'total': worker_state.total_count}
class Consumer(consumer.Consumer):
def __init__(self, app):
self.app = app
self.buffer = FastQueue()
self.timer = Timer()
self.event_dispatcher = Mock()
self.controller = WorkController()
self.task_consumer = Mock()
self.prefetch_multiplier = 1
self.initial_prefetch_count = 1
from celery.concurrency.base import BasePool
self.pool = BasePool(10)
self.task_buckets = defaultdict(lambda: None)
self.hub = None
def call_soon(self, p, *args, **kwargs):
return p(*args, **kwargs)
class test_Pidbox:
def test_shutdown(self):
with patch('celery.worker.pidbox.ignore_errors') as eig:
parent = Mock()
pbox = Pidbox(parent)
pbox._close_channel = Mock()
assert pbox.c is parent
pconsumer = pbox.consumer = Mock()
cancel = pconsumer.cancel
pbox.shutdown(parent)
eig.assert_called_with(parent, cancel)
pbox._close_channel.assert_called_with(parent)
class test_Pidbox_green:
def test_stop(self):
parent = Mock()
g = gPidbox(parent)
stopped = g._node_stopped = Mock()
shutdown = g._node_shutdown = Mock()
close_chan = g._close_channel = Mock()
g.stop(parent)
shutdown.set.assert_called_with()
stopped.wait.assert_called_with()
close_chan.assert_called_with(parent)
assert g._node_stopped is None
assert g._node_shutdown is None
close_chan.reset_mock()
g.stop(parent)
close_chan.assert_called_with(parent)
def test_resets(self):
parent = Mock()
g = gPidbox(parent)
g._resets = 100
g.reset()
assert g._resets == 101
def test_loop(self):
parent = Mock()
conn = self.app.connection_for_read()
parent.connection_for_read.return_value = conn
drain = conn.drain_events = Mock()
g = gPidbox(parent)
parent.connection = Mock()
do_reset = g._do_reset = Mock()
call_count = [0]
def se(*args, **kwargs):
if call_count[0] > 2:
g._node_shutdown.set()
g.reset()
call_count[0] += 1
drain.side_effect = se
g.loop(parent)
assert do_reset.call_count == 4
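# Aside (illustrative, not part of the original suite): the side_effect
# trick in test_loop above generalises to stopping any otherwise endless
# drain loop after a fixed number of iterations.
def _make_stopping_side_effect(stop_callback, max_calls=3):
    state = {'count': 0}
    def side_effect(*args, **kwargs):
        if state['count'] >= max_calls:
            stop_callback()
        state['count'] += 1
    return side_effect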
class test_ControlPanel:
def setup(self):
self.panel = self.create_panel(consumer=Consumer(self.app))
@self.app.task(name='c.unittest.mytask', rate_limit=200, shared=False)
def mytask():
pass
self.mytask = mytask
def create_state(self, **kwargs):
kwargs.setdefault('app', self.app)
kwargs.setdefault('hostname', hostname)
kwargs.setdefault('tset', set)
return AttributeDict(kwargs)
def create_panel(self, **kwargs):
return self.app.control.mailbox.Node(
hostname=hostname,
state=self.create_state(**kwargs),
handlers=control.Panel.data,
)
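# Aside (illustrative): every test below drives a remote-control handler
# through this pidbox Node; e.g. self.panel.handle('ping') returns
# {'ok': 'pong'}, as exercised in test_ping further down.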
def test_enable_events(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
evd = consumer.event_dispatcher
evd.groups = set()
panel.handle('enable_events')
assert not evd.groups
evd.groups = {'worker'}
panel.handle('enable_events')
assert 'task' in evd.groups
evd.groups = {'task'}
assert 'already enabled' in panel.handle('enable_events')['ok']
def test_disable_events(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
evd = consumer.event_dispatcher
evd.enabled = True
evd.groups = {'task'}
panel.handle('disable_events')
assert 'task' not in evd.groups
assert 'already disabled' in panel.handle('disable_events')['ok']
def test_clock(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
panel.state.app.clock.value = 313
x = panel.handle('clock')
assert x['clock'] == 313
def test_hello(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
panel.state.app.clock.value = 313
panel.state.hostname = '<EMAIL>'
worker_state.revoked.add('revoked1')
try:
assert panel.handle('hello', {
'from_node': '<EMAIL>',
}) is None
x = panel.handle('hello', {
'from_node': '<EMAIL>',
})
assert x['clock'] == 314 # incremented
x = panel.handle('hello', {
'from_node': '<EMAIL>',
'revoked': {'1234', '4567', '891'}
})
assert 'revoked1' in x['revoked']
assert '1234' in x['revoked']
assert '4567' in x['revoked']
assert '891' in x['revoked']
assert x['clock'] == 315 # incremented
finally:
worker_state.revoked.discard('revoked1')
def test_conf(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
panel.app = self.app
panel.app.finalize()
self.app.conf.some_key6 = 'hello world'
x = panel.handle('dump_conf')
assert 'some_key6' in x
def test_election(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
consumer.gossip = Mock()
panel.handle(
'election', {'id': 'id', 'topic': 'topic', 'action': 'action'},
)
consumer.gossip.election.assert_called_with('id', 'topic', 'action')
def test_election__no_gossip(self):
consumer = Mock(name='consumer')
consumer.gossip = None
panel = self.create_panel(consumer=consumer)
panel.handle(
'election', {'id': 'id', 'topic': 'topic', 'action': 'action'},
)
def test_heartbeat(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
event_dispatcher = consumer.event_dispatcher
event_dispatcher.enabled = True
panel.handle('heartbeat')
assert ('worker-heartbeat',) in event_dispatcher.send.call_args
def test_time_limit(self):
panel = self.create_panel(consumer=Mock())
r = panel.handle('time_limit', arguments={
'task_name': self.mytask.name, 'hard': 30, 'soft': 10})
assert self.mytask.time_limit == 30
assert self.mytask.soft_time_limit == 10
assert 'ok' in r
r = panel.handle('time_limit', arguments={
'task_name': self.mytask.name, 'hard': None, 'soft': None})
assert self.mytask.time_limit is None
assert self.mytask.soft_time_limit is None
assert 'ok' in r
r = panel.handle('time_limit', arguments={
'task_name': '248e8afya9s8dh921eh928', 'hard': 30})
assert 'error' in r
def test_active_queues(self):
import kombu
x = kombu.Consumer(self.app.connection_for_read(),
[kombu.Queue('foo', kombu.Exchange('foo'), 'foo'),
kombu.Queue('bar', kombu.Exchange('bar'), 'bar')],
auto_declare=False)
consumer = Mock()
consumer.task_consumer = x
panel = self.create_panel(consumer=consumer)
r = panel.handle('active_queues')
assert list(sorted(q['name'] for q in r)) == ['bar', 'foo']
def test_active_queues__empty(self):
consumer = Mock(name='consumer')
panel = self.create_panel(consumer=consumer)
consumer.task_consumer = None
assert not panel.handle('active_queues')
def test_dump_tasks(self):
info = '\n'.join(self.panel.handle('dump_tasks'))
assert 'mytask' in info
assert 'rate_limit=200' in info
def test_dump_tasks2(self):
prev, control.DEFAULT_TASK_INFO_ITEMS = (
control.DEFAULT_TASK_INFO_ITEMS, [])
try:
info = '\n'.join(self.panel.handle('dump_tasks'))
assert 'mytask' in info
assert 'rate_limit=200' not in info
finally:
control.DEFAULT_TASK_INFO_ITEMS = prev
def test_stats(self):
prev_count, worker_state.total_count = worker_state.total_count, 100
try:
assert self.panel.handle('stats')['total'] == 100
finally:
worker_state.total_count = prev_count
def test_report(self):
self.panel.handle('report')
def test_active(self):
r = Request(
self.TaskMessage(self.mytask.name, 'do re mi'),
app=self.app,
)
worker_state.active_requests.add(r)
try:
assert self.panel.handle('dump_active')
finally:
worker_state.active_requests.discard(r)
def test_pool_grow(self):
class MockPool(object):
def __init__(self, size=1):
self.size = size
def grow(self, n=1):
self.size += n
def shrink(self, n=1):
self.size -= n
@property
def num_processes(self):
return self.size
consumer = Consumer(self.app)
consumer.prefetch_multiplier = 8
consumer.qos = Mock(name='qos')
consumer.pool = MockPool(1)
panel = self.create_panel(consumer=consumer)
panel.handle('pool_grow')
assert consumer.pool.size == 2
consumer.qos.increment_eventually.assert_called_with(8)
assert consumer.initial_prefetch_count == 16
panel.handle('pool_shrink')
assert consumer.pool.size == 1
consumer.qos.decrement_eventually.assert_called_with(8)
assert consumer.initial_prefetch_count == 8
panel.state.consumer = Mock()
panel.state.consumer.controller = Mock()
sc = panel.state.consumer.controller.autoscaler = Mock()
panel.handle('pool_grow')
sc.force_scale_up.assert_called()
panel.handle('pool_shrink')
sc.force_scale_down.assert_called()
def test_add__cancel_consumer(self):
class MockConsumer(object):
queues = []
canceled = []
consuming = False
hub = Mock(name='hub')
def add_queue(self, queue):
self.queues.append(queue.name)
def consume(self):
self.consuming = True
def cancel_by_queue(self, queue):
self.canceled.append(queue)
def consuming_from(self, queue):
return queue in self.queues
consumer = Consumer(self.app)
consumer.task_consumer = MockConsumer()
panel = self.create_panel(consumer=consumer)
panel.handle('add_consumer', {'queue': 'MyQueue'})
assert 'MyQueue' in consumer.task_consumer.queues
assert consumer.task_consumer.consuming
panel.handle('add_consumer', {'queue': 'MyQueue'})
panel.handle('cancel_consumer', {'queue': 'MyQueue'})
assert 'MyQueue' in consumer.task_consumer.canceled
def test_revoked(self):
worker_state.revoked.clear()
worker_state.revoked.add('a1')
worker_state.revoked.add('a2')
try:
assert sorted(self.panel.handle('dump_revoked')) == ['a1', 'a2']
finally:
worker_state.revoked.clear()
def test_dump_schedule(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
assert not panel.handle('dump_schedule')
r = Request(
self.TaskMessage(self.mytask.name, 'CAFEBABE'),
app=self.app,
)
consumer.timer.schedule.enter_at(
consumer.timer.Entry(lambda x: x, (r,)),
datetime.now() + timedelta(seconds=10))
consumer.timer.schedule.enter_at(
consumer.timer.Entry(lambda x: x, (object(),)),
datetime.now() + timedelta(seconds=10))
assert panel.handle('dump_schedule')
def test_dump_reserved(self):
consumer = Consumer(self.app)
req = Request(
self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app,
) # ^ need to keep reference for reserved_tasks WeakSet.
worker_state.task_reserved(req)
try:
panel = self.create_panel(consumer=consumer)
response = panel.handle('dump_reserved', {'safe': True})
assert response[0]['name'] == self.mytask.name
assert response[0]['hostname'] == socket.gethostname()
worker_state.reserved_requests.clear()
assert not panel.handle('dump_reserved')
finally:
worker_state.reserved_requests.clear()
def test_rate_limit_invalid_rate_limit_string(self):
e = self.panel.handle('rate_limit', arguments={
'task_name': 'tasks.add', 'rate_limit': 'x1240301#%!'})
assert 'Invalid rate limit string' in e.get('error')
def test_rate_limit(self):
class xConsumer(object):
reset = False
def reset_rate_limits(self):
self.reset = True
consumer = xConsumer()
panel = self.create_panel(app=self.app, consumer=consumer)
task = self.app.tasks[self.mytask.name]
panel.handle('rate_limit', arguments={'task_name': task.name,
'rate_limit': '100/m'})
assert task.rate_limit == '100/m'
assert consumer.reset
consumer.reset = False
panel.handle('rate_limit', arguments={
'task_name': task.name,
'rate_limit': 0,
})
assert task.rate_limit == 0
assert consumer.reset
def test_rate_limit_nonexistant_task(self):
self.panel.handle('rate_limit', arguments={
'task_name': 'xxxx.does.not.exist',
'rate_limit': '1000/s'})
def test_unexposed_command(self):
with pytest.raises(KeyError):
self.panel.handle('foo', arguments={})
def test_revoke_with_name(self):
tid = uuid()
m = {
'method': 'revoke',
'destination': hostname,
'arguments': {
'task_id': tid,
'task_name': self.mytask.name,
},
}
self.panel.handle_message(m, None)
assert tid in revoked
def test_revoke_with_name_not_in_registry(self):
tid = uuid()
m = {
'method': 'revoke',
'destination': hostname,
'arguments': {
'task_id': tid,
'task_name': 'xxxxxxxxx33333333388888',
},
}
self.panel.handle_message(m, None)
assert tid in revoked
def test_revoke(self):
tid = uuid()
m = {
'method': 'revoke',
'destination': hostname,
'arguments': {
'task_id': tid,
},
}
self.panel.handle_message(m, None)
assert tid in revoked
m = {
'method': 'revoke',
'destination': 'does.not.exist',
'arguments': {
'task_id': tid + 'xxx',
},
}
self.panel.handle_message(m, None)
assert tid + 'xxx' not in revoked
def test_revoke_terminate(self):
request = Mock()
request.id = tid = uuid()
state = self.create_state()
state.consumer = Mock()
worker_state.task_reserved(request)
try:
r = control.revoke(state, tid, terminate=True)
assert tid in revoked
assert request.terminate.call_count
assert 'terminate:' in r['ok']
# unknown task id only revokes
r = control.revoke(state, uuid(), terminate=True)
assert 'tasks unknown' in r['ok']
finally:
worker_state.task_ready(request)
def test_autoscale(self):
self.panel.state.consumer = Mock()
self.panel.state.consumer.controller = Mock()
sc = self.panel.state.consumer.controller.autoscaler = Mock()
sc.update.return_value = 10, 2
m = {'method': 'autoscale',
'destination': hostname,
'arguments': {'max': '10', 'min': '2'}}
r = self.panel.handle_message(m, None)
assert 'ok' in r
self.panel.state.consumer.controller.autoscaler = None
r = self.panel.handle_message(m, None)
assert 'error' in r
def test_ping(self):
m = {'method': 'ping',
'destination': hostname}
r = self.panel.handle_message(m, None)
assert r == {'ok': 'pong'}
def test_shutdown(self):
m = {'method': 'shutdown',
'destination': hostname}
with pytest.raises(SystemExit):
self.panel.handle_message(m, None)
def test_panel_reply(self):
replies = []
class _Node(pidbox.Node):
def reply(self, data, exchange, routing_key, **kwargs):
replies.append(data)
panel = _Node(
hostname=hostname,
state=self.create_state(consumer=Consumer(self.app)),
handlers=control.Panel.data,
| |
# main_pba.py
import pba
import os
import math
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from dateutil.relativedelta import relativedelta
from math import pi
#from pbox import pnt
#from pbox import rng
from vf_overview import truncate
from vf_overview import build_dataframe
from vf_overview import build_financial_summary
from vf_overview import calc_adjusted_yield
from vf_overview import calc_adjustment_factors
from vf_overview import calc_avg_photoperiod
from vf_overview import calc_best_yield
from vf_overview import calc_capex
from vf_overview import calc_crop_productivity_metrics
from vf_overview import calc_depreciation
from vf_overview import calc_direct_labour
from vf_overview import calc_distribution
from vf_overview import calc_education_rev
from vf_overview import calc_electricity
from vf_overview import calc_grants_rev
from vf_overview import calc_growing_media
from vf_overview import calc_hospitality_rev
from vf_overview import calc_insurance
from vf_overview import calc_loan_repayments
from vf_overview import calc_nutrients_and_num_plants
from vf_overview import calc_other_costs
from vf_overview import calc_packaging
from vf_overview import calc_payback_period
from vf_overview import calc_produce_sales
from vf_overview import calc_productivity_metrics
from vf_overview import calc_rent
from vf_overview import calc_roi
from vf_overview import calc_financial_balance
from vf_overview import calc_salaries
from vf_overview import calc_tourism_rev
from vf_overview import calc_vadded_sales
from vf_overview import calc_waste_adjusted_yield
from vf_overview import calc_water
from vf_overview import cogs_to_df
from vf_overview import crop_and_revenue_to_df
from vf_overview import export_results
from vf_overview import extra_to_df
from vf_overview import get_calendar
from vf_overview import get_gp
from vf_overview import get_scenario
from vf_overview import get_staff_list
from vf_overview import get_currency
from vf_overview import opex_to_df
from vf_overview import productivity_targets
from vf_overview import plot_radar_chart
from vf_equipment import Lights
from vf_equipment import get_lights
from pba_plot import pltem
from pba_plot import pltem_med
from pba_plot import med_pbox
from pba_plot import cut
from pba_plot import median
from pba_plot import minmaxmean
from pba_plot import minmaxmedian
from risk_pba import build_risk_assessment_counter
from risk_pba import build_risk_curves
from risk_pba import build_risk_dataframe
from risk_pba import calc_customer_withdrawal
from risk_pba import calc_improved_light_efficiency
from risk_pba import calc_pathogen_outbreak
from risk_pba import calc_repairs
from risk_pba import calc_pest_outbreak
from risk_pba import calc_planning_delay
from risk_pba import calc_power_outage
from risk_pba import competitors_risk
from risk_pba import labour_challenges
from risk_pba import build_counter
from risk_pba import calc_percent_annual_decline
from risk_pba import calc_probability_of_decline
from risk_pba import decline_data
from risk_pba import cdf_bankruptcy_counter
from risk_pba import build_bankruptcy_definition
from risk_pba import threshold_probability
from risk_pba import probability_df
from risk_pba import reduced_product_quality
from risk_pba import pba_risk_assessment
from risk_pba import risk_assessment_probability
from risk_pba import improved_labour_efficiency
from risk_pba import labour_challenges
from risk_pba import play_risks
from risk_pba import play_opportunities
cwd = os.getcwd() # Get the current working directory (cwd)
files = os.listdir(cwd) # Get all the files in that directory
filename = './Current_Financial_Model.xlsx'
years = 15 # Time series length !!UP TO 20!!
simulations = 1
p_box = "yes"
percent_list = []
repairs = []
#Scenario
scenario = get_scenario()
light = get_lights(scenario.light_system)
scenario.currency = get_currency(scenario)
ceo, headgrower, marketer, scientist, sales_person, manager, delivery, farmhand, admin, part_time = get_staff_list(scenario)
end_date, timeseries_monthly, timeseries_yearly = get_calendar(scenario.start_date, years)
growth_plan = get_gp(scenario)
staff_list = get_staff_list(scenario)
capex_pilot, capex_full = calc_capex(scenario, growth_plan)
risk_counter = build_risk_assessment_counter(years)
pes_counter = build_risk_assessment_counter(years)
trad_counter = build_risk_assessment_counter(years)
cdf_counter = build_risk_assessment_counter(years)
cdf_pes_counter = build_risk_assessment_counter(years)
cdf_trad_counter = build_risk_assessment_counter(years)
threshold_counter = build_counter(thresholds=11)
"""INPUTS OVERWRITTEN WITH UNCERTAINTY"""
HVAC_multiplier = 1
scenario.electricity_price = minmaxmean(0.0734, 0.1079, 0.09065)
#part_time.hours_full = part_time.hours * pba.Pbox(pba.I(scenario.growing_area_mulitplier*0.6, scenario.growing_area_mulitplier*1))
#scenario.education_multiplier = 1.05
#scenario.vadded_products_multiplier = pba.norm(scenario.vadded_products_multiplier, (scenario.vadded_products_multiplier*1.5 - scenario.vadded_products_multiplier*0.5)/4)
scenario.education_multiplier = minmaxmean(scenario.education_multiplier*0.9, scenario.education_multiplier*1.1, scenario.education_multiplier)
#scenario.tourism_multiplier =
#scenario.hospitality_multiplier =
#scenario.labour_improvement = minmaxmean(scenario.labour_improvement*0.85, scenario.labour_improvement*1.15, scenario.labour_improvement)
light_improvement = minmaxmean(0.6, 0.8, 0.7)
water_use = pba.mmms(1325, 8325, 3730, 2039)
#scenario.monthly_distribution_y2 = pba.Pbox(pba.I(scenario.monthly_distribution_y1,scenario.monthly_distribution_y2))
#scenario.crop_parameters[0].price1 = pba.Pbox(pba.I(0,30))#THIS INTERVAL SHOULD AFFECT RISK GRAPH DRASICTLLY
#scenario.other_costs_full = pba.Pbox(pba.I(scenario.other_costs_full*0.8,scenario.other_costs_full*1.2))
"""END"""
"""INPUTS CHANGED FOR AFTER INTERVENTIONS"""
"""END"""
print(growth_plan.stacked_growing_area_full)
# byield_crop1, byield_crop2, byield_crop3, byield_crop4 = calc_best_yield(scenario, growth_plan, lettuce_fu_mix, basil_lemon, basil_genovese, none, years)
crop_yields = calc_best_yield(scenario, growth_plan, years, p_box)
light_factor, temp_factor, nutrient_factor, co2_factor = calc_adjustment_factors(scenario, p_box)
adjusted_yields = calc_adjusted_yield(crop_yields, light_factor, temp_factor, nutrient_factor, co2_factor)
waste_adjusted_yields = calc_waste_adjusted_yield(scenario, adjusted_yields, years, p_box)
crop_sales, total_sales = calc_produce_sales(waste_adjusted_yields, scenario)
vadded_sales = calc_vadded_sales(scenario, years)
education_rev = calc_education_rev(scenario, years)
tourism_rev = calc_tourism_rev(scenario, years)
hospitality_rev = calc_hospitality_rev(scenario, years)
grants_rev = calc_grants_rev(years, scenario)
cogs_labour, direct_labour = calc_direct_labour(farmhand, delivery, part_time, years, scenario)
cogs_media = calc_growing_media(scenario, total_sales, adjusted_yields)
cogs_packaging = calc_packaging(scenario, years, waste_adjusted_yields)
cogs_seeds_nutrients, nutrient_consumption, total_no_of_plants = calc_nutrients_and_num_plants(scenario, cogs_media, adjusted_yields, years)
avg_photoperiod = calc_avg_photoperiod(scenario)
cogs_electricity, electricity_consumption = calc_electricity(scenario, growth_plan, avg_photoperiod, light, years, HVAC_multiplier)
cogs_water, water_consumption = calc_water(scenario, years, waste_adjusted_yields, water_use)
opex_rent = calc_rent(scenario, years)
opex_salaries = calc_salaries(ceo, scientist, marketer, admin, manager, headgrower, sales_person, years)
opex_other_costs = calc_other_costs(scenario, opex_salaries, repairs, years)
opex_insurance = calc_insurance(scenario, years)
opex_distribution = calc_distribution(scenario, years)
loan_repayments, loan_balance = calc_loan_repayments(scenario, years)
depreciation, life_span = calc_depreciation(scenario, light, avg_photoperiod, years)
# Constructing Financial Overview Data Frame
financial_annual_overview, financial_monthly_overview = build_dataframe(timeseries_yearly, timeseries_monthly)
financial_annual_overview = crop_and_revenue_to_df(financial_annual_overview, waste_adjusted_yields, total_sales, vadded_sales, education_rev, tourism_rev, hospitality_rev, grants_rev)
financial_annual_overview = cogs_to_df(financial_annual_overview, cogs_labour, cogs_media, cogs_packaging, cogs_seeds_nutrients, cogs_electricity, cogs_water)
financial_annual_overview = opex_to_df(financial_annual_overview, opex_rent, opex_salaries, opex_other_costs, opex_insurance, opex_distribution)
financial_annual_overview = extra_to_df(financial_annual_overview, loan_repayments, loan_balance, scenario, depreciation)
roi = calc_roi(scenario, financial_annual_overview, years)
financial_annual_overview.loc['Return on Investment'] = roi
# BROKEN HERE - FIX ME
investment_balance, payback_period = calc_payback_period(scenario, financial_annual_overview, years, p_box)
financial_annual_overview, financial_balance = calc_financial_balance(financial_annual_overview, scenario, years, p_box)
# BROKEN HERE - FIX ME
# financial_summary = build_financial_summary(financial_annual_overview, investment_balance, roi, timeseries_yearly)
# Productivity Metrics
#productivity_metrics = calc_productivity_metrics(scenario, timeseries_yearly, waste_adjusted_yields, electricity_consumption, direct_labour, water_consumption, staff_list, nutrient_consumption, total_no_of_plants)
#crop_productivity_metrics = calc_crop_productivity_metrics(productivity_metrics, growth_plan, scenario)
#productivity_targets = productivity_targets(crop_productivity_metrics, scenario)
# Where it gets risky
critical_risk, substantial_risk, moderate_risk = build_risk_curves(years)
bankruptcy_definition, balance_threshold = build_bankruptcy_definition(years)
risk_dataframe = build_risk_dataframe(financial_annual_overview)
# Financial Overview
fig1, ax1 = plt.subplots() # ROI
fig1, ax2 = plt.subplots() # Risk Assessment Curve - ROI + Balance
fig1, ax3 = plt.subplots() # Risk Assessment Curve - ROI or Balance
fig1, ax4 = plt.subplots() # Risk Assessment Curve - Balance
fig1, ax5 = plt.subplots() # Threshold Curve
fig1, ax6 = plt.subplots() # Financial Balance
fig1, ax7 = plt.subplots() # Percent change curve
fig1, ax8 = plt.subplots() # Gross Margins
fig1, ax9 = plt.subplots() # Revenue
fig1, ax10 = plt.subplots() # COGS
fig1, ax11 = plt.subplots() # OPEX
fig1, ax12 = plt.subplots() # Net Profit
fig1, ax13 = plt.subplots() # Yield
# BUILD THE RISK DATAFRAME
risk_dataframe = build_risk_dataframe(financial_annual_overview)
'''RISKS START OCCURRING HERE - NEED TO BE ADJUSTED FOR PBA'''
# Pathogen Outbreak
w_risks = calc_pathogen_outbreak(scenario, years, waste_adjusted_yields, p_box)
# Pest Outbreak
# w_risks = calc_pest_outbreak(scenario, years, w_risks)
# Power Outage
#w_risks = calc_power_outage(scenario, years, w_risks)
# Sales Risk
#_, total_sales_risk = calc_produce_sales(w_risks, scenario)
# Customer Withdrawal
#total_sales_risk = calc_customer_withdrawal(scenario, years, total_sales_risk)
# Repair Risk
#repairs = calc_repairs(scenario, years)
#opex_other_costs = calc_other_costs(scenario, opex_salaries, repairs, years)
# Labour Risks
#total_sales_risk, cogs_labour = labour_challenges(scenario, years, total_sales_risk, cogs_labour)
"""THIS EXECUTES THE RISKS - PICK THE SELECTED AS TRUE"""
waste_adjusted_yields_risks, total_sales_risk, opex_other_costs, cogs_labour, risk_dataframe = play_risks(pathogen_outbreak=True, pest_outbreak=True, power_outage=True, customer_withdrawal=False, repair_risk=True,labour_problems=False, planning_delay=False,
scenario=scenario, years=years, waste_adjusted_yields=waste_adjusted_yields, p_box=p_box, opex_salaries=opex_salaries, cogs_labour=cogs_labour, risk_dataframe=risk_dataframe, timeseries_yearly=timeseries_yearly)
# opex_other_costs = calc_other_costs(scenario, opex_salaries, repairs, years)
# Electricity efficiency improvements
"""THIS EXECUTES THE OPPORTUNITIES - PICK THE SELECTED AS TRUE"""
cogs_electricity, electricity_consumption = play_opportunities(light_efficiency=True, labour_efficiency=False,
scenario=scenario, years=years, growth_plan=growth_plan, avg_photoperiod=avg_photoperiod, light=light, life_span=life_span, electricity_consumption=electricity_consumption, HVAC_multiplier=HVAC_multiplier, light_improvement=light_improvement, cogs_electricity=cogs_electricity)
#cogs_electricity, electricity_consumption = calc_improved_light_efficiency(scenario, years, growth_plan, avg_photoperiod, light, life_span, electricity_consumption, HVAC_multiplier, light_improvement)
'''Recomposing Dataframe'''
risk_dataframe = crop_and_revenue_to_df(risk_dataframe, waste_adjusted_yields_risks, total_sales_risk, vadded_sales, education_rev, tourism_rev, hospitality_rev, grants_rev)
risk_dataframe = cogs_to_df(risk_dataframe, cogs_labour, cogs_media, cogs_packaging,
cogs_seeds_nutrients, cogs_electricity, cogs_water)
risk_dataframe = opex_to_df(risk_dataframe, opex_rent, opex_salaries, opex_other_costs,
opex_insurance, opex_distribution)
# PLANNING DELAY
#risk_dataframe = calc_planning_delay(risk_dataframe, timeseries_yearly, years, scenario)
# REBALANCE DATAFRAME
risk_dataframe = extra_to_df(risk_dataframe, loan_repayments, loan_balance, scenario,
depreciation)
# ROI
roi_risk = calc_roi(scenario, risk_dataframe, years)
risk_dataframe.loc['Return on Investment'] = roi_risk
risk_dataframe, risk_financial_balance = calc_financial_balance(risk_dataframe, scenario, years, p_box)
risk_counter, pes_counter, trad_counter = pba_risk_assessment(roi_risk, risk_financial_balance, bankruptcy_definition, balance_threshold, years, risk_counter, pes_counter, trad_counter, p_box)
risk_summary = build_financial_summary(risk_dataframe, investment_balance,roi_risk, timeseries_yearly)
# Rate of Decline
#percent_list = calc_percent_annual_decline(risk_dataframe) # FIX ME
# CDF Bankruptcy
cdf_counter, cdf_pes_counter, cdf_trad_counter = cdf_bankruptcy_counter(bankruptcy_definition, cdf_counter, cdf_pes_counter, cdf_trad_counter, roi_risk, risk_financial_balance, years, balance_threshold, p_box)
# CDF Threshold
threshold_counter, thresholds_axis = threshold_probability(threshold_counter, roi, risk_financial_balance, bankruptcy_definition, balance_threshold)
# COMMENT OUT IF NOT INTERESTED IN RISK PLOTS
#fig1, ax2.plot(risk_dataframe.columns, roi_risk)
#fig1, ax3.plot(risk_dataframe.columns, risk_dataframe.loc['Revenue - Crop Sales'])
#fig1, ax5.plot(risk_dataframe.columns, risk_dataframe.loc['Financial Balance'])
# FIRST PASSAGE TIME
first_passage_df = risk_assessment_probability(cdf_counter, years, simulations, timeseries_yearly, p_box)
first_passage_pes_df = risk_assessment_probability(cdf_pes_counter, years, simulations, timeseries_yearly, p_box)
first_passage_trad_df = risk_assessment_probability(cdf_trad_counter, years, simulations, timeseries_yearly, p_box)
# BANKRUPTCY WITH RECOVERY
risk_assessment_probability_df = risk_assessment_probability(risk_counter, years, simulations, timeseries_yearly, p_box)
risk_assessment_pes_df = risk_assessment_probability(pes_counter, years, simulations, timeseries_yearly, p_box)
risk_assessment_trad_df = risk_assessment_probability(trad_counter, years, simulations, timeseries_yearly, p_box)
#percent_df = calc_probability_of_decline(percent_list, simulations) # FIX ME: depends on percent_list, whose computation above is also commented out
threshold_df = probability_df(threshold_counter, simulations, thresholds_axis)
'''HERE ARE THE PLOTS!'''
# THIS GRAPH IS TOO CLUTTERED WITH PROBABILITY BOUNDS
#pltem(ax1, financial_annual_overview.columns, financial_annual_overview.loc['Total Revenue'], label='Revenue')
#pltem(ax1, financial_annual_overview.columns, financial_annual_overview.loc['Total COGS'])
#ax1.plot(financial_annual_overview.columns, financial_annual_overview.loc['Total OPEX'], label='OPEX')
#pltem(ax1, financial_annual_overview.columns, financial_annual_overview.loc['Gross Profit'])
#pltem(ax1, financial_annual_overview.columns, financial_annual_overview.loc['Net Profit'])
#pltem(ax6, financial_annual_overview.columns, financial_annual_overview.loc['Total OPEX'])
#ax1.plot(financial_annual_overview.columns, financial_annual_overview.loc['Total OPEX'], label='OPEX')
#ax1.set_xlabel('Year')
#ax1.set_ylabel('Finance (£)')
#ax1.set_title('Annual Financial Overview')
#ax1.legend()
#
SMALL = 8
MEDIUM = 10
LARGE = 12
VLARGE = 20
xticks = 4
yticks = 3
#fig, axes = plt.subplots(nrows=10, ncols=1)
#plt.setp(ax1, xtickslabels =xticks, ytickslabels=yticks)
#plt.locator_params(axis="x", nbins=4)
#plt.locator_params(axis="y", nbins=3)
#plt.rc('xtick', labelsize=MEDIUM)
#plt.rc('ytick', labelsize=MEDIUM)
# ROI GRAPH
pltem_med(ax1, risk_dataframe.columns, roi_risk, shade=True)
pltem(ax1, risk_dataframe.columns, roi_risk, label = '', shade=True)
ax1.plot(timeseries_yearly, bankruptcy_definition, label = 'Threshold', color='k', linestyle='dashed')
ax1.set_xlim(timeseries_yearly[1], timeseries_yearly[-1])
ax1.set_xlabel('Year', fontsize=MEDIUM)
ax1.set_ylabel('ROI (%)', fontsize = MEDIUM)
ax1.set_title('Return on Investment for Farm Lifetime', fontsize=LARGE)
ax1.legend()
ax1.grid()
# Financial Balance Graph
balance_threshold_p = [balance_threshold] * (years + 1)
pltem_med(ax2, risk_dataframe.columns, risk_financial_balance, shade=True)
pltem(ax2, risk_dataframe.columns, risk_financial_balance, label='', shade=True)
ax2.plot(timeseries_yearly, balance_threshold_p, label='Threshold', color='k', linestyle='dashed')
ax2.set_xlabel('Year')
ax2.set_ylabel('Financial Balance ({})'.format(scenario.currency))
ax2.set_title('Financial Balance for Farm Lifetime')
ax2.legend()
ax2.grid()
ax3.plot(timeseries_yearly, critical_risk, linestyle='dashed', color='k', lw=1) #label='Critical')
ax3.plot(timeseries_yearly, moderate_risk, linestyle='dashed', color='k', lw=1) #label='Moderate')
ax3.plot(timeseries_yearly, substantial_risk, linestyle='dashed', color='k', lw=1) #label='Substantial')
pltem(ax3, risk_assessment_probability_df.columns, risk_assessment_probability_df.loc['cdf'], label='', shade=True) # ROI AND FINANCIAL BALANCE
ax3.legend()
ax3.set_xlim(timeseries_yearly[0], timeseries_yearly[-1])
ax3.set_ylim(0, 1)
ax3.set_ylabel('Probability of Bankruptcy')
ax3.set_xlabel('Year')
ax3.set_title('Risk Assessment for ROI and Balance')
ax3.text(timeseries_yearly[1], 0.8, 'Critical')
ax3.text(timeseries_yearly[years-10], 0.6, 'Substantial')
ax3.text(timeseries_yearly[years-5], 0.3, 'Moderate')
ax3.text(timeseries_yearly[years-3], 0.05, 'Safe')
ax4.plot(timeseries_yearly, critical_risk, linestyle='dashed', color='k', lw=1) #label='Critical')
ax4.plot(timeseries_yearly, moderate_risk, linestyle='dashed', color='k', lw=1) #label='Moderate')
ax4.plot(timeseries_yearly, substantial_risk, linestyle='dashed', color='k', lw=1) #label='Substantial')
pltem(ax4, risk_assessment_pes_df.columns, risk_assessment_pes_df.loc['cdf'], label='', shade=True) # ROI OR FINANCIAL BALANCE
ax4.legend()
ax4.set_xlim(timeseries_yearly[0], timeseries_yearly[-1])
ax4.set_ylim(0, 1)
ax4.set_ylabel('Probability of Bankruptcy')
ax4.set_xlabel('Year')
ax4.set_title('Risk Assessment for ROI or Balance')
ax4.text(timeseries_yearly[1], 0.8, 'Critical')
ax4.text(timeseries_yearly[years-10], 0.6, 'Substantial')
ax4.text(timeseries_yearly[years-5], 0.3, 'Moderate')
ax4.text(timeseries_yearly[years-3], 0.05, 'Safe')
ax5.plot(timeseries_yearly, critical_risk, linestyle='dashed', color='k', lw=1) #label='Critical')
ax5.plot(timeseries_yearly, moderate_risk, linestyle='dashed', color='k', lw=1) #label='Moderate')
ax5.plot(timeseries_yearly, substantial_risk, linestyle='dashed', color='k', lw=1) #label='Substantial')
pltem(ax5, risk_assessment_trad_df.columns, risk_assessment_trad_df.loc['cdf'], label='', shade=True) # JUST FINANCIAL BALANCE
ax5.legend()
ax5.set_xlim(timeseries_yearly[0], timeseries_yearly[-1])
ax5.set_ylim(0, 1)
ax5.set_ylabel('Probability of Bankruptcy')
ax5.set_xlabel('Year')
ax5.set_title('Risk Assessment for Negative Cashflow')
ax5.text(timeseries_yearly[1], 0.8, 'Critical')
ax5.text(timeseries_yearly[years-10], 0.6, 'Substantial')
ax5.text(timeseries_yearly[years-5], 0.3, 'Moderate')
ax5.text(timeseries_yearly[years-3], 0.05, 'Safe')
# pltem(ax6, threshold_df.columns, threshold_df.loc['cdf'], label='CDF', shade=True)
# ax6.set_xlabel('Threshold')
name = "com_google_cloud_go_firestore",
importpath = "cloud.google.com/go/firestore",
sum = "h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=",
version = "v1.1.0",
)
go_repository(
name = "com_google_cloud_go_pubsub",
importpath = "cloud.google.com/go/pubsub",
sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=",
version = "v1.3.1",
)
go_repository(
name = "com_google_cloud_go_storage",
importpath = "cloud.google.com/go/storage",
sum = "h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=",
version = "v1.10.0",
)
go_repository(
name = "com_shuralyov_dmitri_gpu_mtl",
importpath = "dmitri.shuralyov.com/gpu/mtl",
sum = "h1:+PdD6GLKejR9DizMAKT5DpSAkKswvZrurk1/eEt9+pw=",
version = "v0.0.0-20201218220906-28db891af037",
)
go_repository(
name = "in_gopkg_airbrake_gobrake_v2",
importpath = "gopkg.in/airbrake/gobrake.v2",
sum = "h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=",
version = "v2.0.9",
)
go_repository(
name = "in_gopkg_alecthomas_kingpin_v2",
importpath = "gopkg.in/alecthomas/kingpin.v2",
sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
version = "v2.2.6",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=",
version = "v1.0.0-20200227125254-8fa46927fb4f",
)
go_repository(
name = "in_gopkg_cheggaaa_pb_v1",
importpath = "gopkg.in/cheggaaa/pb.v1",
sum = "h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=",
version = "v1.0.25",
)
go_repository(
name = "in_gopkg_errgo_v2",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_fsnotify_v1",
importpath = "gopkg.in/fsnotify.v1",
sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_gemnasium_logrus_airbrake_hook_v2",
importpath = "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
sum = "h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=",
version = "v2.1.2",
)
go_repository(
name = "in_gopkg_go_playground_assert_v1",
importpath = "gopkg.in/go-playground/assert.v1",
sum = "h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=",
version = "v1.2.1",
)
go_repository(
name = "in_gopkg_go_playground_validator_v8",
importpath = "gopkg.in/go-playground/validator.v8",
sum = "h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=",
version = "v8.18.2",
)
go_repository(
name = "in_gopkg_inf_v0",
importpath = "gopkg.in/inf.v0",
sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
version = "v0.9.1",
)
go_repository(
name = "in_gopkg_ini_v1",
importpath = "gopkg.in/ini.v1",
sum = "h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=",
version = "v1.62.0",
)
go_repository(
name = "in_gopkg_mgo_v2",
importpath = "gopkg.in/mgo.v2",
sum = "h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=",
version = "v2.0.0-20180705113604-9856a29383ce",
)
go_repository(
name = "in_gopkg_natefinch_lumberjack_v2",
importpath = "gopkg.in/natefinch/lumberjack.v2",
sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=",
version = "v2.0.0",
)
go_repository(
name = "in_gopkg_resty_v1",
importpath = "gopkg.in/resty.v1",
sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
version = "v1.12.0",
)
go_repository(
name = "in_gopkg_segmentio_analytics_go_v3",
importpath = "gopkg.in/segmentio/analytics-go.v3",
sum = "h1:UzxH1uaGZRpMKDhJyBz0pexz6yUoBU3x8bJsRk/HV6U=",
version = "v3.1.0",
)
go_repository(
name = "in_gopkg_square_go_jose_v2",
importpath = "gopkg.in/square/go-jose.v2",
sum = "h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=",
version = "v2.5.1",
)
go_repository(
name = "in_gopkg_src_d_go_billy_v4",
importpath = "gopkg.in/src-d/go-billy.v4",
sum = "h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=",
version = "v4.3.2",
)
go_repository(
name = "in_gopkg_src_d_go_git_fixtures_v3",
importpath = "gopkg.in/src-d/go-git-fixtures.v3",
sum = "h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=",
version = "v3.5.0",
)
go_repository(
name = "in_gopkg_src_d_go_git_v4",
importpath = "gopkg.in/src-d/go-git.v4",
sum = "h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=",
version = "v4.13.1",
)
go_repository(
name = "in_gopkg_tomb_v1",
importpath = "gopkg.in/tomb.v1",
sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
version = "v1.0.0-20141024135613-dd632973f1e7",
)
go_repository(
name = "in_gopkg_warnings_v0",
importpath = "gopkg.in/warnings.v0",
sum = "h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=",
version = "v0.1.2",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
replace = "gopkg.in/yaml.v2",
sum = "h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=",
version = "v2.4.0",
)
go_repository(
name = "in_gopkg_yaml_v3",
importpath = "gopkg.in/yaml.v3",
sum = "h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=",
version = "v3.0.0-20210107192922-496545a6307b",
)
go_repository(
name = "io_etcd_go_bbolt",
importpath = "go.etcd.io/bbolt",
sum = "h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=",
version = "v1.3.5",
)
go_repository(
name = "io_etcd_go_etcd",
importpath = "go.etcd.io/etcd",
sum = "h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=",
version = "v0.5.0-alpha.5.0.20200910180754-dd1b699fc489",
)
go_repository(
name = "io_etcd_go_etcd_api_v3",
build_file_proto_mode = "disable",
importpath = "go.etcd.io/etcd/api/v3",
sum = "h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=",
version = "v3.5.0",
)
go_repository(
name = "io_etcd_go_etcd_client_pkg_v3",
importpath = "go.etcd.io/etcd/client/pkg/v3",
sum = "h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU=",
version = "v3.5.0",
)
go_repository(
name = "io_etcd_go_etcd_client_v2",
importpath = "go.etcd.io/etcd/client/v2",
sum = "h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=",
version = "v2.305.0",
)
go_repository(
name = "io_etcd_go_etcd_client_v3",
build_file_proto_mode = "disable",
importpath = "go.etcd.io/etcd/client/v3",
sum = "h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek=",
version = "v3.5.0",
)
go_repository(
name = "io_k8s_api",
build_file_proto_mode = "disable",
importpath = "k8s.io/api",
sum = "h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_apiextensions_apiserver",
build_file_proto_mode = "disable",
importpath = "k8s.io/apiextensions-apiserver",
sum = "h1:ZrXQeslal+6zKM/HjDXLzThlz/vPSxrfK3OqL8txgVQ=",
version = "v0.20.1",
)
go_repository(
name = "io_k8s_apimachinery",
build_file_proto_mode = "disable",
importpath = "k8s.io/apimachinery",
sum = "h1:R5p3SlhaABYShQSO6LpPsYHjV05Q+79eBUR0Ut/f4tk=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_apiserver",
importpath = "k8s.io/apiserver",
sum = "h1:NnVriMMOpqQX+dshbDoZixqmBhfgrPk2uOh2fzp9vHE=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_cli_runtime",
importpath = "k8s.io/cli-runtime",
sum = "h1:ZRrIC1ly/f86BlF0r22KSMtKo3xbeYegAqaH/tEen94=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_client_go",
importpath = "k8s.io/client-go",
sum = "h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_code_generator",
importpath = "k8s.io/code-generator",
sum = "h1:kp65Y6kF6A4+5PvSNvXWSI5p5vuA9tUxEqEZciPw+7Q=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_component_base",
importpath = "k8s.io/component-base",
sum = "h1:G0inASS5vAqCpzs7M4Sp9dv9d0aElpz39zDHbSB4f4g=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_component_helpers",
importpath = "k8s.io/component-helpers",
sum = "h1:lp+Y2AFn+gAEEXl+DbOuLgeWGVwJaF/X1o3I9iLHebE=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_cri_api",
importpath = "k8s.io/cri-api",
sum = "h1:iXX0K2pRrbR8yXbZtDK/bSnmg/uSqIFiVJK1x4LUOMc=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_gengo",
importpath = "k8s.io/gengo",
sum = "h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=",
version = "v0.0.0-20201113003025-83324d819ded",
)
go_repository(
name = "io_k8s_klog_v2",
importpath = "k8s.io/klog/v2",
sum = "h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=",
version = "v2.8.0",
)
go_repository(
name = "io_k8s_kube_openapi",
importpath = "k8s.io/kube-openapi",
sum = "h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=",
version = "v0.0.0-20201113171705-d219536bb9fd",
)
go_repository(
name = "io_k8s_kubectl",
importpath = "k8s.io/kubectl",
sum = "h1:G0a3fJXvypzN1fDcO+clH131rpDxNtDZIgSuogSCtng=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_kubernetes",
importpath = "k8s.io/kubernetes",
sum = "h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=",
version = "v1.13.0",
)
go_repository(
name = "io_k8s_metrics",
importpath = "k8s.io/metrics",
sum = "h1:HVAYYKA/9HhKQX952EwE4hejvD61UALLpqYRYvRSvGo=",
version = "v0.20.6",
)
go_repository(
name = "io_k8s_sigs_apiserver_network_proxy_konnectivity_client",
importpath = "sigs.k8s.io/apiserver-network-proxy/konnectivity-client",
sum = "h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=",
version = "v0.0.15",
)
go_repository(
name = "io_k8s_sigs_controller_runtime",
importpath = "sigs.k8s.io/controller-runtime",
sum = "h1:GMHvzjTmaWHQB8HadW+dIvBoJuLvZObYJ5YoZruPRao=",
version = "v0.8.3",
)
go_repository(
name = "io_k8s_sigs_kustomize",
importpath = "sigs.k8s.io/kustomize",
sum = "h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=",
version = "v2.0.3+incompatible",
)
go_repository(
name = "io_k8s_sigs_structured_merge_diff_v4",
importpath = "sigs.k8s.io/structured-merge-diff/v4",
sum = "h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c=",
version = "v4.0.3",
)
go_repository(
name = "io_k8s_sigs_yaml",
importpath = "sigs.k8s.io/yaml",
sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=",
version = "v1.2.0",
)
go_repository(
name = "io_k8s_utils",
importpath = "k8s.io/utils",
sum = "h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=",
version = "v0.0.0-20210111153108-fddb29f9d009",
)
go_repository(
name = "io_opencensus_go",
importpath = "go.opencensus.io",
sum = "h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=",
version = "v0.23.0",
)
go_repository(
name = "io_rsc_pdf",
importpath = "rsc.io/pdf",
sum = "h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=",
version = "v0.1.1",
)
go_repository(
name = "net_starlark_go",
importpath = "go.starlark.net",
sum = "h1:lkYv5AKwvvduv5XWP6szk/bvvgO6aDeUujhZQXIFTes=",
version = "v0.0.0-20190702223751-32f345186213",
)
go_repository(
name = "org_bazil_fuse",
importpath = "bazil.org/fuse",
sum = "h1:SC+c6A1qTFstO9qmB86mPV2IpYme/2ZoEQ0hrP+wo+Q=",
version = "v0.0.0-20160811212531-371fbbdaa898",
)
go_repository(
name = "org_golang_google_api",
importpath = "google.golang.org/api",
replace = "google.golang.org/api",
sum = "h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA=",
version = "v0.43.0",
)
go_repository(
name = "org_golang_google_appengine",
importpath = "google.golang.org/appengine",
sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=",
version = "v1.6.7",
)
go_repository(
name = "org_golang_google_cloud",
importpath = "google.golang.org/cloud",
sum = "h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=",
version = "v0.0.0-20151119220103-975617b05ea8",
)
go_repository(
name = "org_golang_google_genproto",
importpath = "google.golang.org/genproto",
replace = "google.golang.org/genproto",
sum = "h1:vVeMwkgjjF0rgUTvAJkHJC5hUf50yFdZFDpBgK2kVXI=",
version = "v0.0.0-20210329143202-679c6ae281ee",
)
go_repository(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
replace = "google.golang.org/grpc",
sum = "h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY=",
version = "v1.36.1",
)
go_repository(
name = "org_golang_google_protobuf",
importpath = "google.golang.org/protobuf",
sum = "h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=",
version = "v1.26.0",
)
go_repository(
name = "org_golang_x_arch",
importpath = "golang.org/x/arch",
sum = "h1:XmKBi9R6duxOB3lfc72wyrwiOY7X2Jl1wuI+RFOyMDE=",
version = "v0.0.0-20201008161808-52c3e6f60cff",
)
go_repository(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
replace = "github.com/golang/crypto",
sum = "h1:8llN7yzwGxwu9113L6qZhy5GAsXqM0CwzpGy7Jg4d8A=",
version = "v0.0.0-20210322153248-0c34fe9e7dc2",
)
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
replace = "github.com/golang/exp",
sum = "h1:Y1U71lBbDwoAU53t+H+zyzEHffhVdtwbhunE3d+c248=",
version = "v0.0.0-20210220032938-85be41e4509f",
)
go_repository(
name = "org_golang_x_image",
importpath = "golang.org/x/image",
replace = "github.com/golang/image",
sum = "h1:2RD9fgAaBaM2OmMeoOZhrVFJncb2ZvYbaQBc93IRHKg=",
version = "v0.0.0-20210220032944-ac19c3e999fb",
)
go_repository(
name = "org_golang_x_lint",
importpath = "golang.org/x/lint",
replace = "github.com/golang/lint",
sum = "h1:yoAjkFah23+tVFVuPMY+qVhs1qr4MxUlH0TAh80VbOw=",
version = "v0.0.0-20201208152925-83fdc39ff7b5",
)
go_repository(
name = "org_golang_x_mobile",
importpath = "golang.org/x/mobile",
replace = "github.com/golang/mobile",
sum = "h1:aCGaFYtce0DYTWxDA9R7t3PWnA41u4L9vVB5g5/WWgM=",
version = "v0.0.0-20210220033013-bdb1ca9a1e08",
)
go_repository(
name = "org_golang_x_mod",
importpath = "golang.org/x/mod",
replace = "github.com/golang/mod",
sum = "h1:lH77g1z4f17x8y6SttLVCeJEBqNXOmSodud0ja0tP60=",
version = "v0.4.2",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
replace = "github.com/golang/net",
sum = "h1:TlSK6ePheivtSKLVN+ngvv+GsazgRZ5EGFnJcDU3PSc=",
version = "v0.0.0-20210330142815-c8897c278d10",
)
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
replace = "github.com/golang/oauth2",
sum = "h1:Zc7CevDpcBZhHjqOaX0sV8wd+9SOK8Ga79yw9fOhbxA=",
version = "v0.0.0-20210323180902-22b0adad7558",
)
go_repository(
name = "org_golang_x_sync",
importpath = "golang.org/x/sync",
replace = "github.com/golang/sync",
sum = "h1:SaEy0CSD5VHxfkVDQp+KnOeeiSEG4LrHDCKqu9MskrQ=",
version = "v0.0.0-20210220032951-036812b2e83c",
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
replace = "github.com/golang/sys",
sum = "h1:lYOSQzf0I48VjRWUcpmAx0uYpuOQKKW7jwuyKDxh7eo=",
version = "v0.0.0-20210326220804-49726bf1d181",
)
go_repository(
name = "org_golang_x_term",
importpath = "golang.org/x/term",
replace = "github.com/golang/term",
sum = "h1:Fl0EEbnS9r4M/efcJKb0c2ahDsLrvuA+DkkBfLQgYWA=",
version = "v0.0.0-20210317153231-de623e64d2a6",
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
replace = "github.com/golang/text",
sum = "h1:ohhPA4cGdIMkmMvhC+HU6qBl8zeOoM8M8o8N6mbcL3U=",
version = "v0.3.5",
)
go_repository(
name = "org_golang_x_time",
importpath = "golang.org/x/time",
replace = "github.com/golang/time",
sum = "h1:xgei/lBA0MICqy4kX0+HHp9N3aFDmulXmfDG4mvhA+c=",
version = "v0.0.0-20210220033141-f8bda1e9f3ba",
)
go_repository(
name = "org_golang_x_tools",
importpath = "golang.org/x/tools",
replace = "github.com/golang/tools",
sum = "h1:5KUMmBvKS2Hhl2vb1USRzLZbaiQ+cPEftIk2+QH7mAI=",
version = "v0.1.0",
)
go_repository(
name = "org_golang_x_xerrors",
importpath = "golang.org/x/xerrors",
replace = "github.com/golang/xerrors",
sum = "h1:jhmkoTjuPVg+HX0++Mq184QYuCgK29clNAbkZwI8/0Y=",
version = "v0.0.0-20200804184101-5ec99f83aff1",
)
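    # NOTE: the org_golang_x_* repositories above use `replace` to fetch the
    # corresponding github.com/golang/* mirrors while keeping the canonical
    # golang.org/x import paths.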
go_repository(
name = "org_mongodb_go_mongo_driver",
importpath = "go.mongodb.org/mongo-driver",
replace = "go.mongodb.org/mongo-driver",
sum = "h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=",
version = "v1.5.1",
)
go_repository(
name = "org_mozilla_go_pkcs7",
importpath = "go.mozilla.org/pkcs7",
sum = "h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=",
version = "v0.0.0-20200128120323-432b2356ecb1",
)
go_repository(
name = "org_uber_go_atomic",
importpath = "go.uber.org/atomic",
sum = "h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=",
version = "v1.7.0",
)
go_repository(
name = "org_uber_go_goleak",
importpath = "go.uber.org/goleak",
sum = "h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=",
version = "v1.1.10",
)
go_repository(
name = "org_uber_go_multierr",
importpath = "go.uber.org/multierr",
sum = "h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=",
version = "v1.6.0",
)
    "opcor" (float): Correlation between normalized log ratios of pairs of genes predicted to be
in the same operon per experiment
"adjcor" (float): Correlation between normalized log ratios of pairs of adjacent genes per experiment
"gccor" (float): Correlation between normalized log ratios of genes and their GC percentage per experiment
"maxFit" (float): The maximum normalized log ratio value per experiment
We combine the initialized Quality DataFrame, the Fit Read
Metrics DataFrame and the FitQuality DataFrame and that's
our current Quality DataFrame, which contains 19 (or possibly 18) columns,
and the number of rows is equal to the total number of
good experiments (so it varies per run).
Then we run the function FEBA_Exp_Status
FEBA_Exp_Status:
For each row in the Quality DataFrame, we check the values under
    certain columns to decide how to label its quality.
The values we check, in this order, are:
1. Is it a Time0? ("Time0")
2. Does it have a low Median over the sums over locusIds? ("low_count")
- param 'min_gMed'
3. Does it have a high Median of the differences between first and
second half log ratios? ("high_mad12") - param 'max_mad12'
4. Is there a low correlation between the first and second half
log ratios? ("low_cor12") - param 'min_cor12'
5. Is the GC correlation high or is the adjacent genes correlation
high? ("high_adj_gc_cor") - params 'max_gccor' & 'max_adjcor'
    If none of these are True, then the experiment is given the status
    "OK". Otherwise, the function returns the label of the first check
    that is True, i.e. the string shown in parentheses above.
The whole function returns a pandas series the length of which
is the number of rows in quality_df.
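    A minimal sketch of that decision logic (the threshold defaults and the
    exact column names used here are assumptions, not taken from the real
    function):
        def exp_status(row, min_gMed=50, max_mad12=0.5, min_cor12=0.1,
                       max_gccor=0.2, max_adjcor=0.25):
            if row["short"] == "Time0":
                return "Time0"
            if row["gMed"] < min_gMed:
                return "low_count"
            if row["mad12"] > max_mad12:
                return "high_mad12"
            if row["cor12"] < min_cor12:
                return "low_cor12"
            if row["gccor"] > max_gccor or row["adjcor"] > max_adjcor:
                return "high_adj_gc_cor"
            return "OK"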
    Next we add a column called 'u' to the quality dataframe: for each
    experiment it holds True if the experiment's status is "OK" and False
    otherwise, so 'u' records whether the experiment passed. After adding
    the column 'u', the Quality DataFrame has 20 (or 19) columns in total.
We shift our focus to the 'strains' DataFrame.
First, we only take the metadata columns from all.poolcount,
meaning the columns:
'barcode', 'rcbarcode', 'scaffold', 'strand', 'pos', 'locusId', 'f'
and add new columns to it:
'used': which strains were used to compute Gene Fitness (just the
strainsUsed List as a column.)
'enoughT0': which strains had a high enough Mean within T0s
Now we create two important dataframes:
strain_lr and strain_se: Both of which have the
same length (num rows) as all.poolcount. Both simply
take the values computed in the function
StrainFitness (in analysis1) and turn them into
dictionaries with experiment names as the column
names and the values as the columns beneath them.
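    For example (a sketch, assuming StrainFitness returned dicts of
    per-strain values keyed by experiment name):
        strain_lr = pd.DataFrame(strain_fit_lr_dict)
        strain_se = pd.DataFrame(strain_fit_se_dict)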
Next we normalize the strain fitness values (creating
the dataframe 'strain_lrn'). We use the function
normalize_per_strain_values:
First, for every strain, we find the closest gene center from our list of used genes
(genes that passed all the thresholds to be used). We call this list 'strainToGene'.
So for strains that are within a gene, their closest gene will also be the gene
that they are inserted into.
Next, we create a dataframe that is the difference between the normalized log
ratio values per experiment per gene, and the log ratio values per gene (that
are not normalized).
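    One way to compute that closest-gene mapping (a sketch that assumes a
    single scaffold with gene centers already sorted; the real code must also
    respect scaffold boundaries):
        import numpy as np
        # centers: sorted centers of the used genes; pos: strain positions
        j = np.searchsorted(centers, pos)
        j = np.clip(j, 1, len(centers) - 1)
        left_closer = (pos - centers[j - 1]) <= (centers[j] - pos)
        strainToGene = np.where(left_closer, j - 1, j)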
    Then, for each strain, we normalize its fitness values as follows:
create_strain_lrn:
First, for each experiment, we take the per gene difference between
the normalized log ratio and the plain old log ratio, and then we
map those values onto all the strains using the Strain To Closest
Gene series we computed earlier. So for each strain, we take the
closest Gene and place the difference between the normalized
log fitness and the regular log fitness in a pandas Series we call
"per_strain_exp_diff". Then we group the strains by scaffold and
take the medians of per_strain_exp_diff by these scaffolds, and for each
strain, instead of having its own gene difference value, it instead now
has the median of all the strain difference values in the same scaffold
as it. Next we multiply that entire series by negative 1 and call it
neg_str_scf_med, for 'negative strain per scaffold median'.
Now we initialize the numbers we'll add to the original log ratios
in order to get the normalized strain values. For each value in
per_strain_exp_diff, if it's 'NA', then we insert the negative
median of the differences from neg_str_scf_med, otherwise, we leave the
value as it is (the per_strain_exp_diff value); we call the new series 'sdiff'.
To get the final normalized log ratios for the strains under this
experiment, we simply add the original log ratios per strain to the values
in sdiff, and that's our column for this experiment.
The entire normalized log ratio per strain dataframe is one column
per experiment name.
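    A condensed pandas sketch of one experiment's normalization (variable
    names are illustrative; 'strains' is assumed to carry a 'scaffold' column
    aligned with the strain rows):
        # (lrn - lr) per gene, mapped onto each strain via its closest gene
        per_strain_exp_diff = gene_lrn_minus_lr.reindex(strainToGene).reset_index(drop=True)
        # negated per-scaffold median of those differences
        neg_str_scf_med = -per_strain_exp_diff.groupby(strains['scaffold'].values).transform('median')
        # fall back to the negated scaffold median where a strain has no value
        sdiff = per_strain_exp_diff.fillna(neg_str_scf_med)
        strain_lrn[exp_name] = strain_lr[exp_name] + sdiff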
    So within the function analysis2, we have added several new dataframes
    to the output 'gene_fit_d':
'q': for quality, with 20/19 columns and one row per used experiment,
ALL BELOW DATAFRAMES HAVE THEIR NUMBER OF ROWS = nAllStrains
'strains': The original strains meta_ix dataframe, with two extra
columns 'used' and 'enoughT0'
Below dataframes have number of columns = nExperimentsUsed
'strain_lr': The log ratios per experiment (unnormalized)
'strain_se': The standard error per strain per experiment
'strain_lrn': The normalized log ratios per experiment
This is a pandas Series (also length nAllStrains):
'strainToGene': Closest gene to strain using index from 'g' pandas Series.
Function ends and returns gene_fit_d
"""
    if cfg is not None:
        minT0Strain = cfg["minT0Strain"]
        cfg.setdefault("status_d", None)
    else:
        minT0Strain = 3
        cfg = {
            "status_d": None
        }
gene_fit_d = initialize_gene_fit_d(GeneFitResults, debug=True)
# We recompute central_insert_bool_list:
    central_insert_bool_list = [0.1 <= x <= 0.9 for x in all_df['f']]
# What is q? Quality
q_col = ["name", "short", "t0set"]
if "num" in exps_df:
q_col.append("num")
    # Creating the quality dataframe
    # We only keep the rows which have enough experiments associated with genesUsed12
    tmp_name_in_lrn = [name in gene_fit_d['lrn'].columns for name in exps_df['name']]
# quality_df is a DataFrame with 4 (or 3) columns and num rows = tmp_name_in_lrn.count(True)
quality_df = exps_df[tmp_name_in_lrn][q_col]
quality_df.index = list(quality_df['name'])
#gene_fit_d['q'] = quality_df
q_exp_names = quality_df['name']
    for i in range(len(q_exp_names)):
        if q_exp_names.iat[i] != gene_fit_d['lrn'].columns[i]:
            raise Exception(f"Mismatched names in fit: {q_exp_names.iat[i]} != "
                            f"{gene_fit_d['lrn'].columns[i]}")
if debug:
print("Running FitReadMetrics() and FitQuality()")
st = time.time()
# fitreadmet is a dataframe with 3 columns and the num rows = len(q_exp_names)
fitreadmet = FitReadMetrics(all_df, q_exp_names, central_insert_bool_list)
print(f"Time to run FitReadMetrics: {time.time() - st} seconds")
st = time.time()
# fq_result is a dataframe with 12 columns and num rows = len(q_exp_names)
fq_result, CrudeOp_df = FitQuality(gene_fit_d, genes_df, prnt_dbg=False)
print(f"Time to run FitQuality: {time.time() - st} seconds")
    # Since we are concatenating one dataframe with 4 (or 3) cols, one with 3, and one
    # with 12 cols, the overall dataframe gene_fit_d['q'] should have 19 columns
    # (or 18 if 'num' is not in exps_df; it usually is, so we expect 19).
gene_fit_d['q'] = pd.concat([quality_df,
fitreadmet,
fq_result], axis=1)
# HERE
#DEBUG:
#gene_fit_d['q'].to_csv("tmp/py_gene_fit_q2.tsv", sep="\t")
    # status is a pandas series of str, exactly one entry per row of gene_fit_d['q']
status = FEBA_Exp_Status(gene_fit_d['q'], status_d=cfg["status_d"], dbg_prnt=False)
    # 'u' is True where the experiment's status is "OK", False otherwise
    gene_fit_d['q']['u'] = [status.iat[i] == "OK" for i in range(len(status))]
# Printing out
for s in ["low_count", "high_mad12", "low_cor12", "high_adj_gc_cor"]:
if list(status).count(s) > 0:
logging.info(f"{s}: {gene_fit_d['q']['name'][status == s]}")
# Creating strains dataframes and values (as opposed to genes)
<filename>code/data.py
"""
This file defines the Hierarchy of Graph Tree class and PartNet data loader.
"""
import sys
import os
import json
import torch
import numpy as np
from torch.utils import data
from pyquaternion import Quaternion
from sklearn.decomposition import PCA
from collections import namedtuple
from utils import one_hot
import trimesh
# store a part hierarchy of graphs for a shape
class Tree(object):
# global object category information
part_name2id = dict()
part_id2name = dict()
part_name2cids = dict()
part_non_leaf_sem_names = []
num_sem = None
root_sem = None
    @staticmethod
def load_category_info(cat):
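        # Each line of <cat>.txt is assumed to look like (illustrative example):
        #   "12 chair/chair_back 1"  ->  integer id, slash-separated semantic
        #   name, and a third field that is ignored here.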
with open(os.path.join('../stats/part_semantics/', cat+'.txt'), 'r') as fin:
for l in fin.readlines():
x, y, _ = l.rstrip().split()
x = int(x)
Tree.part_name2id[y] = x
Tree.part_id2name[x] = y
Tree.part_name2cids[y] = []
if '/' in y:
Tree.part_name2cids['/'.join(y.split('/')[:-1])].append(x)
Tree.num_sem = len(Tree.part_name2id) + 1
for k in Tree.part_name2cids:
Tree.part_name2cids[k] = np.array(Tree.part_name2cids[k], dtype=np.int32)
if len(Tree.part_name2cids[k]) > 0:
Tree.part_non_leaf_sem_names.append(k)
Tree.root_sem = Tree.part_id2name[1]
# store a part node in the tree
class Node(object):
def __init__(self, part_id=0, is_leaf=False, box=None, label=None, children=None, edges=None, full_label=None, geo=None, geo_feat=None):
self.is_leaf = is_leaf # store True if the part is a leaf node
self.part_id = part_id # part_id in result_after_merging.json of PartNet
self.box = box # box parameter for all nodes
self.geo = geo # 1 x 1000 x 3 point cloud
self.geo_feat = geo_feat # 1 x 100 geometry feature
self.label = label # node semantic label at the current level
self.full_label = full_label # node semantic label from root (separated by slash)
self.children = [] if children is None else children
# all of its children nodes; each entry is a Node instance
self.edges = [] if edges is None else edges
# all of its children relationships;
# each entry is a tuple <part_a, part_b, type, params, dist>
"""
            Here is the edge format:
            part_a, part_b:
                Values are the order in self.children (e.g. 0, 1, 2, 3, ...).
                This is a directional edge A->B.
                If an edge is commutative, you may need to manually specify a B->A edge.
                For example, an ADJ edge is only stored as A->B;
                there is no B->A edge in the json file.
type:
Four types considered in StructureNet: ADJ, ROT_SYM, TRANS_SYM, REF_SYM.
params:
There is no params field for ADJ edge;
For ROT_SYM edge, 0-2 pivot point, 3-5 axis unit direction, 6 radian rotation angle;
For TRANS_SYM edge, 0-2 translation vector;
For REF_SYM edge, 0-2 the middle point of the segment that connects the two box centers,
3-5 unit normal direction of the reflection plane.
dist:
For ADJ edge, it's the closest distance between two parts;
For SYM edge, it's the chamfer distance after matching part B to part A.
"""
def get_semantic_id(self):
return Tree.part_name2id[self.full_label]
def get_semantic_one_hot(self):
out = np.zeros((1, Tree.num_sem), dtype=np.float32)
out[0, Tree.part_name2id[self.full_label]] = 1
return torch.tensor(out, dtype=torch.float32).to(device=self.box.device)
def get_box_quat(self):
box = self.box.cpu().numpy().squeeze()
center = box[:3]
size = box[3:6]
xdir = box[6:9]
xdir /= np.linalg.norm(xdir)
ydir = box[9:]
ydir /= np.linalg.norm(ydir)
zdir = np.cross(xdir, ydir)
zdir /= np.linalg.norm(zdir)
rotmat = np.vstack([xdir, ydir, zdir]).T
q = Quaternion(matrix=rotmat)
quat = np.array([q.w, q.x, q.y, q.z], dtype=np.float32)
box_quat = np.hstack([center, size, quat]).astype(np.float32)
return torch.from_numpy(box_quat).view(1, -1).to(device=self.box.device)
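        # set_from_box_quat below inverts this encoding: quaternion -> rotation
        # matrix, keeping the first two columns as the box x/y directions
        # (quaternions q and -q encode the same rotation).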
def set_from_box_quat(self, box_quat):
box_quat = box_quat.cpu().numpy().squeeze()
center = box_quat[:3]
size = box_quat[3:6]
q = Quaternion(box_quat[6], box_quat[7], box_quat[8], box_quat[9])
rotmat = q.rotation_matrix
box = np.hstack([center, size, rotmat[:, 0].flatten(), rotmat[:, 1].flatten()]).astype(np.float32)
self.box = torch.from_numpy(box).view(1, -1)
def to(self, device):
if self.box is not None:
self.box = self.box.to(device)
for edge in self.edges:
if 'params' in edge:
edge['params'].to(device)
if self.geo is not None:
self.geo = self.geo.to(device)
for child_node in self.children:
child_node.to(device)
return self
def _to_str(self, level, pid, detailed=False):
out_str = ' |'*(level-1) + ' ├'*(level > 0) + str(pid) + ' ' + self.label + (' [LEAF] ' if self.is_leaf else ' ') + '{' + str(self.part_id) + '}'
if detailed:
out_str += 'Box('+';'.join([str(item) for item in self.box.numpy()])+')\n'
else:
out_str += '\n'
if len(self.children) > 0:
for idx, child in enumerate(self.children):
out_str += child._to_str(level+1, idx)
if detailed and len(self.edges) > 0:
for edge in self.edges:
if 'params' in edge:
edge = edge.copy() # so the original parameters don't get changed
edge['params'] = edge['params'].cpu().numpy()
out_str += ' |'*(level) + ' ├' + 'Edge(' + str(edge) + ')\n'
return out_str
def __str__(self):
return self._to_str(0, 0)
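        # Iterative pre-order traversal: extending the stack with reversed(children)
        # pops children in their original left-to-right order.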
def depth_first_traversal(self):
nodes = []
stack = [self]
while len(stack) > 0:
node = stack.pop()
nodes.append(node)
stack.extend(reversed(node.children))
return nodes
def child_adjacency(self, typed=False, max_children=None):
if max_children is None:
adj = torch.zeros(len(self.children), len(self.children))
else:
adj = torch.zeros(max_children, max_children)
if typed:
edge_types = ['ADJ', 'ROT_SYM', 'TRANS_SYM', 'REF_SYM']
for edge in self.edges:
if typed:
edge_type_index = edge_types.index(edge['type'])
adj[edge['part_a'], edge['part_b']] = edge_type_index
adj[edge['part_b'], edge['part_a']] = edge_type_index
else:
adj[edge['part_a'], edge['part_b']] = 1
adj[edge['part_b'], edge['part_a']] = 1
return adj
def geos(self, leafs_only=True):
nodes = list(self.depth_first_traversal())
            out_geos = []
            out_nodes = []
for node in nodes:
if not leafs_only or node.is_leaf:
out_geos.append(node.geo)
out_nodes.append(node)
return out_geos, out_nodes
def boxes(self, per_node=False, leafs_only=False):
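            # Bottom-up accumulation: nodes come in reverse pre-order, so each
            # node pops its children's box lists off the stack before pushing
            # its own combined list.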
nodes = list(reversed(self.depth_first_traversal()))
node_boxesets = []
boxes_stack = []
for node in nodes:
node_boxes = []
for i in range(len(node.children)):
node_boxes = boxes_stack.pop() + node_boxes
if node.box is not None and (not leafs_only or node.is_leaf):
node_boxes.append(node.box)
if per_node:
node_boxesets.append(node_boxes)
boxes_stack.append(node_boxes)
assert len(boxes_stack) == 1
if per_node:
return node_boxesets, list(nodes)
else:
boxes = boxes_stack[0]
return boxes
def graph(self, leafs_only=False):
part_boxes = []
part_geos = []
edges = []
part_ids = []
part_sems = []
nodes = list(reversed(self.depth_first_traversal()))
box_index_offset = 0
for node in nodes:
child_count = 0
box_idx = {}
for i, child in enumerate(node.children):
if leafs_only and not child.is_leaf:
continue
part_boxes.append(child.box)
part_geos.append(child.geo)
part_ids.append(child.part_id)
part_sems.append(child.full_label)
box_idx[i] = child_count+box_index_offset
child_count += 1
for edge in node.edges:
if leafs_only and not (
node.children[edge['part_a']].is_leaf and
node.children[edge['part_b']].is_leaf):
continue
edges.append(edge.copy())
edges[-1]['part_a'] = box_idx[edges[-1]['part_a']]
edges[-1]['part_b'] = box_idx[edges[-1]['part_b']]
box_index_offset += child_count
return part_boxes, part_geos, edges, part_ids, part_sems
def edge_tensors(self, edge_types, device, type_onehot=True):
num_edges = len(self.edges)
# get directed edge indices in both directions as tensor
edge_indices = torch.tensor(
[[e['part_a'], e['part_b']] for e in self.edges] + [[e['part_b'], e['part_a']] for e in self.edges],
device=device, dtype=torch.long).view(1, num_edges*2, 2)
# get edge type as tensor
edge_type = torch.tensor([edge_types.index(edge['type']) for edge in self.edges], device=device, dtype=torch.long)
if type_onehot:
edge_type = one_hot(inp=edge_type, label_count=len(edge_types)).transpose(0, 1).view(1, num_edges, len(edge_types)).to(dtype=torch.float32)
else:
edge_type = edge_type.view(1, num_edges)
edge_type = torch.cat([edge_type, edge_type], dim=1) # add edges in other direction (symmetric adjacency)
return edge_type, edge_indices
def get_subtree_edge_count(self):
cnt = 0
if self.children is not None:
for cnode in self.children:
cnt += cnode.get_subtree_edge_count()
if self.edges is not None:
cnt += len(self.edges)
return cnt
# functions for class Tree
def __init__(self, root):
self.root = root
def to(self, device):
self.root = self.root.to(device)
return self
def __str__(self):
return str(self.root)
def depth_first_traversal(self):
return self.root.depth_first_traversal()
def boxes(self, per_node=False, leafs_only=False):
return self.root.boxes(per_node=per_node, leafs_only=leafs_only)
def graph(self, leafs_only=False):
return self.root.graph(leafs_only=leafs_only)
def free(self):
for node in self.depth_first_traversal():
del node.geo
del node.geo_feat
del node.box
del node
# extend torch.data.Dataset class for PartNet
class PartNetDataset(data.Dataset):
def __init__(self, root, object_list, data_features, load_geo=False):
self.root = root
self.data_features = data_features
self.load_geo = load_geo
if isinstance(object_list, str):
with open(os.path.join(self.root, object_list), 'r') as f:
self.object_names = [item.rstrip() for item in f.readlines()]
else:
self.object_names = object_list
def __getitem__(self, index):
if 'object' in self.data_features:
obj = self.load_object(os.path.join(self.root, self.object_names[index]+'.json'), \
load_geo=self.load_geo)
data_feats = ()
for feat in self.data_features:
if feat == 'object':
data_feats = data_feats + (obj,)
elif feat == 'name':
data_feats = data_feats + (self.object_names[index],)
else:
                assert False, 'ERROR: unknown feat type %s!' % feat
return data_feats
def __len__(self):
return len(self.object_names)
def get_anno_id(self, anno_id):
obj = self.load_object(os.path.join(self.root, anno_id+'.json'), \
load_geo=self.load_geo)
return obj
@staticmethod
def load_object(fn, load_geo=False):
if load_geo:
geo_fn = fn.replace('_hier', '_geo').replace('json', 'npz')
geo_data = np.load(geo_fn)
with open(fn, 'r') as f:
root_json = json.load(f)
# create a virtual parent node of the root node and add it to the stack
StackElement = namedtuple('StackElement', ['node_json', 'parent', 'parent_child_idx'])
stack = [StackElement(node_json=root_json, parent=None, parent_child_idx=None)]
root = None
# traverse the tree, converting each node json to a Node instance
while len(stack) > 0:
stack_elm = stack.pop()
parent = stack_elm.parent
parent_child_idx = stack_elm.parent_child_idx
node_json = stack_elm.node_json
node = Tree.Node(
part_id=node_json['id'],
is_leaf=('children' not in node_json),
label=node_json['label'])
if 'geo' in node_json.keys():
                # assumed reconstruction of the truncated line: load this part's
                # point cloud from the npz as a 1 x N x 3 tensor
                node.geo = torch.tensor(np.array(geo_data['parts'][node_json['geo']]), dtype=torch.float32).view(1, -1, 3)
function, in seconds.
:type DestinationConfig: dict
:param DestinationConfig: (Streams) An Amazon SQS queue or Amazon SNS topic destination for discarded records.\n\nOnSuccess (dict) --The destination configuration for successful invocations.\n\nDestination (string) --The Amazon Resource Name (ARN) of the destination resource.\n\n\n\nOnFailure (dict) --The destination configuration for failed invocations.\n\nDestination (string) --The Amazon Resource Name (ARN) of the destination resource.\n\n\n\n\n
:type MaximumRecordAgeInSeconds: integer
:param MaximumRecordAgeInSeconds: (Streams) The maximum age of a record that Lambda sends to a function for processing.
:type BisectBatchOnFunctionError: boolean
:param BisectBatchOnFunctionError: (Streams) If the function returns an error, split the batch in two and retry.
:type MaximumRetryAttempts: integer
:param MaximumRetryAttempts: (Streams) The maximum number of times to retry when the function returns an error.
:type ParallelizationFactor: integer
:param ParallelizationFactor: (Streams) The number of batches to process from each shard concurrently.
:rtype: dict
    Returns
    Response Syntax
{
'UUID': 'string',
'BatchSize': 123,
'MaximumBatchingWindowInSeconds': 123,
'ParallelizationFactor': 123,
'EventSourceArn': 'string',
'FunctionArn': 'string',
'LastModified': datetime(2015, 1, 1),
'LastProcessingResult': 'string',
'State': 'string',
'StateTransitionReason': 'string',
'DestinationConfig': {
'OnSuccess': {
'Destination': 'string'
},
'OnFailure': {
'Destination': 'string'
}
},
'MaximumRecordAgeInSeconds': 123,
'BisectBatchOnFunctionError': True|False,
'MaximumRetryAttempts': 123
}
Response Structure
(dict) --
A mapping between an AWS resource and an AWS Lambda function. See CreateEventSourceMapping for details.
UUID (string) --
The identifier of the event source mapping.
BatchSize (integer) --
The maximum number of items to retrieve in a single batch.
MaximumBatchingWindowInSeconds (integer) --
(Streams) The maximum amount of time to gather records before invoking the function, in seconds.
ParallelizationFactor (integer) --
(Streams) The number of batches to process from each shard concurrently.
EventSourceArn (string) --
The Amazon Resource Name (ARN) of the event source.
FunctionArn (string) --
The ARN of the Lambda function.
LastModified (datetime) --
The date that the event source mapping was last updated, or its state changed.
LastProcessingResult (string) --
The result of the last AWS Lambda invocation of your Lambda function.
State (string) --
    The state of the event source mapping. It can be one of the following: Creating, Enabling, Enabled, Disabling, Disabled, Updating, or Deleting.
StateTransitionReason (string) --
Indicates whether the last change to the event source mapping was made by a user, or by the Lambda service.
DestinationConfig (dict) --
(Streams) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
OnSuccess (dict) --
The destination configuration for successful invocations.
Destination (string) --
The Amazon Resource Name (ARN) of the destination resource.
OnFailure (dict) --
The destination configuration for failed invocations.
Destination (string) --
The Amazon Resource Name (ARN) of the destination resource.
MaximumRecordAgeInSeconds (integer) --
(Streams) The maximum age of a record that Lambda sends to a function for processing.
BisectBatchOnFunctionError (boolean) --
(Streams) If the function returns an error, split the batch in two and retry.
MaximumRetryAttempts (integer) --
(Streams) The maximum number of times to retry when the function returns an error.
Exceptions
Lambda.Client.exceptions.ServiceException
Lambda.Client.exceptions.ResourceNotFoundException
Lambda.Client.exceptions.InvalidParameterValueException
Lambda.Client.exceptions.TooManyRequestsException
Lambda.Client.exceptions.ResourceConflictException
Lambda.Client.exceptions.ResourceInUseException
Examples
This operation updates a Lambda function event source mapping
response = client.update_event_source_mapping(
BatchSize=123,
Enabled=True,
FunctionName='myFunction',
UUID='1234xCy789012',
)
print(response)
Expected Output:
{
'BatchSize': 123,
'EventSourceArn': 'arn:aws:s3:::examplebucket/*',
'FunctionArn': 'arn:aws:lambda:us-west-2:123456789012:function:myFunction',
'LastModified': datetime(2016, 11, 21, 19, 49, 20, 0, 326, 0),
'LastProcessingResult': '',
'State': '',
'StateTransitionReason': '',
'UUID': '1234xCy789012',
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'UUID': 'string',
'BatchSize': 123,
'MaximumBatchingWindowInSeconds': 123,
'ParallelizationFactor': 123,
'EventSourceArn': 'string',
'FunctionArn': 'string',
'LastModified': datetime(2015, 1, 1),
'LastProcessingResult': 'string',
'State': 'string',
'StateTransitionReason': 'string',
'DestinationConfig': {
'OnSuccess': {
'Destination': 'string'
},
'OnFailure': {
'Destination': 'string'
}
},
'MaximumRecordAgeInSeconds': 123,
'BisectBatchOnFunctionError': True|False,
'MaximumRetryAttempts': 123
}
:returns:
UUID (string) -- [REQUIRED]
The identifier of the event source mapping.
FunctionName (string) -- The name of the Lambda function.
Name formats
Function name - MyFunction .
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction .
Version or Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD .
Partial ARN - 123456789012:function:MyFunction .
The length constraint applies only to the full ARN. If you specify only the function name, it\'s limited to 64 characters in length.
Enabled (boolean) -- Disables the event source mapping to pause polling and invocation.
BatchSize (integer) -- The maximum number of items to retrieve in a single batch.
Amazon Kinesis - Default 100. Max 10,000.
Amazon DynamoDB Streams - Default 100. Max 1,000.
Amazon Simple Queue Service - Default 10. Max 10.
MaximumBatchingWindowInSeconds (integer) -- (Streams) The maximum amount of time to gather records before invoking the function, in seconds.
DestinationConfig (dict) -- (Streams) An Amazon SQS queue or Amazon SNS topic destination for discarded records.
OnSuccess (dict) --The destination configuration for successful invocations.
Destination (string) --The Amazon Resource Name (ARN) of the destination resource.
OnFailure (dict) --The destination configuration for failed invocations.
Destination (string) --The Amazon Resource Name (ARN) of the destination resource.
MaximumRecordAgeInSeconds (integer) -- (Streams) The maximum age of a record that Lambda sends to a function for processing.
BisectBatchOnFunctionError (boolean) -- (Streams) If the function returns an error, split the batch in two and retry.
MaximumRetryAttempts (integer) -- (Streams) The maximum number of times to retry when the function returns an error.
ParallelizationFactor (integer) -- (Streams) The number of batches to process from each shard concurrently.
"""
pass
def update_function_code(FunctionName=None, ZipFile=None, S3Bucket=None, S3Key=None, S3ObjectVersion=None, Publish=None, DryRun=None, RevisionId=None):
"""
Updates a Lambda function\'s code.
The function\'s code is locked when you publish a version. You can\'t modify the code of a published version, only the unpublished version.
See also: AWS API Documentation
Exceptions
Examples
The following example replaces the code of the unpublished ($LATEST) version of a function named my-function with the contents of the specified zip file in Amazon S3.
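    A minimal sketch of such a call (bucket and key below are placeholders, not
    the values from the original example):
    response = client.update_function_code(
        FunctionName='my-function',
        S3Bucket='my-bucket',
        S3Key='function.zip',
    )
    print(response)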
Expected Output:
:example: response = client.update_function_code(
FunctionName='string',
ZipFile=b'bytes',
S3Bucket='string',
S3Key='string',
S3ObjectVersion='string',
Publish=True|False,
DryRun=True|False,
RevisionId='string'
)
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the Lambda function.\n\nName formats\n\nFunction name - my-function .\nFunction ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function .\nPartial ARN - 123456789012:function:my-function .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n
:type ZipFile: bytes
:param ZipFile: The base64-encoded contents of the deployment package. AWS SDK and AWS CLI clients handle the encoding for you.\n\nThis value will be base64 encoded automatically. Do not base64 encode this value prior to performing the operation.\n
:type S3Bucket: string
:param S3Bucket: An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.
:type S3Key: string
:param S3Key: The Amazon S3 key of the deployment package.
:type S3ObjectVersion: string
:param S3ObjectVersion: For versioned objects, the version of the deployment package object to use.
:type Publish: boolean
:param Publish: Set to true to publish a new version of the function after updating the code. This has the same effect as calling PublishVersion separately.
:type DryRun: boolean
:param DryRun: Set to true to validate the request parameters and access permissions without modifying the function code.
:type RevisionId: string
:param RevisionId: Only update the function if the revision ID matches the ID that\'s specified. Use this option to avoid modifying a function that has changed since you last read it.
:rtype: dict
    Returns
    Response Syntax
{
'FunctionName': 'string',
'FunctionArn': 'string',
'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'nodejs10.x'|'nodejs12.x'|'java8'|'java11'|'python2.7'|'python3.6'|'python3.7'|'python3.8'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'dotnetcore3.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'ruby2.7'|'provided',
'Role': 'string',
'Handler': 'string',
'CodeSize': 123,
'Description': 'string',
'Timeout': 123,
'MemorySize': 123,
'LastModified': 'string',
'CodeSha256': 'string',
'Version': 'string',
'VpcConfig': {
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
],
'VpcId': 'string'
},
'DeadLetterConfig': {
'TargetArn': 'string'
},
'Environment': {
'Variables': {
'string': 'string'
},
'Error': {
'ErrorCode': 'string',
'Message': 'string'
}
},
'KMSKeyArn': 'string',
'TracingConfig': {
'Mode': 'Active'|'PassThrough'
},
'MasterArn': 'string',
'RevisionId': 'string',
'Layers': [
{
'Arn': 'string',
'CodeSize': 123
},
],
'State': 'Pending'|'Active'|'Inactive'|'Failed',
'StateReason': 'string',
'StateReasonCode': 'Idle'|'Creating'|'Restoring'|'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup',
'LastUpdateStatus': 'Successful'|'Failed'|'InProgress',
'LastUpdateStatusReason': 'string',
'LastUpdateStatusReasonCode': 'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup'
}
Response Structure
(dict) --
Details about a function\'s configuration.
FunctionName (string) --
The name of the function.
FunctionArn (string) --
The function\'s Amazon Resource Name (ARN).
Runtime (string) --
The runtime environment for the Lambda function.
Role (string) --
The function\'s execution role.
Handler (string) --
The function that Lambda calls to begin executing your function.
CodeSize (integer) --
The size of the function\'s deployment package, in bytes.
Description (string) --
The function\'s description.
Timeout (integer) --
The amount of time in seconds that Lambda allows a function to run before stopping it.
MemorySize | |
    def __eq__(self, other: 'ServiceRefTargetList') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ServiceRefTargetList') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ServiceRefValue():
"""
A service reference value.
:attr str account_id: The id of the account owning the service.
:attr str service_type: (optional) The service type.
:attr str service_name: (optional) The service name.
:attr str service_instance: (optional) The service instance.
"""
def __init__(self,
account_id: str,
*,
service_type: str = None,
service_name: str = None,
service_instance: str = None) -> None:
"""
Initialize a ServiceRefValue object.
:param str account_id: The id of the account owning the service.
:param str service_type: (optional) The service type.
:param str service_name: (optional) The service name.
:param str service_instance: (optional) The service instance.
"""
self.account_id = account_id
self.service_type = service_type
self.service_name = service_name
self.service_instance = service_instance
@classmethod
def from_dict(cls, _dict: Dict) -> 'ServiceRefValue':
"""Initialize a ServiceRefValue object from a json dictionary."""
args = {}
if 'account_id' in _dict:
args['account_id'] = _dict.get('account_id')
else:
raise ValueError('Required property \'account_id\' not present in ServiceRefValue JSON')
if 'service_type' in _dict:
args['service_type'] = _dict.get('service_type')
if 'service_name' in _dict:
args['service_name'] = _dict.get('service_name')
if 'service_instance' in _dict:
args['service_instance'] = _dict.get('service_instance')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ServiceRefValue object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'account_id') and self.account_id is not None:
_dict['account_id'] = self.account_id
if hasattr(self, 'service_type') and self.service_type is not None:
_dict['service_type'] = self.service_type
if hasattr(self, 'service_name') and self.service_name is not None:
_dict['service_name'] = self.service_name
if hasattr(self, 'service_instance') and self.service_instance is not None:
_dict['service_instance'] = self.service_instance
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ServiceRefValue object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ServiceRefValue') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ServiceRefValue') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
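# Illustrative round-trip for ServiceRefValue (hypothetical values):
#   ref = ServiceRefValue.from_dict({'account_id': 'abc-123',
#                                    'service_name': 'cloud-object-storage'})
#   assert ref.to_dict()['account_id'] == 'abc-123'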
class Zone():
"""
An output zone.
:attr str id: The globally unique ID of the zone.
:attr str crn: The zone CRN.
:attr str name: The name of the zone.
:attr str account_id: The id of the account owning this zone.
:attr str description: The description of the zone.
:attr List[Address] addresses: The list of addresses in the zone.
:attr List[Address] excluded: The list of excluded addresses in the zone.
:attr str href: The href link to the resource.
:attr datetime created_at: The time the resource was created.
:attr str created_by_id: IAM ID of the user or service which created the
resource.
:attr datetime last_modified_at: The last time the resource was modified.
:attr str last_modified_by_id: IAM ID of the user or service which modified the
resource.
"""
def __init__(self,
id: str,
crn: str,
name: str,
account_id: str,
description: str,
addresses: List['Address'],
excluded: List['Address'],
href: str,
created_at: datetime,
created_by_id: str,
last_modified_at: datetime,
last_modified_by_id: str) -> None:
"""
Initialize a Zone object.
:param str id: The globally unique ID of the zone.
:param str crn: The zone CRN.
:param str name: The name of the zone.
:param str account_id: The id of the account owning this zone.
:param str description: The description of the zone.
:param List[Address] addresses: The list of addresses in the zone.
:param List[Address] excluded: The list of excluded addresses in the zone.
:param str href: The href link to the resource.
:param datetime created_at: The time the resource was created.
:param str created_by_id: IAM ID of the user or service which created the
resource.
:param datetime last_modified_at: The last time the resource was modified.
:param str last_modified_by_id: IAM ID of the user or service which
modified the resource.
"""
self.id = id
self.crn = crn
self.name = name
self.account_id = account_id
self.description = description
self.addresses = addresses
self.excluded = excluded
self.href = href
self.created_at = created_at
self.created_by_id = created_by_id
self.last_modified_at = last_modified_at
self.last_modified_by_id = last_modified_by_id
@classmethod
def from_dict(cls, _dict: Dict) -> 'Zone':
"""Initialize a Zone object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in Zone JSON')
if 'crn' in _dict:
args['crn'] = _dict.get('crn')
else:
raise ValueError('Required property \'crn\' not present in Zone JSON')
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError('Required property \'name\' not present in Zone JSON')
if 'account_id' in _dict:
args['account_id'] = _dict.get('account_id')
else:
raise ValueError('Required property \'account_id\' not present in Zone JSON')
if 'description' in _dict:
args['description'] = _dict.get('description')
else:
raise ValueError('Required property \'description\' not present in Zone JSON')
if 'addresses' in _dict:
args['addresses'] = [Address.from_dict(x) for x in _dict.get('addresses')]
else:
raise ValueError('Required property \'addresses\' not present in Zone JSON')
if 'excluded' in _dict:
args['excluded'] = [Address.from_dict(x) for x in _dict.get('excluded')]
else:
raise ValueError('Required property \'excluded\' not present in Zone JSON')
if 'href' in _dict:
args['href'] = _dict.get('href')
else:
raise ValueError('Required property \'href\' not present in Zone JSON')
if 'created_at' in _dict:
args['created_at'] = string_to_datetime(_dict.get('created_at'))
else:
raise ValueError('Required property \'created_at\' not present in Zone JSON')
if 'created_by_id' in _dict:
args['created_by_id'] = _dict.get('created_by_id')
else:
raise ValueError('Required property \'created_by_id\' not present in Zone JSON')
if 'last_modified_at' in _dict:
args['last_modified_at'] = string_to_datetime(_dict.get('last_modified_at'))
else:
raise ValueError('Required property \'last_modified_at\' not present in Zone JSON')
if 'last_modified_by_id' in _dict:
args['last_modified_by_id'] = _dict.get('last_modified_by_id')
else:
raise ValueError('Required property \'last_modified_by_id\' not present in Zone JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Zone object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'crn') and self.crn is not None:
_dict['crn'] = self.crn
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'account_id') and self.account_id is not None:
_dict['account_id'] = self.account_id
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'addresses') and self.addresses is not None:
_dict['addresses'] = [x.to_dict() for x in self.addresses]
if hasattr(self, 'excluded') and self.excluded is not None:
_dict['excluded'] = [x.to_dict() for x in self.excluded]
if hasattr(self, 'href') and self.href is not None:
_dict['href'] = self.href
if hasattr(self, 'created_at') and self.created_at is not None:
_dict['created_at'] = datetime_to_string(self.created_at)
if hasattr(self, 'created_by_id') and self.created_by_id is not None:
_dict['created_by_id'] = self.created_by_id
if hasattr(self, 'last_modified_at') and self.last_modified_at is not None:
_dict['last_modified_at'] = datetime_to_string(self.last_modified_at)
if hasattr(self, 'last_modified_by_id') and self.last_modified_by_id is not None:
_dict['last_modified_by_id'] = self.last_modified_by_id
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Zone object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Zone') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Zone') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
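# Illustrative round trip (not part of the SDK itself): Zone.from_dict accepts a
# parsed JSON dictionary and raises ValueError when a required property is
# missing; to_dict produces a dictionary that from_dict accepts back. The
# 'zone_json' payload below is a hypothetical example.
#
#   zone = Zone.from_dict(json.loads(zone_json))
#   assert Zone.from_dict(zone.to_dict()) == zone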
class ZoneList():
"""
The response object of the ListZones operation.
:attr int count: The number of returned results.
:attr List[ZoneSummary] zones: The returned zones.
"""
def __init__(self,
count: int,
zones: List['ZoneSummary']) -> None:
"""
Initialize a ZoneList object.
:param int count: The number of returned results.
:param List[ZoneSummary] zones: The returned zones.
"""
self.count = count
self.zones = zones
@classmethod
def from_dict(cls, _dict: Dict) -> 'ZoneList':
"""Initialize a ZoneList object from a json dictionary."""
args = {}
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError('Required property \'count\' not present in ZoneList JSON')
if 'zones' in _dict:
args['zones'] = [ZoneSummary.from_dict(x) for x in _dict.get('zones')]
else:
raise ValueError('Required property \'zones\' not present in ZoneList JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ZoneList object from a json dictionary."""
return cls.from_dict(_dict)
    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'count') and self.count is not None:
            _dict['count'] = self.count
        if hasattr(self, 'zones') and self.zones is not None:
            _dict['zones'] = [x.to_dict() for x in self.zones]
        return _dict
import numpy as np
import tensorflow as tf
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Conv2D, MaxPool2D, BatchNormalization, Activation, Add
from tensorflow.keras.backend import categorical_crossentropy, switch
from datagen import loc2box2d
def nsm(abox_2dtensor, prediction, nsm_iou_threshold, nsm_score_threshold, nsm_max_output_size, total_classes):
    '''
    Decode box regressions against the anchor boxes, then run padded
    non-max suppression (NMS) over the per-anchor class scores.
    Arguments
        abox_2dtensor: anchor boxes, (h*w*k, 4)
        prediction: class scores and box regressions, (h*w*k, total_classes+1+4)
    Return
        boxclz_2dtensor: (nsm_max_output_size, 5) rows of box plus class index
        valid_outputs: number of valid rows before padding
    '''
loc_2dtensor = prediction[:, total_classes+1:] # (h*w*k, 4)
pbox_2dtensor = loc2box2d(box_2dtensor=abox_2dtensor, bbe_2dtensor=loc_2dtensor) # (h*w*k, 4)
clz_2dtensor = prediction[:, :total_classes+1] # (h*w*k, total_classes+1)
clz_1dtensor = tf.math.argmax(input=clz_2dtensor, axis=-1) # (h*w*k,)
cancel = tf.where(
condition=tf.math.less(x=clz_1dtensor, y=total_classes),
x=1.0,
y=0.0) # (h*w*k,)
score_1dtensor = tf.math.reduce_max(input_tensor=clz_2dtensor, axis=-1) # (h*w*k,)
score_1dtensor *= cancel # (h*w*k,)
selected_indices, valid_outputs = tf.image.non_max_suppression_padded(
boxes=pbox_2dtensor,
scores=score_1dtensor,
max_output_size=nsm_max_output_size,
iou_threshold=nsm_iou_threshold,
score_threshold=nsm_score_threshold,
pad_to_max_output_size=True)
box_2dtensor = tf.gather(params=pbox_2dtensor, indices=selected_indices) # (nsm_max_output_size, 4)
clz_1dtensor = tf.gather(params=clz_1dtensor, indices=selected_indices) # (nsm_max_output_size,)
clz_2dtensor = tf.expand_dims(input=clz_1dtensor, axis=1) # (nsm_max_output_size, 1)
box_2dtensor = tf.cast(x=box_2dtensor, dtype='int32')
clz_2dtensor = tf.cast(x=clz_2dtensor, dtype='int32')
boxclz_2dtensor = tf.concat(values=[box_2dtensor, clz_2dtensor], axis=-1)
return boxclz_2dtensor, valid_outputs
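# Minimal usage sketch (assumed names, shapes and threshold values, not from the
# original code): flatten a single-image prediction to (h*w*k, total_classes+1+4),
# run padded NMS, and keep only the valid rows. Each returned row is a 4-value
# box followed by the class index.
#
#   prediction = model.predict(batch_x)  # (h*w*k, total_classes+1+4) for one image
#   boxclz_2dtensor, valid_outputs = nsm(
#       abox_2dtensor=abox_2dtensor,
#       prediction=prediction,
#       nsm_iou_threshold=0.5,
#       nsm_score_threshold=0.5,
#       nsm_max_output_size=100,
#       total_classes=total_classes)
#   detections = boxclz_2dtensor[:int(valid_outputs)]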
def nsm_robust(abox_2dtensor, prediction, nsm_iou_threshold, nsm_score_threshold, nsm_max_output_size, total_classes):
    '''
    Variant of nsm for the multi-scale model: predicted boxes from the four
    scale branches (sliced at 16384, 20480 and 21504) are rescaled by 1/2/4/8
    back to the original image scale before non-max suppression.
    '''
loc_2dtensor = prediction[:, total_classes+1:] # (h*w*k, 4)
pbox_2dtensor = loc2box2d(box_2dtensor=abox_2dtensor, bbe_2dtensor=loc_2dtensor) # (h*w*k, 4)
xpbox_2dtensor = pbox_2dtensor[:16384]
spbox_2dtensor = pbox_2dtensor[16384:20480]*2
mpbox_2dtensor = pbox_2dtensor[20480:21504]*4
lpbox_2dtensor = pbox_2dtensor[21504:]*8
pbox_2dtensor = tf.concat(values=[xpbox_2dtensor, spbox_2dtensor, mpbox_2dtensor, lpbox_2dtensor], axis=0)
clz_2dtensor = prediction[:, :total_classes+1] # (h*w*k, total_classes+1)
clz_1dtensor = tf.math.argmax(input=clz_2dtensor, axis=-1) # (h*w*k,)
cancel = tf.where(
condition=tf.math.less(x=clz_1dtensor, y=total_classes*tf.ones(shape=abox_2dtensor.shape[0], dtype='int64')),
x=tf.ones(shape=abox_2dtensor.shape[0]),
y=tf.zeros(shape=abox_2dtensor.shape[0])) # (h*w*k,)
score_1dtensor = tf.math.reduce_max(input_tensor=clz_2dtensor, axis=-1) # (h*w*k,)
score_1dtensor *= cancel # (h*w*k,)
selected_indices, valid_outputs = tf.image.non_max_suppression_padded(
boxes=pbox_2dtensor,
scores=score_1dtensor,
max_output_size=nsm_max_output_size,
iou_threshold=nsm_iou_threshold,
score_threshold=nsm_score_threshold,
pad_to_max_output_size=True)
box_2dtensor = tf.gather(params=pbox_2dtensor, indices=selected_indices) # (nsm_max_output_size, 4)
clz_1dtensor = tf.gather(params=clz_1dtensor, indices=selected_indices) # (nsm_max_output_size,)
clz_2dtensor = tf.expand_dims(input=clz_1dtensor, axis=1) # (nsm_max_output_size, 1)
box_2dtensor = tf.cast(x=box_2dtensor, dtype='int32')
clz_2dtensor = tf.cast(x=clz_2dtensor, dtype='int32')
boxclz_2dtensor = tf.concat(values=[box_2dtensor, clz_2dtensor], axis=-1)
return boxclz_2dtensor, valid_outputs
def nsm_sml(abox_2dtensor, prediction, nsm_iou_threshold, nsm_score_threshold, nsm_max_output_size, total_classes):
    '''
    Variant of nsm for three scale branches (sliced at 4096 and 5120),
    rescaling the medium and large branches by 2 and 4.
    '''
loc_2dtensor = prediction[:, total_classes+1:] # (h*w*k, 4)
pbox_2dtensor = loc2box2d(box_2dtensor=abox_2dtensor, bbe_2dtensor=loc_2dtensor) # (h*w*k, 4)
spbox_2dtensor = pbox_2dtensor[:4096]
mpbox_2dtensor = pbox_2dtensor[4096:5120]*2
lpbox_2dtensor = pbox_2dtensor[5120:5376]*4
pbox_2dtensor = tf.concat(values=[spbox_2dtensor, mpbox_2dtensor, lpbox_2dtensor], axis=0)
clz_2dtensor = prediction[:, :total_classes+1] # (h*w*k, total_classes+1)
clz_1dtensor = tf.math.argmax(input=clz_2dtensor, axis=-1) # (h*w*k,)
cancel = tf.where(
condition=tf.math.less(x=clz_1dtensor, y=total_classes*tf.ones(shape=abox_2dtensor.shape[0], dtype='int64')),
x=tf.ones(shape=abox_2dtensor.shape[0]),
y=tf.zeros(shape=abox_2dtensor.shape[0])) # (h*w*k,)
score_1dtensor = tf.math.reduce_max(input_tensor=clz_2dtensor, axis=-1) # (h*w*k,)
score_1dtensor *= cancel # (h*w*k,)
selected_indices, valid_outputs = tf.image.non_max_suppression_padded(
boxes=pbox_2dtensor,
scores=score_1dtensor,
max_output_size=nsm_max_output_size,
iou_threshold=nsm_iou_threshold,
score_threshold=nsm_score_threshold,
pad_to_max_output_size=True)
box_2dtensor = tf.gather(params=pbox_2dtensor, indices=selected_indices) # (nsm_max_output_size, 4)
clz_1dtensor = tf.gather(params=clz_1dtensor, indices=selected_indices) # (nsm_max_output_size,)
clz_2dtensor = tf.expand_dims(input=clz_1dtensor, axis=1) # (nsm_max_output_size, 1)
box_2dtensor = tf.cast(x=box_2dtensor, dtype='int32')
clz_2dtensor = tf.cast(x=clz_2dtensor, dtype='int32')
boxclz_2dtensor = tf.concat(values=[box_2dtensor, clz_2dtensor], axis=-1)
return boxclz_2dtensor, valid_outputs
def nsm_sm(abox_2dtensor, prediction, nsm_iou_threshold, nsm_score_threshold, nsm_max_output_size, total_classes):
    '''
    Variant of nsm for two scale branches (sliced at 4096), rescaling the
    medium branch by 2.
    '''
loc_2dtensor = prediction[:, total_classes+1:] # (h*w*k, 4)
pbox_2dtensor = loc2box2d(box_2dtensor=abox_2dtensor, bbe_2dtensor=loc_2dtensor) # (h*w*k, 4)
spbox_2dtensor = pbox_2dtensor[:4096]
mpbox_2dtensor = pbox_2dtensor[4096:5120]*2
pbox_2dtensor = tf.concat(values=[spbox_2dtensor, mpbox_2dtensor], axis=0)
clz_2dtensor = prediction[:, :total_classes+1] # (h*w*k, total_classes+1)
clz_1dtensor = tf.math.argmax(input=clz_2dtensor, axis=-1) # (h*w*k,)
cancel = tf.where(
condition=tf.math.less(x=clz_1dtensor, y=total_classes*tf.ones(shape=abox_2dtensor.shape[0], dtype='int64')),
x=tf.ones(shape=abox_2dtensor.shape[0]),
y=tf.zeros(shape=abox_2dtensor.shape[0])) # (h*w*k,)
score_1dtensor = tf.math.reduce_max(input_tensor=clz_2dtensor, axis=-1) # (h*w*k,)
score_1dtensor *= cancel # (h*w*k,)
selected_indices, valid_outputs = tf.image.non_max_suppression_padded(
boxes=pbox_2dtensor,
scores=score_1dtensor,
max_output_size=nsm_max_output_size,
iou_threshold=nsm_iou_threshold,
score_threshold=nsm_score_threshold,
pad_to_max_output_size=True)
box_2dtensor = tf.gather(params=pbox_2dtensor, indices=selected_indices) # (nsm_max_output_size, 4)
clz_1dtensor = tf.gather(params=clz_1dtensor, indices=selected_indices) # (nsm_max_output_size,)
clz_2dtensor = tf.expand_dims(input=clz_1dtensor, axis=1) # (nsm_max_output_size, 1)
box_2dtensor = tf.cast(x=box_2dtensor, dtype='int32')
clz_2dtensor = tf.cast(x=clz_2dtensor, dtype='int32')
boxclz_2dtensor = tf.concat(values=[box_2dtensor, clz_2dtensor], axis=-1)
return boxclz_2dtensor, valid_outputs
def nsm_ml(abox_2dtensor, prediction, nsm_iou_threshold, nsm_score_threshold, nsm_max_output_size, total_classes):
    '''
    Variant of nsm for two scale branches (sliced at 1024), rescaling the
    large branch by 2.
    '''
loc_2dtensor = prediction[:, total_classes+1:] # (h*w*k, 4)
pbox_2dtensor = loc2box2d(box_2dtensor=abox_2dtensor, bbe_2dtensor=loc_2dtensor) # (h*w*k, 4)
mpbox_2dtensor = pbox_2dtensor[:1024]
lpbox_2dtensor = pbox_2dtensor[1024:1280]*2
pbox_2dtensor = tf.concat(values=[mpbox_2dtensor, lpbox_2dtensor], axis=0)
clz_2dtensor = prediction[:, :total_classes+1] # (h*w*k, total_classes+1)
clz_1dtensor = tf.math.argmax(input=clz_2dtensor, axis=-1) # (h*w*k,)
cancel = tf.where(
condition=tf.math.less(x=clz_1dtensor, y=total_classes*tf.ones(shape=abox_2dtensor.shape[0], dtype='int64')),
x=tf.ones(shape=abox_2dtensor.shape[0]),
y=tf.zeros(shape=abox_2dtensor.shape[0])) # (h*w*k,)
score_1dtensor = tf.math.reduce_max(input_tensor=clz_2dtensor, axis=-1) # (h*w*k,)
score_1dtensor *= cancel # (h*w*k,)
selected_indices, valid_outputs = tf.image.non_max_suppression_padded(
boxes=pbox_2dtensor,
scores=score_1dtensor,
max_output_size=nsm_max_output_size,
iou_threshold=nsm_iou_threshold,
score_threshold=nsm_score_threshold,
pad_to_max_output_size=True)
box_2dtensor = tf.gather(params=pbox_2dtensor, indices=selected_indices) # (nsm_max_output_size, 4)
clz_1dtensor = tf.gather(params=clz_1dtensor, indices=selected_indices) # (nsm_max_output_size,)
clz_2dtensor = tf.expand_dims(input=clz_1dtensor, axis=1) # (nsm_max_output_size, 1)
box_2dtensor = tf.cast(x=box_2dtensor, dtype='int32')
clz_2dtensor = tf.cast(x=clz_2dtensor, dtype='int32')
boxclz_2dtensor = tf.concat(values=[box_2dtensor, clz_2dtensor], axis=-1)
return boxclz_2dtensor, valid_outputs
def identity_block(input_tensor, kernel_size, filters, block_name, use_bias, weight_decay, trainable, bn_trainable):
'''
https://arxiv.org/pdf/1512.03385.pdf
Bottleneck architecture
Arguments
input_tensor:
kernel_size:
filters:
trainable:
Return
tensor:
'''
filters1, filters2, filters3 = filters
tensor = Conv2D(
filters=filters1,
kernel_size=[1, 1],
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=block_name+'_conv1')(input_tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=block_name+'_conv1_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=filters2,
kernel_size=kernel_size,
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=block_name+'_conv2')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=block_name+'_conv2_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=filters3,
kernel_size=[1, 1],
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=block_name+'_conv3')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=block_name+'_conv3_bn')(tensor)
tensor = Add()([tensor, input_tensor])
tensor = Activation('relu')(tensor)
return tensor
def conv_block(input_tensor, kernel_size, filters, strides, block_name, use_bias, weight_decay, trainable, bn_trainable):
'''
https://arxiv.org/pdf/1512.03385.pdf
Bottleneck architecture
Arguments
input_tensor:
kernel_size:
filters:
strides:
trainable:
Return
tensor:
'''
filters1, filters2, filters3 = filters
tensor = Conv2D(
filters=filters1,
kernel_size=[1, 1],
strides=strides,
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=block_name+'_conv1')(input_tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=block_name+'_conv1_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=filters2,
kernel_size=kernel_size,
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=block_name+'_conv2')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=block_name+'_conv2_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=filters3,
kernel_size=[1, 1],
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=block_name+'_conv3')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=block_name+'_conv3_bn')(tensor)
input_tensor = Conv2D(
filters=filters3,
kernel_size=[1, 1],
strides=strides,
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=block_name+'_conv4')(input_tensor)
    input_tensor = BatchNormalization(trainable=bn_trainable, name=block_name+'_conv4_bn')(input_tensor)
tensor = Add()([tensor, input_tensor])
tensor = Activation('relu')(tensor)
return tensor
def resnet(input_tensor, block_settings, use_bias, weight_decay, trainable, bn_trainable, net_name):
'''
https://arxiv.org/pdf/1512.03385.pdf
Bottleneck architecture
Arguments
input_tensor:
block_settings:
[[64, 64, 256], [3, [2, 2]], [4, [2, 2]], [6, [2, 2]], [3, [2, 2]]] # Resnet 50, pool 64
[[64, 64, 256], [3, [2, 2]], [4, [2, 2]], [23, [2, 2]], [3, [2, 2]]] # Resnet 101, pool 64
[[64, 64, 256], [3, [2, 2]], [8, [2, 2]], [36, [2, 2]], [3, [2, 2]]] # Resnet 152, pool 64
trainable:
Return
tensor:
'''
filters = np.array(block_settings[0])
n_C2, strides_C2 = block_settings[1]
tensors = []
# C1
tensor = Conv2D(
filters=filters[0],
kernel_size=[7, 7],
strides=[2, 2],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv1')(input_tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=net_name+'_conv1_bn')(tensor)
tensor = Activation('relu')(tensor)
# C2
tensor = MaxPool2D(pool_size=[3, 3], strides=strides_C2, padding='same')(tensor)
tensor = conv_block(
input_tensor=tensor,
kernel_size=[3, 3],
filters=filters,
strides=[1, 1],
block_name=net_name+'_stg1_blk0_',
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable)
for n in range(1, n_C2):
tensor = identity_block(
input_tensor=tensor,
kernel_size=[3, 3],
filters=filters,
block_name=net_name+'_stg1_blk'+str(n)+'_',
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable)
tensors.append(tensor)
# C34...
for c in range(2, 2+len(block_settings[2:])):
n_C, strides_C = block_settings[c]
tensor = conv_block(
input_tensor=tensor,
kernel_size=[3, 3],
filters=(2**(c-1))*filters,
strides=strides_C,
block_name=net_name+'_stg'+str(c)+'_blk0_',
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable)
for n in range(1, n_C):
tensor = identity_block(
input_tensor=tensor,
kernel_size=[3, 3],
filters=(2**(c-1))*filters,
block_name=net_name+'_stg'+str(c)+'_blk'+str(n)+'_',
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable)
tensors.append(tensor)
return tensors
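# Illustrative backbone construction (a sketch; the block settings are the
# ResNet-50 configuration listed in the docstring above, and the input size is
# an assumption):
#
#   inputs = Input(shape=(512, 512, 3))
#   c2, c3, c4, c5 = resnet(
#       input_tensor=inputs,
#       block_settings=[[64, 64, 256], [3, [2, 2]], [4, [2, 2]], [6, [2, 2]], [3, [2, 2]]],
#       use_bias=True,
#       weight_decay=0.0,
#       trainable=True,
#       bn_trainable=True,
#       net_name='Fansipan')  # c2..c5 are the stage outputs, strides 4 to 32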
def loss(total_classes, lamda=1.0):
    '''
    Build the SSD training loss: categorical cross entropy over anchor
    classes plus lamda times smooth L1 over foreground box regressions.
    '''
def smooth_l1(y_true, y_pred):
        '''
        Elementwise Huber (smooth L1) loss with delta = 1.
        '''
HUBER_DELTA = 1.0
x = tf.math.abs(y_true - y_pred)
x = switch(x < HUBER_DELTA, 0.5*x**2, HUBER_DELTA*(x - 0.5*HUBER_DELTA))
return x
def balanced_l1(y_true, y_pred):
'''
https://arxiv.org/pdf/1904.02701.pdf
'''
alpha = 0.5
gamma = 1.5
b = 19.085
C = 0
        x = tf.math.abs(y_true - y_pred)
x = switch(x < 1.0, (alpha*x + alpha/b)*tf.math.log(b*x + 1) - alpha*x, gamma*x + C)
return x
def ssd_loss(y_true, y_pred):
'''
https://arxiv.org/pdf/1512.02325.pdf
Arguments
y_true: (h*w*k, total_classes+1+4)
y_pred: (h*w*k, total_classes+1+4)
Return
loss
'''
true_clz_2dtensor = y_true[:, :total_classes+1] # (h*w*k, total_classes+1)
pred_clz_2dtensor = y_pred[:, :total_classes+1] # (h*w*k, total_classes+1)
true_loc_2dtensor = y_true[:, total_classes+1:] # (h*w*k, 4)
pred_loc_2dtensor = y_pred[:, total_classes+1:] # (h*w*k, 4)
sum_true_clz_2dtensor = tf.math.reduce_sum(input_tensor=true_clz_2dtensor, axis=-1) # (h*w*k,)
selected_clz_indices = tf.where(
condition=tf.math.equal(x=sum_true_clz_2dtensor, y=1)) # foreground, background
selected_loc_indices = tf.where(
condition=tf.math.logical_and(
x=tf.math.equal(x=sum_true_clz_2dtensor, y=1),
y=tf.math.not_equal(x=true_clz_2dtensor[:, -1], y=1))) # foreground
true_clz_2dtensor = tf.gather_nd(params=true_clz_2dtensor, indices=selected_clz_indices) # (fb, total_classes+1)
pred_clz_2dtensor = tf.gather_nd(params=pred_clz_2dtensor, indices=selected_clz_indices) # (fb, total_classes+1)
true_loc_2dtensor = tf.gather_nd(params=true_loc_2dtensor, indices=selected_loc_indices) # (f, 4)
pred_loc_2dtensor = tf.gather_nd(params=pred_loc_2dtensor, indices=selected_loc_indices) # (f, 4)
clz_loss = categorical_crossentropy(true_clz_2dtensor, pred_clz_2dtensor) # (fb,)
loc_loss = tf.math.reduce_sum(input_tensor=smooth_l1(true_loc_2dtensor, pred_loc_2dtensor), axis=-1) # (f,)
loss = tf.math.reduce_mean(clz_loss) + lamda*tf.math.reduce_mean(loc_loss)
return loss
return ssd_loss
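# Tiny worked example (illustrative only, with total_classes=2 so each row is a
# one-hot class vector (3) plus box targets (4)): row 0 is a foreground anchor
# of class 0, row 1 is background (last class slot), and row 2 is an ignored
# anchor whose all-zero class vector drops it from both loss terms.
#
#   ssd_loss = loss(total_classes=2, lamda=1.0)
#   y_true = tf.constant([
#       [1., 0., 0., 0.1, 0.2, 0.3, 0.4],
#       [0., 0., 1., 0., 0., 0., 0.],
#       [0., 0., 0., 0., 0., 0., 0.]])
#   y_pred = tf.concat(
#       values=[tf.nn.softmax(tf.random.normal(shape=(3, 3))), tf.random.normal(shape=(3, 4))],
#       axis=-1)
#   print(ssd_loss(y_true, y_pred))  # scalar loss tensor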
def build_model(ishape, resnet_settings, k, total_classes, net_name='Fansipan'):
    '''
    Build the training model: a ResNet backbone followed by a convolutional
    head predicting k*(total_classes+1+4) values per feature-map cell,
    compiled with the SSD loss.
    '''
use_bias = True
weight_decay = 0.0
trainable = True
bn_trainable = True
input_tensor = Input(shape=ishape, name=net_name+'_input', dtype='float32')
tensors = resnet(
input_tensor=input_tensor,
block_settings=resnet_settings,
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable,
net_name=net_name)
tensor = tensors[-1]
head_dims = tensor.shape[3]
tensor = Conv2D(
filters=head_dims,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_prehead')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=net_name+'_conv_prehead_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=k*(total_classes+1+4),
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_head')(tensor)
tensor = tf.reshape(tensor=tensor, shape=[-1, total_classes+1+4]) # (h*w*k, total_classes+1+4)
clz_tensor = tensor[:, :total_classes+1]
clz_tensor = Activation('softmax')(clz_tensor)
loc_tensor = tensor[:, total_classes+1:]
output_tensor = tf.concat(values=[clz_tensor, loc_tensor], axis=-1) # (h*w*k, total_classes+1+4)
model = Model(inputs=input_tensor, outputs=output_tensor)
model.compile(optimizer=Adam(), loss=loss(total_classes=total_classes, lamda=1.0))
return model
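# Example instantiation (a sketch with assumed hyperparameters; resnet_settings
# is the ResNet-50 configuration from the resnet docstring, and k=9 anchors per
# cell is an assumption):
#
#   model = build_model(
#       ishape=(512, 512, 3),
#       resnet_settings=[[64, 64, 256], [3, [2, 2]], [4, [2, 2]], [6, [2, 2]], [3, [2, 2]]],
#       k=9,
#       total_classes=2)
#   model.summary()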
def build_test_model(ishape, resnet_settings, k, total_classes, abox_2dtensor, nsm_iou_threshold, nsm_score_threshold, nsm_max_output_size, net_name='Fansipan'):
    '''
    Build the inference model: the same backbone and head as build_model,
    frozen, with padded non-max suppression appended to the outputs.
    '''
use_bias = True
weight_decay = 0.0
trainable = False
bn_trainable = False
input_tensor = Input(shape=ishape, name=net_name+'_input', dtype='float32')
tensors = resnet(
input_tensor=input_tensor,
block_settings=resnet_settings,
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable,
net_name=net_name)
tensor = tensors[-1]
head_dims = tensor.shape[3]
tensor = Conv2D(
filters=head_dims,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_prehead')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=net_name+'_conv_prehead_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=k*(total_classes+1+4),
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_head')(tensor)
tensor = tf.reshape(tensor=tensor, shape=[-1, total_classes+1+4]) # (h*w*k, total_classes+1+4)
clz_tensor = tensor[:, :total_classes+1]
clz_tensor = Activation('softmax')(clz_tensor)
loc_tensor = tensor[:, total_classes+1:]
tensor = tf.concat(values=[clz_tensor, loc_tensor], axis=-1) # (h*w*k, total_classes+1+4)
tensor, valid_outputs = nsm(
abox_2dtensor=abox_2dtensor,
prediction=tensor,
nsm_iou_threshold=nsm_iou_threshold,
nsm_score_threshold=nsm_score_threshold,
nsm_max_output_size=nsm_max_output_size,
total_classes=total_classes)
valid_outputs = tf.cast(x=valid_outputs, dtype='float32')
valid_outputs = tf.expand_dims(input=valid_outputs, axis=0)
model = Model(inputs=input_tensor, outputs=[tensor, valid_outputs])
model.compile(optimizer=Adam(), loss=[lambda y_true, y_pred: 0.0, lambda y_true, y_pred: 0.0])
return model
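# Inference sketch (assumed input): the test model returns the padded detections
# together with the count of valid rows (shape (1,) after the expand_dims above).
#
#   boxclz_2dtensor, valid_outputs = test_model.predict(batch_x)
#   detections = boxclz_2dtensor[:int(valid_outputs[0])]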
def build_robust_test_model(ishape, resnet_settings, k, total_classes, abox_2dtensor, nsm_iou_threshold, nsm_score_threshold, nsm_max_output_size):
    '''
    Build the multi-scale inference model: four frozen backbones (XNet, SNet,
    MNet, LNet) run on the input at full, 1/2, 1/4 and 1/8 resolution, and
    their predictions are merged with nsm_robust.
    '''
use_bias = True
weight_decay = 0.0
trainable = False
bn_trainable = False
xinput_tensor = Input(shape=[ishape[0], ishape[1], ishape[2]], dtype='float32')
sinput_tensor = tf.image.resize(images=xinput_tensor, size=[ishape[0]//2, ishape[1]//2])
minput_tensor = tf.image.resize(images=xinput_tensor, size=[ishape[0]//4, ishape[1]//4])
linput_tensor = tf.image.resize(images=xinput_tensor, size=[ishape[0]//8, ishape[1]//8])
net_name = 'XNet'
tensors = resnet(
input_tensor=xinput_tensor,
block_settings=resnet_settings,
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable,
net_name=net_name)
tensor = tensors[-1]
head_dims = tensor.shape[3]
tensor = Conv2D(
filters=head_dims,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_prehead')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=net_name+'_conv_prehead_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=k*(total_classes+1+4),
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_head')(tensor)
tensor = tf.reshape(tensor=tensor, shape=[-1, total_classes+1+4])
clz_tensor = tensor[:, :total_classes+1]
clz_tensor = Activation('softmax')(clz_tensor)
loc_tensor = tensor[:, total_classes+1:]
xtensor = tf.concat(values=[clz_tensor, loc_tensor], axis=-1)
net_name = 'SNet'
tensors = resnet(
input_tensor=sinput_tensor,
block_settings=resnet_settings,
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable,
net_name=net_name)
tensor = tensors[-1]
head_dims = tensor.shape[3]
tensor = Conv2D(
filters=head_dims,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_prehead')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=net_name+'_conv_prehead_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=k*(total_classes+1+4),
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_head')(tensor)
tensor = tf.reshape(tensor=tensor, shape=[-1, total_classes+1+4])
clz_tensor = tensor[:, :total_classes+1]
clz_tensor = Activation('softmax')(clz_tensor)
loc_tensor = tensor[:, total_classes+1:]
stensor = tf.concat(values=[clz_tensor, loc_tensor], axis=-1)
net_name = 'MNet'
tensors = resnet(
input_tensor=minput_tensor,
block_settings=resnet_settings,
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable,
net_name=net_name)
tensor = tensors[-1]
head_dims = tensor.shape[3]
tensor = Conv2D(
filters=head_dims,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_prehead')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=net_name+'_conv_prehead_bn')(tensor)
tensor = Activation('relu')(tensor)
tensor = Conv2D(
filters=k*(total_classes+1+4),
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_head')(tensor)
tensor = tf.reshape(tensor=tensor, shape=[-1, total_classes+1+4])
clz_tensor = tensor[:, :total_classes+1]
clz_tensor = Activation('softmax')(clz_tensor)
loc_tensor = tensor[:, total_classes+1:]
mtensor = tf.concat(values=[clz_tensor, loc_tensor], axis=-1)
net_name = 'LNet'
tensors = resnet(
input_tensor=linput_tensor,
block_settings=resnet_settings,
use_bias=use_bias,
weight_decay=weight_decay,
trainable=trainable,
bn_trainable=bn_trainable,
net_name=net_name)
tensor = tensors[-1]
head_dims = tensor.shape[3]
tensor = Conv2D(
filters=head_dims,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
use_bias=use_bias,
kernel_regularizer=regularizers.l2(weight_decay),
trainable=trainable,
name=net_name+'_conv_prehead')(tensor)
tensor = BatchNormalization(trainable=bn_trainable, name=net_name+'_conv_prehead_bn')(tensor)
tensor = Activation('relu')(tensor)
    tensor = Conv2D(
        filters=k*(total_classes+1+4),
        kernel_size=[1, 1],
        strides=[1, 1],
        padding='same',
        use_bias=use_bias,
        kernel_regularizer=regularizers.l2(weight_decay),
        trainable=trainable,
        name=net_name+'_conv_head')(tensor)
    tensor = tf.reshape(tensor=tensor, shape=[-1, total_classes+1+4])
    clz_tensor = tensor[:, :total_classes+1]
    clz_tensor = Activation('softmax')(clz_tensor)
    loc_tensor = tensor[:, total_classes+1:]
    ltensor = tf.concat(values=[clz_tensor, loc_tensor], axis=-1)
    prediction = tf.concat(values=[xtensor, stensor, mtensor, ltensor], axis=0)
    tensor, valid_outputs = nsm_robust(
        abox_2dtensor=abox_2dtensor,
        prediction=prediction,
        nsm_iou_threshold=nsm_iou_threshold,
        nsm_score_threshold=nsm_score_threshold,
        nsm_max_output_size=nsm_max_output_size,
        total_classes=total_classes)
    valid_outputs = tf.cast(x=valid_outputs, dtype='float32')
    valid_outputs = tf.expand_dims(input=valid_outputs, axis=0)
    model = Model(inputs=xinput_tensor, outputs=[tensor, valid_outputs])
    model.compile(optimizer=Adam(), loss=[lambda y_true, y_pred: 0.0, lambda y_true, y_pred: 0.0])
    return model
"""
Visual demonstrations of anarchy system.
viz.py
"""
import random
import os
import PIL.Image
from . import rng, cohort
MAX_RANDOM = rng.ID_MASK
def builtin_next():
"""
Returns random numbers using built-in random.
"""
return random.randint(0, MAX_RANDOM)
def demo_prng():
"""
Creates images that demonstrate the PRNG functionality and compare
it against the built-in irreversible PRNG.
"""
seed = 2318230981
random.seed(seed)
prev_a = 472938
prev_b = builtin_next()
a_seq = PIL.Image.new("RGB", (100, 100))
b_seq = PIL.Image.new("RGB", (100, 100))
a_map = {}
a_map_max = 0
b_map = {}
b_map_max = 0
for x in range(100):
for y in range(100):
next_a = rng.prng(prev_a, seed)
c_x = round(prev_a / MAX_RANDOM * 99)
c_y = round(next_a / MAX_RANDOM * 99)
if c_x not in a_map:
a_map[c_x] = {}
if c_y not in a_map[c_x]:
a_map[c_x][c_y] = 0
else:
a_map[c_x][c_y] += 1
if a_map[c_x][c_y] > a_map_max:
a_map_max = a_map[c_x][c_y]
prev_a = next_a
brightness = round(next_a / MAX_RANDOM * 255)
a_seq.putpixel((x, y), (brightness,) * 3)
next_b = builtin_next()
c_x = round(prev_b / MAX_RANDOM * 99)
c_y = round(next_b / MAX_RANDOM * 99)
if (c_x < 0 or c_x >= 100 or c_y < 0 or c_y >= 100):
print(prev_b, next_b, c_x, c_y)
if c_x not in b_map:
b_map[c_x] = {}
if c_y not in b_map[c_x]:
b_map[c_x][c_y] = 0
else:
b_map[c_x][c_y] += 1
if b_map[c_x][c_y] > b_map_max:
b_map_max = b_map[c_x][c_y]
prev_b = next_b
brightness = round(next_b / MAX_RANDOM * 255)
b_seq.putpixel((x, y), (brightness,) * 3)
a_coords = PIL.Image.new("RGB", (100, 100))
b_coords = PIL.Image.new("RGB", (100, 100))
for kx in a_map:
for ky in a_map[kx]:
val = round(a_map[kx][ky] / a_map_max * 255)
a_coords.putpixel((kx, ky), (val,) * 3)
for kx in b_map:
for ky in b_map[kx]:
val = round(b_map[kx][ky] / b_map_max * 255)
b_coords.putpixel((kx, ky), (val,) * 3)
a_seq.save(os.path.join("demos", "rng_seq_anarchy.png"), format="png")
b_seq.save(os.path.join("demos", "rng_seq_builtin.png"), format="png")
a_coords.save(
os.path.join("demos", "rng_coords_anarchy.png"),
format="png"
)
b_coords.save(
os.path.join("demos", "rng_coords_builtin.png"),
format="png"
)
def demo_shuffle():
"""
Creates images that demonstrate the shuffle functionality and
compare it against irreversible shuffling using the built-in random
module.
"""
seed = 8231093281
random.seed(seed)
a_demo = PIL.Image.new("RGB", (100, 100))
b_demo = PIL.Image.new("RGB", (100, 100))
# Twenty rows of repeated shuffles of a single initial list
seq_a = list(range(100))
seq_b = list(range(100))
for i in range(20):
y = i
for x in range(100):
a_demo.putpixel((x, y), (round(seq_a[x] / 100 * 256),) * 3)
b_demo.putpixel((x, y), (round(seq_b[x] / 100 * 256),) * 3)
seq_a_new = [
seq_a[cohort.cohort_shuffle(j, 100, seed)]
for j in range(len(seq_a))
]
seq_a = seq_a_new
random.shuffle(seq_b)
# Twenty rows of different shuffles of the same initial list
seq_a = list(range(100))
seq_b = list(range(100))
seq_a_here = seq_a
seq_b_here = seq_b
for i in range(20):
y = i + 25
for x in range(100):
a_demo.putpixel((x, y), (round(seq_a_here[x] / 100 * 256),) * 3)
b_demo.putpixel((x, y), (round(seq_b_here[x] / 100 * 256),) * 3)
seq_a_here = [
seq_a[cohort.cohort_shuffle(j, 100, seed + i)]
for j in range(len(seq_a))
]
seq_b_here = seq_b[:]
random.shuffle(seq_b_here)
# How many trials to run
N_TRIALS = 1000
# 50x50 square of x vs y shuffle input/output positions
seq_a = list(range(50))
seq_b = list(range(50))
a_map = {}
a_map_max = 0
b_map = {}
b_map_max = 0
for i in range(N_TRIALS):
shuf_a = [
seq_a[cohort.cohort_shuffle(j, 50, seed + i)]
for j in range(len(seq_a))
]
for j, n in enumerate(shuf_a):
if j not in a_map:
a_map[j] = {}
if n not in a_map[j]:
a_map[j][n] = 0
a_map[j][n] += 1
if a_map[j][n] > a_map_max:
a_map_max = a_map[j][n]
shuf_b = seq_b[:]
random.shuffle(shuf_b)
for j, n in enumerate(shuf_b):
if j not in b_map:
b_map[j] = {}
if n not in b_map[j]:
b_map[j][n] = 0
b_map[j][n] += 1
if b_map[j][n] > b_map_max:
b_map_max = b_map[j][n]
for x in a_map:
for y in a_map[x]:
val = round(a_map[x][y] / a_map_max * 255)
a_demo.putpixel((x, y + 50), (val,) * 3)
for x in b_map:
for y in b_map[x]:
val = round(b_map[x][y] / b_map_max * 255)
b_demo.putpixel((x, y + 50), (val,) * 3)
# 50x50 square of x vs y ordering counts over 1000 shuffles
seq_a = list(range(50))
seq_b = list(range(50))
a_map = {}
b_map = {}
for i in range(N_TRIALS):
shuf_a = [
seq_a[cohort.cohort_shuffle(j, 50, seed + i)]
for j in range(len(seq_a))
]
for j, n in enumerate(shuf_a):
if n not in a_map:
a_map[n] = {}
for m in shuf_a[j + 1:]:
if m not in a_map[n]:
a_map[n][m] = 0
a_map[n][m] += 1
shuf_b = seq_b[:]
random.shuffle(shuf_b)
for j, n in enumerate(shuf_b):
if n not in b_map:
b_map[n] = {}
for m in shuf_b[j + 1:]:
if m not in b_map[n]:
b_map[n][m] = 0
b_map[n][m] += 1
for first in a_map:
for second in a_map[first]:
val = round(a_map[first][second] / N_TRIALS * 255)
a_demo.putpixel((second + 50, first + 50), (val,) * 3)
for first in b_map:
for second in b_map[first]:
val = round(b_map[first][second] / N_TRIALS * 255)
b_demo.putpixel((second + 50, first + 50), (val,) * 3)
a_demo.save(os.path.join("demos", "rng_shuf_anarchy.png"), format="png")
b_demo.save(os.path.join("demos", "rng_shuf_builtin.png"), format="png")
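# The property these images visualize, in code (a sketch; cohort.cohort_shuffle
# is assumed to be a bijection on range(size) for a fixed seed, which is what
# lets each shuffled position be computed independently and reversibly):
#
#   size, seed = 100, 12345
#   perm = [cohort.cohort_shuffle(i, size, seed) for i in range(size)]
#   assert sorted(perm) == list(range(size))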
def demo_distribute():
"""
Creates images that demonstrate the distribute functionality and
compare it against irreversible distribution using the built-in
random module.
"""
seed = 3129320881
random.seed(seed)
a_demo = PIL.Image.new("RGB", (100, 100))
#b_demo = PIL.Image.new("RGB", (100, 100))
# TODO: Build a similar demo using built-in RNG?
# Twenty rows of distributions of 50 elements within 10 10-item segments
# Roughness 0.5
for y in range(20):
segments = []
for i in range(10):
segments.append([])
for item in range(50):
s = cohort.distribution_segment(item, 50, 10, 10, 0.5, seed + y)
segments[s].append(item)
for x in range(100):
si = x // 10
sii = x % 10
if sii < len(segments[si]):
val = round((20 + segments[si][sii]) / 70 * 255)
a_demo.putpixel((x, y), (val,) * 3)
# Five lines each with different roughness values:
for i, r in enumerate([0, 0.1, 0.3, 0.5, 0.7, 0.9, 1]):
start = 25 + i * 7
for y in range(start, start + 5):
segments = []
            for _ in range(10):
segments.append([])
for item in range(50):
s = cohort.distribution_segment(
item,
50,
10,
10,
r,
seed + y
)
segments[s].append(item)
for x in range(100):
si = x // 10
sii = x % 10
if sii < len(segments[si]):
val = round((20 + segments[si][sii]) / 70 * 255)
a_demo.putpixel((x, y), (val,) * 3)
# Ten lines showing the distribution portion for each of 100 100-slot bins
# Roughness: 0.2
for y in range(76, 86):
ys = seed + y
for x in range(100):
here = cohort.distribution_portion(x, 5000, 100, 100, 0.2, ys)
val = round(here / 100 * 255)
a_demo.putpixel((x, y), (val,) * 3)
# Roughness: 0.8
for y in range(90, 100):
ys = seed + y
for x in range(100):
here = cohort.distribution_portion(x, 5000, 100, 100, 0.8, ys)
val = round(here / 100 * 255)
a_demo.putpixel((x, y), (val,) * 3)
a_demo.save(os.path.join("demos", "rng_dist_anarchy.png"), format="png")
#b_demo.save(os.path.join("demos", "rng_dist_builtin.png"), format="png")
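# Sketch of how the two distribution functions relate (an assumption based on
# their use above: distribution_portion reports how many items
# distribution_segment routes to a given segment under the same parameters):
#
#   counts = [0] * 10
#   for item in range(50):
#       counts[cohort.distribution_segment(item, 50, 10, 10, 0.5, 99)] += 1
#   print(counts[3], cohort.distribution_portion(3, 50, 10, 10, 0.5, 99))
#   # the two numbers should agree if the functions are consistent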
WHITE, RED, BLUE = [
(255, 255, 255),
(255, 0, 83),
(0, 121, 210),
]
def pick_color(seed):
"""
Simple independent distribution function.
"""
x = rng.uniform(seed)
if x < 1 / 100:
return RED
elif x < 6 / 100:
return BLUE
else:
return WHITE
def demo_shuf_compare():
"""
Creates an image demonstrating the difference between shuffling and
independent-chance-based distribution schemes.
"""
seed = 9328810312
random.seed(seed)
demo = PIL.Image.new("RGB", (110, 110))
n = 0
for xblock in range(10):
for yblock in range(10):
lseed = seed + xblock * 729837 + yblock * 92873
if yblock < 5:
items = [pick_color(lseed + i * 4879283) for i in range(100)]
else:
items = [RED] + [BLUE] * 5 + [WHITE] * (100 - 6)
for lx in range(10):
for ly in range(10):
x = xblock * 11 + lx
y = yblock * 11 + ly
# shift lower blocks down to create separation
if yblock >= 5:
y += 1
if yblock < 5:
color = items[lx * 10 + ly]
else:
color = items[
cohort.cohort_shuffle(n, 100, lseed) % 100
]
# Increment anarchy N
n += 1
demo.putpixel((x, y), color)
demo.save(os.path.join("demos", "rng_shuf_compare.png"), format="png")
def main():
if not os.path.exists("demos"):
os.mkdir("demos")
demo_prng()
demo_shuffle()
demo_distribute()
demo_shuf_compare()
if __name__ == "__main__":
    main()
# iso-langs/iso639_3.py
# ISO 639-3 codes retrieved from https://iso639-3.sil.org/code_tables/download_tables
#
# Prepared as python module by Thamme Gowda for mtdata https://github.com/thammegowda/mtdata
data='''aaa Ghotuo
aab Alumu-Tesu
aac Ari
aad Amal
aae Arbëreshë Albanian
aaf Aranadan
aag Ambrak
aah Abu' Arapesh
aai Arifama-Miniafia
aak Ankave
aal Afade
aan Anambé
aao Algerian Saharan Arabic
aap Pará Arára
aaq Eastern Abnaki
aar Afar
aas Aasáx
aat Arvanitika Albanian
aau Abau
aaw Solong
aax Mandobo Atas
aaz Amarasi
aba Abé
abb Bankon
abc Ambala Ayta
abd Manide
abe Western Abnaki
abf Abai Sungai
abg Abaga
abh Tajiki Arabic
abi Abidji
abj Aka-Bea
abk Abkhazian
abl Lampung Nyo
abm Abanyom
abn Abua
abo Abon
abp Abellen Ayta
abq Abaza
abr Abron
abs Ambonese Malay
abt Ambulas
abu Abure
abv Baharna Arabic
abw Pal
abx Inabaknon
aby Aneme Wake
abz Abui
aca Achagua
acb Áncá
acd Gikyode
ace Achinese
acf Saint Lucian Creole French
ach Acoli
aci Aka-Cari
ack Aka-Kora
acl Akar-Bale
acm Mesopotamian Arabic
acn Achang
acp Eastern Acipa
acq Ta'izzi-Adeni Arabic
acr Achi
acs Acroá
act Achterhoeks
acu Achuar-Shiwiar
acv Achumawi
acw Hijazi Arabic
acx Omani Arabic
acy Cypriot Arabic
acz Acheron
ada Adangme
adb Atauran
add Lidzonka Dzodinka
ade Adele
adf Dhofari Arabic
adg Andegerebinha
adh Adhola
adi Adi
adj Adioukrou
adl Galo
adn Adang
ado Abu
adq Adangbe
adr Adonara
ads Adamorobe Sign Language
adt Adnyamathanha
adu Aduge
adw Amundava
adx Amdo Tibetan
ady Adyghe Adygei
adz Adzera
aea Areba
aeb Tunisian Arabic
aec Saidi Arabic
aed Argentine Sign Language
aee Northeast Pashai Northeast Pashayi
aek Haeke
ael Ambele
aem Arem
aen Armenian Sign Language
aeq Aer
aer Eastern Arrernte
aes Alsea
aeu Akeu
aew Ambakich
aey Amele
aez Aeka
afb Gulf Arabic
afd Andai
afe Putukwam
afg Afghan Sign Language
afh Afrihili
afi Akrukay Chini
afk Nanubae
afn Defaka
afo Eloyi
afp Tapei
afr Afrikaans
afs Afro-Seminole Creole
aft Afitti
afu Awutu
afz Obokuitai
aga Aguano
agb Legbo
agc Agatu
agd Agarabi
age Angal
agf Arguni
agg Angor
agh Ngelima
agi Agariya
agj Argobba
agk Isarog Agta
agl Fembe
agm Angaataha
agn Agutaynen
ago Tainae
agq Aghem
agr Aguaruna
ags Esimbi
agt Central Cagayan Agta
agu Aguacateco
agv Remontado Dumagat
agw Kahua
agx Aghul
agy Southern Alta
agz Mt. Iriga Agta
aha Ahanta
ahb Axamb
ahg Qimant
ahh Aghu
ahi Tiagbamrin Aizi
ahk Akha
ahl Igo
ahm Mobumrin Aizi
ahn Àhàn
aho Ahom
ahp Aproumu Aizi
ahr Ahirani
ahs Ashe
aht Ahtena
aia Arosi
aib Ainu (China)
aic Ainbai
aid Alngith
aie Amara
aif Agi
aig Antigua and Barbuda Creole English
aih Ai-Cham
aii Assyrian Neo-Aramaic
aij Lishanid Noshan
aik Ake
ail Aimele
aim Aimol
ain Ainu (Japan)
aio Aiton
aip Burumakok
aiq Aimaq
air Airoran
ait Arikem
aiw Aari
aix Aighon
aiy Ali
aja Aja (South Sudan)
ajg Aja (Benin)
aji Ajië
ajn Andajin
ajp South Levantine Arabic
ajt Judeo-Tunisian Arabic
aju Judeo-Moroccan Arabic
ajw Ajawa
ajz Amri Karbi
aka Akan
akb Batak Angkola
akc Mpur
akd Ukpet-Ehom
ake Akawaio
akf Akpa
akg Anakalangu
akh Angal Heneng
aki Aiome
akj Aka-Jeru
akk Akkadian
akl Aklanon
akm Aka-Bo
ako Akurio
akp Siwu
akq Ak
akr Araki
aks Akaselem
akt Akolet
aku Akum
akv Akhvakh
akw Akwa
akx Aka-Kede
aky Aka-Kol
akz Alabama
ala Alago
alc Qawasqar
ald Alladian
ale Aleut
alf Alege
alh Alawa
ali Amaimon
alj Alangan
alk Alak
all Allar
alm Amblong
aln Gheg Albanian
alo Larike-Wakasihu
alp Alune
alq Algonquin
alr Alutor
als Tosk Albanian
alt Southern Altai
alu 'Are'are
alw Alaba-K’abeena Wanbasana
alx Amol
aly Alyawarr
alz Alur
ama Amanayé
amb Ambo
amc Amahuaca
ame Yanesha'
amf Hamer-Banna
amg Amurdak
amh Amharic
ami Amis
amj Amdang
amk Ambai
aml War-Jaintia
amm Ama (Papua New Guinea)
amn Amanab
amo Amo
amp Alamblak
amq Amahai
amr Amarakaeri
ams Southern Amami-Oshima
amt Amto
amu Guerrero Amuzgo
amv Ambelau
amw Western Neo-Aramaic
amx Anmatyerre
amy Ami
amz Atampaya
ana Andaqui
anb Andoa
anc Ngas
and Ansus
ane Xârâcùù
anf Animere
ang Old English (ca. 450-1100)
anh Nend
ani Andi
anj Anor
ank Goemai
anl Anu-Hkongso Chin
anm Anal
ann Obolo
ano Andoque
anp Angika
anq Jarawa (India)
anr Andh
ans Anserma
ant Antakarinya Antikarinya
anu Anuak
anv Denya
anw Anaang
anx Andra-Hus
any Anyin
anz Anem
aoa Angolar
aob Abom
aoc Pemon
aod Andarum
aoe Angal Enen
aof Bragat
aog Angoram
aoi Anindilyakwa
aoj Mufian
aok Arhö
aol Alor
aom Ömie
aon Bumbita Arapesh
aor Aore
aos Taikat
aot Atong (India) A'tong
aou A'ou
aox Atorada
aoz Uab Meto
apb Sa'a
apc North Levantine Arabic
apd Sudanese Arabic
ape Bukiyip
apf Pahanan Agta
apg Ampanang
aph Athpariya
api Apiaká
apj Jicarilla Apache
apk Kiowa Apache
apl Lipan Apache
apm Mescalero-Chiricahua Apache
apn Apinayé
apo Ambul
app Apma
apq A-Pucikwar
apr Arop-Lokep
aps Arop-Sissano
apt Apatani
apu Apurinã
apv Alapmunte
apw Western Apache
apx Aputai
apy Apalaí
apz Safeyoka
aqc Archi
aqd Ampari Dogon
aqg Arigidi
aqm Atohwaim
aqn Northern Alta
aqp Atakapa
aqr Arhâ
aqt Angaité
aqz Akuntsu
ara Arabic
arb Standard Arabic
arc Official Aramaic (700-300 BCE) Imperial Aramaic (700-300 BCE)
ard Arabana
are Western Arrarnta
arg Aragonese
arh Arhuaco
ari Arikara
arj Arapaso
ark Arikapú
arl Arabela
arn Mapudungun Mapuche
aro Araona
arp Arapaho
arq Algerian Arabic
arr Karo (Brazil)
ars Najdi Arabic
aru Aruá (Amazonas State) Arawá
arv Arbore
arw Arawak
arx Aruá (Rodonia State)
ary Moroccan Arabic
arz Egyptian Arabic
asa Asu (Tanzania)
asb Assiniboine
asc Casuarina Coast Asmat
ase American Sign Language
asf Auslan Australian Sign Language
asg Cishingini
ash Abishira
asi Buruwai
asj Sari
ask Ashkun
asl Asilulu
asm Assamese
asn Xingú Asuriní
aso Dano
asp Algerian Sign Language
asq Austrian Sign Language
asr Asuri
ass Ipulo
ast Asturian Bable,Asturleonese,Leonese
asu Tocantins Asurini
asv Asoa
asw Australian Aborigines Sign Language
asx Muratayak
asy Yaosakor Asmat
asz As
ata Pele-Ata
atb Zaiwa
atc Atsahuaca
atd Ata Manobo
ate Atemble
atg Ivbie North-Okpela-Arhe
ati Attié
atj Atikamekw
atk Ati
atl Mt. Iraya Agta
atm Ata
atn Ashtiani
ato Atong (Cameroon)
atp Pudtol Atta
atq Aralle-Tabulahan
atr Waimiri-Atroari
ats Gros Ventre
att Pamplona Atta
atu Reel
atv Northern Altai
atw Atsugewi
atx Arutani
aty Aneityum
atz Arta
aua Asumboa
aub Alugu
auc Waorani
aud Anuta
aug Aguna
auh Aushi
aui Anuki
auj Awjilah
auk Heyo
aul Aulua
aum Asu (Nigeria)
aun Molmo One
auo Auyokawa
aup Makayam
auq Anus Korur
aur Aruek
aut Austral
auu Auye
auw Awyi
aux Aurá
auy Awiyaana
auz Uzbeki Arabic
ava Avaric
avb Avau
avd Alviri-Vidari
ave Avestan
avi Avikam
avk Kotava
avl Eastern Egyptian Bedawi Arabic
avm Angkamuthi
avn Avatime
avo Agavotaguerra
avs Aushiri
avt Au
avu Avokaya
avv Avá-Canoeiro
awa Awadhi
awb Awa (Papua New Guinea)
awc Cicipu
awe Awetí
awg Anguthimri
awh Awbono
awi Aekyom
awk Awabakal
awm Arawum
awn Awngi
awo Awak
awr Awera
aws South Awyu
awt Araweté
awu Central Awyu
awv Jair Awyu
aww Awun
awx Awara
awy Edera Awyu
axb Abipon
axe Ayerrerenge
axg Mato Grosso Arára
axk Yaka (Central African Republic)
axl Lower Southern Aranda
axm Middle Armenian
axx Xârâgurè
aya Awar
ayb Ayizo Gbe
ayc Southern Aymara
ayd Ayabadhu
aye Ayere
ayg Ginyanga
ayh Hadrami Arabic
ayi Leyigha
ayk Akuku
ayl Libyan Arabic
aym Aymara
ayn Sanaani Arabic
ayo Ayoreo
ayp North Mesopotamian Arabic
ayq Ayi (Papua New Guinea)
ayr Central Aymara
ays Sorsogon Ayta
ayt Magbukun Ayta
ayu Ayu
ayz Mai Brat
aza Azha
azb South Azerbaijani
azd Eastern Durango Nahuatl
aze Azerbaijani
azg San Pedro Amuzgos Amuzgo
azj North Azerbaijani
azm Ipalapa Amuzgo
azn Western Durango Nahuatl
azo Awing
azt Faire Atta
azz Highland Puebla Nahuatl
baa Babatana
bab Bainouk-Gunyuño
bac Badui
bae Baré
baf Nubaca
bag Tuki
bah Bahamas Creole English
baj Barakai
bak Bashkir
bal Baluchi
bam Bambara
ban Balinese
bao Waimaha
bap Bantawa
bar Bavarian
bas Basa (Cameroon)
bau Bada (Nigeria)
bav Vengo
baw Bambili-Bambui
bax Bamun
bay Batuley
bba Baatonum
bbb Barai
bbc Batak Toba
bbd Bau
bbe Bangba
bbf Baibai
bbg Barama
bbh Bugan
bbi Barombi
bbj Ghomálá'
bbk Babanki
bbl Bats
bbm Babango
bbn Uneapa
bbo Northern Bobo Madaré Konabéré
bbp West Central Banda
bbq Bamali
bbr Girawa
bbs Bakpinka
bbt Mburku
bbu Kulung (Nigeria)
bbv Karnai
bbw Baba
bbx Bubia
bby Befang
bca Central Bai
bcb Bainouk-Samik
bcc Southern Balochi
bcd North Babar
bce Bamenyam
bcf Bamu
bcg Baga Pokur
bch Bariai
bci Baoulé
bcj Bardi
bck Bunuba
bcl Central Bikol
bcm Bannoni
bcn Bali (Nigeria)
bco Kaluli
bcp Bali (Democratic Republic of Congo)
bcq Bench
bcr Babine
bcs Kohumono
bct Bendi
bcu Awad Bing
bcv Shoo-Minda-Nye
bcw Bana
bcy Bacama
bcz Bainouk-Gunyaamolo
bda Bayot
bdb Basap
bdc Emberá-Baudó
bdd Bunama
bde Bade
bdf Biage
bdg Bonggi
bdh Baka (South Sudan)
bdi Burun
bdj Bai (South Sudan) Bai
bdk Budukh
bdl Indonesian Bajau
bdm Buduma
bdn Baldemu
bdo Morom
bdp Bende
bdq Bahnar
bdr West Coast Bajau
bds Burunge
bdt Bokoto
bdu Oroko
bdv Bodo Parja
bdw Baham
bdx Budong-Budong
bdy Bandjalang
bdz Badeshi
bea Beaver
beb Bebele
bec Iceve-Maci
bed Bedoanas
bee Byangsi
bef Benabena
beg Belait
beh Biali
bei Bekati'
bej Beja Bedawiyet
bek Bebeli
bel Belarusian
bem Bemba (Zambia)
ben Bengali
beo Beami
bep Besoa
beq Beembe
bes Besme
bet Guiberoua Béte
beu Blagar
bev Daloa Bété
bew Betawi
bex Jur Modo
bey Beli (Papua New Guinea)
bez Bena (Tanzania)
bfa Bari
bfb Pauri Bareli
bfc Panyi Bai Northern Bai
bfd Bafut
bfe Betaf Tena
bff Bofi
bfg Busang Kayan
bfh Blafe
bfi British Sign Language
bfj Bafanji
bfk Ban Khor Sign Language
bfl Banda-Ndélé
bfm Mmen
bfn Bunak
bfo Malba Birifor
bfp Beba
bfq Badaga
bfr Bazigar
bfs Southern Bai
bft Balti
bfu Gahri
bfw Bondo
bfx Bantayanon
bfy Bagheli
bfz Mahasu Pahari
bga Gwamhi-Wuri
bgb Bobongko
bgc Haryanvi
bgd Rathwi Bareli
bge Bauria
bgf Bangandu
bgg Bugun
bgi Giangan
bgj Bangolan
bgk Bit Buxinhua
bgl Bo (Laos)
bgn Western Balochi
bgo Baga Koga
bgp Eastern Balochi
bgq Bagri
bgr Bawm Chin
bgs Tagabawa
bgt Bughotu
bgu Mbongno
bgv Warkay-Bipim
bgw Bhatri
bgx Balkan Gagauz Turkish
bgy Benggoi
bgz Banggai
bha Bharia
bhb Bhili
bhc Biga
bhd Bhadrawahi
bhe Bhaya
bhf Odiai
bhg Binandere
bhh Bukharic
bhi Bhilali
bhj Bahing
bhl Bimin
bhm Bathari
bhn Bohtan Neo-Aramaic
bho Bhojpuri
bhp Bima
bhq Tukang Besi South
bhr Bara Malagasy
bhs Buwal
bht Bhattiyali
bhu Bhunjia
bhv Bahau
bhw Biak
bhx Bhalay
bhy Bhele
bhz Bada (Indonesia)
bia Badimaya
bib Bissa Bisa
bic Bikaru
bid Bidiyo
bie Bepour
bif Biafada
big Biangai
bij Vaghat-Ya-Bijim-Legeri
bik Bikol
bil Bile
bim Bimoba
bin Bini Edo
bio Nai
bip Bila
biq Bipi
bir Bisorio
bis Bislama
bit Berinomo
biu Biete
biv Southern Birifor
biw Kol (Cameroon)
bix Bijori
biy Birhor
biz Baloi
bja Budza
bjb Banggarla
bjc Bariji
bje Biao-Jiao Mien
bjf Barzani Jewish Neo-Aramaic
bjg Bidyogo
bjh Bahinemo
bji Burji
bjj Kanauji
bjk Barok
bjl Bulu (Papua New Guinea)
bjm Bajelani
bjn Banjar
bjo Mid-Southern Banda
bjp Fanamaket
bjr Binumarien
bjs Bajan
bjt Balanta-Ganja
bju Busuu
bjv Bedjond
bjw Bakwé
bjx Banao Itneg
bjy Bayali
bjz Baruga
bka Kyak
bkc Baka (Cameroon)
bkd Binukid Talaandig
bkf Beeke
bkg Buraka
bkh Bakoko
bki Baki
bkj Pande
bkk Brokskat
bkl Berik
bkm Kom (Cameroon)
bkn Bukitan
bko Kwa'
bkp Boko (Democratic Republic of Congo)
bkq Bakairí
bkr Bakumpai
bks Northern Sorsoganon
bkt Boloki
bku Buhid
bkv Bekwarra
bkw Bekwel
bkx Baikeno
bky Bokyi
bkz Bungku
bla Siksika
blb Bilua
blc Bella Coola
bld Bolango
ble Balanta-Kentohe
blf Buol
blg Balau
blh Kuwaa
bli Bolia
blj Bolongan
blk Pa'o Karen Pa'O
bll Biloxi
blm Beli (South Sudan)
bln Southern Catanduanes Bikol
blo Anii
blp Blablanga
blq Baluan-Pam
blr Blang
bls Balaesang
blt Tai Dam
blv Kibala Bolo
blw Balangao
blx Mag-Indi Ayta
bly Notre
blz Balantak
bma Lame
bmb Bembe
bmc Biem
bmd Baga Manduri
bme Limassa
bmf Bom-Kim
bmg Bamwe
bmh Kein
bmi Bagirmi
bmj Bote-Majhi
bmk Ghayavi
bml Bomboli
bmm Northern Betsimisaraka Malagasy
bmn Bina (Papua New Guinea)
bmo Bambalang
bmp Bulgebi
bmq Bomu
bmr Muinane
bms Bilma Kanuri
bmt Biao Mon
bmu Somba-Siawari
bmv Bum
bmw Bomwali
bmx Baimak
bmz Baramu
bna Bonerate
bnb Bookan
bnc Bontok
bnd Banda (Indonesia)
bne Bintauna
bnf Masiwang
bng Benga
bni Bangi
bnj Eastern Tawbuid
bnk Bierebo
bnl Boon
bnm Batanga
bnn Bunun
bno Bantoanon
bnp Bola
bnq Bantik
bnr Butmas-Tur
bns Bundeli
bnu Bentong
bnv Bonerif Beneraf,Edwas
bnw Bisis
bnx Bangubangu
bny Bintulu
bnz Beezen
boa Bora
bob Aweer
bod Tibetan
boe Mundabli
bof Bolon
bog Bamako Sign Language
boh Boma
boi Barbareño
boj Anjam
bok Bonjo
bol Bole
bom Berom
bon Bine
boo Tiemacèwè Bozo
bop Bonkiman
boq Bogaya
bor Borôro
bos Bosnian
bot Bongo
bou Bondei
bov Tuwuli
bow Rema
box Buamu
boy Bodo (Central African Republic)
boz Tiéyaxo Bozo
bpa Daakaka
bpd Banda-Banda
bpg Bonggo
bph Botlikh
bpi Bagupi
bpj Binji
bpk Orowe 'Ôrôê
bpl Broome Pearling Lugger Pidgin
bpm Biyom
bpn Dzao Min
bpo Anasi
bpp Kaure
bpq Banda Malay
bpr Koronadal Blaan
bps Sarangani Blaan
bpt Barrow Point
bpu Bongu
bpv Bian Marind
bpw Bo (Papua New Guinea)
bpx Palya Bareli
bpy Bishnupriya
bpz Bilba
bqa Tchumbuli
bqb Bagusa
bqc Boko (Benin) Boo
bqd Bung
bqf Baga Kaloum
bqg Bago-Kusuntu
bqh Baima
bqi Bakhtiari
bqj Bandial
bqk Banda-Mbrès
bql Bilakura
bqm Wumboko
bqn Bulgarian Sign Language
bqo Balo
bqp Busa
bqq Biritai
bqr Burusu
bqs Bosngun
bqt Bamukumbit
bqu Boguru
bqv Koro Wachi Begbere-Ejar
bqw Buru (Nigeria)
bqx Baangi
bqy Bengkala Sign Language
bqz Bakaka
bra Braj
brb Lave
brc Berbice Creole Dutch
brd Baraamu
bre Breton
brf Bira
brg Baure
brh Brahui
bri Mokpwe
brj Bieria
brk Birked
brl Birwa
brm Barambu
brn Boruca
bro Brokkat
brp Barapasi
brq Breri
brr Birao
brs Baras
brt Bitare
bru Eastern Bru
brv Western Bru
brw Bellari
brx Bodo (India)
bry Burui
brz Bilbil
bsa Abinomn
bsb Brunei Bisaya
bsc Bassari Oniyan
bse Wushi
bsf Bauchi
bsg Bashkardi
bsh Kati
bsi Bassossi
bsj Bangwinji
bsk Burushaski
bsl Basa-Gumna
bsm Busami
bsn Barasana-Eduria
bso Buso
bsp Baga Sitemu
bsq Bassa
bsr Bassa-Kontagora
bss Akoose
bst Basketo
bsu Bahonsuai
bsv Baga Sobané
bsw Baiso
bsx Yangkam
bsy Sabah Bisaya
bta Bata
btc Bati (Cameroon)
btd Batak Dairi
bte Gamo-Ningi
btf Birgit
btg Gagnoa Bété
bth Biatah Bidayuh
bti Burate
btj Bacanese Malay
btm Batak Mandailing
btn Ratagnon
bto Rinconada Bikol
btp Budibud
btq Batek
btr Baetora
bts Batak Simalungun
btt Bete-Bendi
btu Batu
btv Bateri
btw Butuanon
btx Batak Karo
bty Bobot
btz Batak Alas-Kluet
bua Buriat
bub Bua
buc Bushi
bud Ntcham
bue Beothuk
buf Bushoong
bug Buginese
buh Younuo Bunu
bui Bongili
buj Basa-Gurmana
buk Bugawac
bul Bulgarian
bum Bulu (Cameroon)
bun Sherbro
buo Terei
bup Busoa
buq Brem
bus Bokobaru
but Bungain
buu Budu
buv Bun
buw Bubi
bux Boghom
buy Bullom So
buz Bukwen
bva Barein
bvb Bube
bvc Baelelea
bvd Baeggu
bve Berau Malay
bvf Boor
bvg Bonkeng
bvh Bure
bvi Belanda Viri
bvj Baan
bvk Bukat
bvl Bolivian Sign Language
bvm Bamunka
bvn Buna
bvo Bolgo
bvp Bumang
bvq Birri
bvr Burarra
bvt Bati (Indonesia)
bvu Bukit Malay
bvv Baniva
bvw Boga
bvx Dibole
bvy Baybayanon
bvz Bauzi
bwa Bwatoo
bwb Namosi-Naitasiri-Serua
bwc Bwile
bwd Bwaidoka
bwe Bwe Karen
bwf Boselewa
bwg Barwe
bwh Bishuo
bwi Baniwa
bwj Láá Láá Bwamu
bwk Bauwaki
bwl Bwela
bwm Biwat
bwn Wunai Bunu
bwo Boro (Ethiopia) Borna (Ethiopia)
bwp Mandobo Bawah
bwq Southern Bobo Madaré
bwr Bura-Pabir
bws Bomboma
bwt Bafaw-Balong
bwu Buli (Ghana)
bww Bwa
bwx Bu-Nao Bunu
bwy Cwi Bwamu
bwz Bwisi
bxa Tairaha
bxb Belanda Bor
bxc Molengue
bxd Pela
bxe Birale
bxf Bilur Minigir
bxg Bangala
bxh Buhutu
bxi Pirlatapa
bxj Bayungu
bxk Bukusu Lubukusu
bxl Jalkunan
bxm Mongolia Buriat
bxn Burduna
bxo Barikanchi
bxp Bebil
bxq Beele
bxr Russia Buriat
bxs Busam
bxu China Buriat
bxv Berakou
bxw Bankagooma
bxz Binahari
bya Batak
byb Bikya
byc Ubaghara
byd Benyadu'
bye Pouye
byf Bete
byg Baygo
byh Bhujel
byi Buyu
byj Bina (Nigeria)
byk Biao
byl Bayono
bym Bidjara
byn Bilin Blin
byo Biyo
byp Bumaji
byq Basay
byr Baruya Yipma
bys Burak
byt Berti
byv Medumba
byw Belhariya
byx Qaqet
byz Banaro
bza Bandi
bzb Andio
bzc Southern Betsimisaraka Malagasy
bzd Bribri
bze Jenaama Bozo
bzf Boikin
bzg Babuza
bzh Mapos Buang
bzi Bisu
bzj Belize Kriol English
bzk Nicaragua Creole English
bzl Boano (Sulawesi)
bzm Bolondo
bzn Boano (Maluku)
bzo Bozaba
bzp Kemberano
bzq Buli (Indonesia)
bzr Biri
bzs Brazilian Sign Language
bzt Brithenig
bzu Burmeso
bzv Naami
bzw Basa (Nigeria)
bzx Kɛlɛngaxo Bozo
bzy Obanliku
bzz Evant
caa Chortí
cab Garifuna
cac Chuj
cad Caddo
cae Lehar Laalaa
caf Southern Carrier
cag Nivaclé
cah Cahuarano
caj Chané
cak Kaqchikel Cakchiquel
cal Carolinian
cam Cemuhî
can Chambri
cao Chácobo
cap Chipaya
caq Car Nicobarese
car Galibi Carib
cas Tsimané
cat Catalan Valencian
cav Cavineña
caw Callawalla
cax Chiquitano
cay Cayuga
caz Canichana
cbb Cabiyarí
cbc Carapana
cbd Carijona
cbg Chimila
cbi Chachi
cbj Ede Cabe
cbk Chavacano
cbl Bualkhaw Chin
cbn Nyahkur
cbo Izora
cbq Tsucuba Cuba
cbr Cashibo-Cacataibo
cbs Cashinahua
cbt Chayahuita
cbu Candoshi-Shapra
cbv Cacua
cbw Kinabalian
cby Carabayo
ccc Chamicuro
ccd Cafundo Creole
cce Chopi
ccg Samba Daka
cch Atsam
ccj Kasanga
ccl Cutchi-Swahili
ccm Malaccan Creole Malay
cco Comaltepec Chinantec
ccp Chakma
ccr Cacaopera
cda Choni
cde Chenchu
cdf Chiru
cdh Chambeali
cdi Chodri
cdj Churahi
cdm Chepang
cdn Chaudangsi
cdo Min Dong Chinese
cdr Cinda-Regi-Tiyal
cds Chadian Sign Language
cdy Chadong
cdz Koda
cea Lower Chehalis
ceb Cebuano
ceg Chamacoco
cek Eastern Khumi Chin
cen Cen
ces Czech
cet Centúúm
cey Ekai Chin
cfa Dijim-Bwilim
cfd Cara
cfg Como Karim
cfm Falam Chin
cga Changriwa
cgc Kagayanen
cgg Chiga
cgk Chocangacakha
cha Chamorro
chb Chibcha
chc Catawba
chd Highland Oaxaca Chontal
che Chechen
chf Tabasco Chontal
chg Chagatai
chh Chinook
chj Ojitlán Chinantec
chk Chuukese
chl Cahuilla
chm Mari (Russia)
chn Chinook jargon
cho Choctaw
chp Chipewyan Dene Suline
chq Quiotepec Chinantec
chr Cherokee
cht Cholón
chu Church Slavic Old Bulgarian,Old Church Slavonic,Church Slavonic,Old Slavonic
chv Chuvash
chw Chuwabu
chx Chantyal
chy Cheyenne
chz Ozumacín Chinantec
cia Cia-Cia
cib Ci Gbe
cic Chickasaw
cid Chimariko
cie Cineni
cih Chinali
cik Chitkuli Kinnauri
cim Cimbrian
cin Cinta Larga
cip Chiapanec
cir Tiri Haméa,Méa
ciw Chippewa
ciy Chaima
cja Western Cham
cje Chru
cjh Upper Chehalis
cji Chamalal
cjk Chokwe
cjm Eastern Cham
cjn Chenapian
cjo Ashéninka Pajonal
cjp Cabécar
cjs Shor
cjv Chuave
cjy Jinyu Chinese
ckb Central Kurdish
ckh Chak
ckl Cibak
ckm Chakavian
ckn Kaang Chin
cko Anufo
ckq Kajakse
ckr Kairak
cks Tayo
ckt Chukot
cku Koasati
ckv Kavalan
ckx Caka
cky Cakfem-Mushere
ckz Cakchiquel-Quiché Mixed Language
cla Ron
clc Chilcotin
cld Chaldean Neo-Aramaic
cle Lealao Chinantec
clh Chilisso
cli Chakali
clj Laitu Chin
clk Idu-Mishmi
cll Chala
clm Clallam
clo Lowland Oaxaca Chontal
clt Lautu Chin
clu Caluyanun
clw Chulym
cly Eastern Highland Chatino
cma Maa
cme Cerma
cmg Classical Mongolian
cmi Emberá-Chamí
cml Campalagian
cmm Michigamea
cmn Mandarin Chinese
cmo Central Mnong
cmr Mro-Khimi Chin
cms Messapic
cmt Camtho
cna Changthang
cnb Chinbon Chin
cnc Côông
cng Northern Qiang
cnh Hakha Chin Haka Chin
cni Asháninka
cnk Khumi Chin
cnl Lalana Chinantec
cno Con
cnp Northern Ping Chinese Northern Pinghua
cnr Montenegrin
cns Central Asmat
cnt Tepetotutla Chinantec
cnu Chenoua
cnw Ngawn Chin
cnx Middle Cornish
coa Cocos Islands Malay
cob Chicomuceltec
coc Cocopa
cod Cocama-Cocamilla
coe Koreguaje
cof Colorado
cog Chong
coh Chonyi-Dzihana-Kauma Chichonyi-Chidzihana-Chikauma
coj Cochimi
cok Santa Teresa Cora
col Columbia-Wenatchi
com Comanche
con Cofán
coo Comox
cop Coptic
coq Coquille
cor Cornish
cos Corsican
cot Caquinte
cou Wamey
cov Cao Miao
cow Cowlitz
cox Nanti
coz Chochotec
cpa Palantla Chinantec
cpb Ucayali-Yurúa Ashéninka
cpc Ajyíninka Apurucayali
cpg Cappadocian Greek
cpi Chinese Pidgin English
cpn Cherepon
cpo Kpeego
cps Capiznon
cpu Pichis Ashéninka
cpx Pu-Xian Chinese
cpy South Ucayali Ashéninka
cqd Chuanqiandian Cluster Miao
cra Chara
crb Island Carib
crc Lonwolwol
crd Coeur d'Alene
cre Cree
crf Caramanta
crg Michif
crh Crimean Tatar Crimean Turkish
cri Sãotomense
crj Southern East Cree
crk Plains Cree
crl Northern East Cree
crm Moose Cree
crn El Nayar Cora
cro Crow
crq Iyo'wujwa Chorote
crr Carolina Algonquian
crs Seselwa Creole French
crt Iyojwa'ja Chorote
crv Chaura
crw Chrau
crx Carrier
cry Cori
crz Cruzeño
csa Chiltepec Chinantec
csb Kashubian
csc Catalan Sign Language Lengua de señas catalana,Llengua de Signes Catalana
csd Chiangmai Sign Language
cse Czech Sign Language
csf Cuba Sign Language
csg Chilean Sign Language
csh Asho Chin
csi Coast Miwok
csj Songlai Chin
csk Jola-Kasa
csl Chinese Sign Language
csm Central Sierra Miwok
csn Colombian Sign Language
cso Sochiapam Chinantec Sochiapan Chinantec
csp Southern Ping Chinese Southern Pinghua
csq Croatia Sign Language
csr Costa Rican Sign Language
css Southern Ohlone
cst Northern Ohlone
csv Sumtu Chin
csw Swampy Cree
csy Siyin Chin
csz Coos
cta Tataltepec Chatino
ctc Chetco
ctd Tedim Chin
cte Tepinapa Chinantec
ctg Chittagonian
cth Thaiphum Chin
ctl Tlacoatzintepec Chinantec
ctm Chitimacha
ctn Chhintange
cto Emberá-Catío
ctp Western Highland Chatino
cts Northern Catanduanes Bikol
ctt Wayanad Chetti
ctu Chol
ctz Zacatepec Chatino
cua Cua
cub Cubeo
cuc Usila Chinantec
cug Chungmboko Cung
cuh Chuka Gichuka
cui Cuiba
cuj Mashco Piro
cuk San Blas Kuna
cul Culina Kulina
cuo Cumanagoto
cup Cupeño
cuq Cun
cur Chhulung
cut Teutila Cuicatec
cuu Tai Ya
cuv Cuvok
cuw Chukwa
cux Tepeuxila Cuicatec
cuy Cuitlatec
cvg Chug
cvn Valle Nacional Chinantec
cwa Kabwa
cwb Maindo
cwd Woods Cree
cwe Kwere
cwg Chewong Cheq Wong
cwt Kuwaataay
cya Nopala Chatino
cyb Cayubaba
cym Welsh
cyo Cuyonon
czh Huizhou Chinese
czk Knaanic
czn Zenzontepec Chatino
czo Min Zhong Chinese
czt Zotung Chin
daa Dangaléat
dac Dambi
dad Marik
dae Duupa
dag Dagbani
dah Gwahatike
dai Day
daj Dar Fur Daju
dak Dakota
dal Dahalo
dam Damakawa
dan Danish
dao Daai Chin
daq Dandami Maria
dar Dargwa
das Daho-Doo
dau Dar Sila Daju
dav Taita Dawida
daw Davawenyo
dax Dayi
daz Dao
dba Bangime
dbb Deno
dbd Dadiya
dbe Dabe
dbf Edopi
dbg Dogul Dom Dogon
dbi Doka
dbj Ida'an
dbl Dyirbal
dbm Duguri
dbn Duriankere
dbo Dulbu
dbp Duwai
dbq Daba
dbr Dabarre
dbt <NAME> Dogon
dbu Bondum Dom Dogon
dbv Dungu
dbw Bankan Tey Dogon
dby Dibiyaso
dcc Deccan
dcr Negerhollands
dda Dadi Dadi
ddd Dongotono
dde Doondo
ddg Fataluku
ddi West Goodenough
ddj Jaru
ddn Dendi (Benin)
ddo Dido
ddr Dhudhuroa
dds Donno So Dogon
ddw Dawera-Daweloor
dec Dagik
ded Dedua
dee Dewoin
def Dezfuli
deg Degema
deh Dehwari
dei Demisa
dek Dek
del Delaware
dem Dem
den Slave (Athapascan)
dep Pidgin Delaware
deq Dendi (Central African Republic)
der Deori
des Desano
deu German
dev Domung
dez Dengese
dga Southern Dagaare
dgb Bunoge Dogon
dgc Casiguran Dumagat Agta
dgd Dagaari Dioula
dge Degenan
dgg Doga
dgh Dghwede
dgi Northern Dagara
dgk Dagba
dgl Andaandi Dongolawi
dgn Dagoman
dgo Dogri (individual language)
dgr Dogrib Tłı̨chǫ
dgs Dogoso
dgt Ndra'ngith
dgw Daungwurrung
dgx Doghoro
dgz Daga
dhd Dhundari
dhg Dhangu-Djangu Dhangu,Djangu
dhi Dhimal
dhl Dhalandji
dhm Zemba
dhn Dhanki
dho Dhodia
dhr Dhargari
dhs Dhaiso
dhu Dhurga
dhv Dehu Drehu
dhw Dhanwar (Nepal)
dhx Dhungaloo
dia Dia
dib South Central Dinka
dic Lakota Dida
did Didinga
dif Dieri Diyari
dig Digo Chidigo
dih Kumiai
dii Dimbong
dij Dai
dik Southwestern Dinka
dil Dilling
dim Dime
din Dinka
dio Dibo
dip Northeastern Dinka
diq Dimli (individual language)
dir Dirim
dis Dimasa
diu Diriku
div Dhivehi Maldivian,Divehi
diw Northwestern Dinka
dix Dixon Reef
diy Diuwe
diz Ding
dja Djadjawurrung
djb Djinba
djc Dar Daju Daju
djd Djamindjung Ngaliwurru
dje Zarma
djf Djangun
dji Djinang
djj Djeebbana
djk Eastern Maroon Creole Businenge Tongo,Nenge
djm Jamsay Dogon
djn Jawoyn Djauan
djo Jangkang
djr Djambarrpuyngu
dju Kapriman
djw Djawi
dka Dakpakha
dkk Dakka
dkr Kuijau
dks Southeastern Dinka
dkx Mazagway
dlg Dolgan
dlk Dahalik
dlm Dalmatian
dln Darlong
dma Duma
dmb Mombo Dogon
dmc Gavak
dmd Madhi Madhi
dme Dugwor
dmf Medefaidrin
dmg Upper Kinabatangan
dmk Domaaki
dml Dameli
dmm Dama
dmo Kemedzung
dmr East Damar
dms Dampelas
dmu Dubu Tebi
dmv Dumpas
dmw Mudburra
dmx Dema
dmy Demta Sowari
dna Upper Grand Valley Dani
dnd Daonda
dne Ndendeule
dng Dungan
dni Lower Grand Valley Dani
dnj Dan
dnk Dengka
dnn Dzùùngoo
dno Ndrulo Northern Lendu
dnr Danaru
dnt Mid Grand Valley Dani
dnu Danau
dnv Danu
dnw Western Dani
dny Dení
doa Dom
dob Dobu
doc Northern Dong
doe Doe
dof Domu
doh Dong
doi Dogri (macrolanguage)
dok Dondo
dol Doso
don Toura (Papua New Guinea)
doo Dongo
dop Lukpa
doq Dominican Sign Language
dor Dori'o
dos Dogosé
dot Dass
dov Dombe
dow Doyayo
dox Bussa
doy Dompo
doz Dorze
dpp Papar
drb Dair
drc Minderico
drd Darmiya
dre Dolpo
drg Rungus
dri C'Lela
drl Paakantyi
drn West Damar
dro Daro-Matu Melanau
drq Dura
drs Gedeo
drt Drents
dru Rukai
dry Darai
dsb Lower Sorbian
dse Dutch Sign Language
dsh Daasanach
dsi Disa
dsl Danish Sign Language
dsn Dusner
dso Desiya
dsq Tadaksahak
dta Daur
dtb Labuk-Kinabatangan Kadazan
dtd Ditidaht
dth Adithinngithigh
dti Ana Tinga Dogon
dtk Tene Kan Dogon
dtm Tomo Kan Dogon
dtn Daatsʼíin
dto Tommo So Dogon
dtp Kadazan Dusun Central Dusun
dtr Lotud
dts Toro So Dogon
dtt Toro Tegu Dogon
dtu Tebul Ure Dogon
dty Dotyali
dua Duala
dub Dubli
duc Duna
due Umiray Dumaget Agta
duf Dumbea Drubea
dug Duruma Chiduruma
duh Dungra Bhil
dui Dumun
duk Uyajitaya
dul Alabat Island Agta
dum Middle Dutch (ca. 1050-1350)
dun Dusun Deyah
duo Dupaninan Agta
dup Duano
duq Dusun Malang
dur Dii
dus Dumi
duu Drung
duv Duvle
duw Dusun Witu
dux Duungooma
duy Dicamay Agta
duz Duli-Gey
dva Duau
dwa Diri
dwk Dawik Kui
dwr Dawro
dws Dutton World Speedwords
dwu Dhuwal
dww Dawawa
dwy Dhuwaya
dwz Dewas Rai
dya Dyan
dyb Dyaberdyaber
dyd Dyugun
dyg Villa Viciosa Agta
dyi Djimini Senoufo
dym Yanda Dom Dogon
dyn Dyangadi Dhanggatti
dyo Jola-Fonyi
dyu Dyula
dyy Djabugay Dyaabugay
dza Tunzu
dze Djiwarli
dzg Dazaga
dzl Dzalakha
dzn Dzando
dzo Dzongkha
eaa Karenggapa
ebc Beginci
ebg Ebughu
ebk Eastern Bontok
ebo Teke-Ebo
ebr Ebrié
ebu Embu Kiembu
ecr Eteocretan
ecs Ecuadorian Sign Language
ecy Eteocypriot
eee E
efa Efai
efe Efe
efi Efik
ega Ega
egl Emilian
ego Eggon
egy Egyptian (Ancient)
ehu Ehueun
eip Eipomek
eit Eitiep
eiv Askopan
eja Ejamat
eka Ekajuk
eke Ekit
ekg Ekari
eki Eki
ekk Standard Estonian
ekl Kol (Bangladesh) Kol
ekm Elip
eko Koti
ekp Ekpeye
ekr Yace
eky Eastern Kayah
ele Elepi
elh El Hugeirat
eli Nding
elk Elkei
ell Modern Greek (1453-)
elm Eleme
elo El Molo
elu Elu
elx Elamite
ema Emai-Iuleha-Ora
emb Embaloh
eme Emerillon
emg Eastern Meohang
emi Mussau-Emira
emk Eastern Maninkakan
emm Mamulique
emn Eman
emp Northern Emberá
ems Pacific Gulf Yupik
emu Eastern Muria
emw Emplawas
emx Erromintxela
emy Epigraphic Mayan
ena Apali
enb Markweeta
enc En
end Ende
enf Forest Enets
eng English
enh Tundra Enets
enl Enlhet
enm Middle English (1100-1500)
enn Engenni
eno Enggano
enq Enga
enr Emumu Emem
enu Enu
env Enwan (Edu State)
enw Enwan (Akwa Ibom State)
enx Enxet
eot Beti (Côte d'Ivoire)
epi Epie
epo Esperanto
era Eravallan
erg Sie
erh Eruwa
eri Ogea
erk South Efate
ero Horpa
err Erre
ers Ersu
ert Eritai
erw Erokwanas
ese Ese Ejja
esg Aheri Gondi
esh Eshtehardi
esi North Alaskan Inupiatun
esk Northwest Alaska Inupiatun
esl Egypt Sign Language
esm Esuma
esn Salvadoran Sign Language
eso Estonian Sign Language
esq Esselen
ess Central Siberian Yupik
est Estonian
esu Central Yupik
esy Eskayan
etb Etebi
etc Etchemin
eth Ethiopian Sign Language
etn Eton (Vanuatu)
eto Eton (Cameroon)
etr Edolo
ets Yekhee
ett Etruscan
etu Ejagham
etx Eten
etz Semimi
eus Basque
eve Even
evh Uvbie
evn Evenki
ewe Ewe
ewo Ewondo
ext Extremaduran
eya Eyak
eyo Keiyo
eza Ezaa
eze Uzekwe
faa Fasu
fab Fa d'Ambu
fad Wagi
faf Fagani
fag Finongan
fah Baissa Fali
fai Faiwol
faj Faita
fak Fang (Cameroon)
fal South Fali
fam Fam
fan Fang (Equatorial Guinea)
fao Faroese
fap Paloor
far Fataleka
fas Persian
fat Fanti
fau Fayu
fax Fala
fay Southwestern Fars
faz Northwestern Fars
fbl West Albay Bikol
fcs Quebec Sign Language
fer Feroge
ffi Foia Foia
ffm Maasina Fulfulde
fgr Fongoro
fia Nobiin
fie Fyer
fij Fijian
fil Filipino Pilipino
fin Finnish
fip Fipa
fir Firan
fit Tornedalen Finnish
fiw Fiwaga
fkk Kirya-Konzəl
fkv Kven Finnish
fla Kalispel-Pend d'Oreille
flh Foau
fli Fali
fll North Fali
fln Flinders Island
flr Fuliiru
fly Flaaitaal Tsotsitaal
fmp Fe'fe'
fmu Far Western Muria
fnb Fanbak
fng Fanagalo
fni Fania
fod Foodo
foi Foi
fom Foma
fon Fon
for Fore
fos Siraya
fpe Fernando Po Creole English
fqs Fas
fra French
frc Cajun French
frd Fordata
frk Frankish
frm Middle French (ca. 1400-1600)
fro Old French (842-ca. 1400)
frp Arpitan Francoprovençal
frq Forak
frr Northern Frisian
frs Eastern Frisian
frt Fortsenal
fry Western Frisian
fse Finnish Sign Language
fsl French Sign Language
fss Finland-Swedish Sign Language finlandssvenskt teckenspråk,suomenruotsalainen viittomakieli
fub Adamawa Fulfulde
fuc Pulaar
fud East Futuna
fue Borgu Fulfulde
fuf Pular
fuh Western Niger Fulfulde
fui Bagirmi Fulfulde
fuj Ko
ful Fulah
fum Fum
fun Fulniô
fuq Central-Eastern Niger Fulfulde
fur Friulian
fut Futuna-Aniwa
fuu Furu
fuv Nigerian Fulfulde
fuy Fuyug
fvr Fur
fwa Fwâi
fwe Fwe
gaa Ga
gab Gabri
gac Mixed Great Andamanese
gad Gaddang
gae Guarequena
gaf Gende
gag Gagauz
gah Alekano
gai Borei
gaj Gadsup
gak Gamkonora
gal Galolen
gam Kandawo
gan Gan Chinese
gao Gants
gap Gal
gaq Gata'
gar Galeya
gas Adiwasi Garasia
gat Kenati
gau Mudhili Gadaba
gaw Nobonob
gax Borana-Arsi-Guji Oromo
gay Gayo
gaz West Central Oromo
gba Gbaya (Central African Republic)
gbb Kaytetye
gbd Karajarri
gbe Niksek
gbf Gaikundi
gbg Gbanziri
gbh Defi Gbe
gbi Galela
gbj Bodo Gadaba
gbk Gaddi
gbl Gamit
gbm Garhwali
gbn Mo'da
gbo Northern Grebo
gbp Gbaya-Bossangoa
gbq Gbaya-Bozoum
gbr Gbagyi
gbs Gbesi Gbe
gbu Gagadu
gbv Gbanu
gbw Gabi-Gabi
gbx Eastern Xwla Gbe
gby Gbari
gbz Zoroastrian Dari
gcc Mali
gcd Ganggalida
gce Galice
gcf Guadeloupean Creole French
gcl Grenadian Creole English
gcn Gaina
gcr Guianese Creole French
gct Colonia Tovar German
gda Gade Lohar
gdb Pottangi Ollar Gadaba
gdc Gugu Badhun
gdd Gedaged
gde Gude
gdf Guduf-Gava
gdg Ga'dang
gdh Gadjerawang Gajirrabeng
gdi Gundi
gdj Gurdjar
gdk Gadang
gdl Dirasha
gdm Laal
gdn Umanakaina
gdo Ghodoberi
gdq Mehri
gdr Wipi
gds Ghandruk Sign Language
gdt Kungardutyi
gdu Gudu
gdx Godwari
gea Geruma
geb Kire
gec Gboloo Grebo
ged Gade
gef Gerai
geg Gengle
geh Hutterite German Hutterisch
gei Gebe
gej Gen
gek Ywom
gel ut-Ma'in
geq Geme
ges Geser-Gorom
gev Eviya
gew Gera
gex Garre
gey Enya
gez Geez
gfk Patpatar
gft Gafat
gga Gao
ggb Gbii
ggd Gugadj
gge Gurr-goni
ggg Gurgula
ggk Kungarakany
ggl Ganglau
ggt Gitua
ggu Gagu Gban
ggw Gogodala
gha Ghadamès
ghc Hiberno-Scottish Gaelic
ghe Southern Ghale
ghh Northern Ghale
ghk Geko Karen
ghl Ghulfan
ghn Ghanongga
gho Ghomara
ghr Ghera
ghs Guhu-Samane
ght Kuke Kutang Ghale
gia Kija
gib Gibanawa
gic Gail
gid Gidar
gie Gaɓogbo Guébie
gig Goaria
gih Githabul
gil Gilbertese
gim Gimi (Eastern Highlands)
gin Hinukh
gip Gimi (West New Britain)
giq Green Gelao
gir Red Gelao
gis North Giziga
git Gitxsan
giu Mulao
giw White Gelao
gix Gilima
giy Giyug
giz South Giziga
gji Geji
gjk Kachi Koli
gjm Gunditjmara
gjn Gonja
gjr Gurindji Kriol
gju Gujari
gka Guya
gkd Magɨ (Madang Province)
gke Ndai
gkn Gokana
gko Kok-Nar
gkp Guinea Kpelle
gku ǂUngkue
gla Scottish Gaelic Gaelic
glc Bon Gula
gld Nanai
gle Irish
glg Galician
glh Northwest Pashai Northwest Pashayi
glj Gula Iro
glk Gilaki
gll Garlali
glo Galambu
glr Glaro-Twabo
glu Gula (Chad)
glv Manx
glw Glavda
gly Gule
gma Gambera
gmb Gula'alaa
gmd Mághdì
gmg Magɨyi
gmh Middle High German (ca. 1050-1500)
gml Middle Low German
gmm Gbaya-Mbodomo
gmn Gimnime
gmr Mirning Mirniny
gmu Gumalu
gmv Gamo
gmx Magoma
gmy Mycenaean Greek
gmz Mgbolizhia
gna Kaansa
gnb Gangte
gnc Guanche
gnd Zulgo-Gemzek
gne Ganang
gng Ngangam
gnh Lere
gni Gooniyandi
gnj Ngen
gnk ǁGana
gnl Gangulu
gnm Ginuman
gnn Gumatj
gno Northern Gondi
gnq Gana
gnr Gureng Gureng
gnt Guntai
gnu Gnau
gnw Western Bolivian Guaraní
gnz Ganzi
goa Guro
gob Playero
goc Gorakor
god Godié
goe Gongduk
gof Gofa
gog Gogo
goh Old High German (ca. 750-1050)
goi Gobasi
goj Gowlan
gok Gowli
gol Gola
gom Goan Konkani
gon Gondi
goo Gone Dau
gop Yeretuar
goq Gorap
gor Gorontalo
gos Gronings
got Gothic
gou Gavar
gow Gorowa
gox Gobu
goy Goundo
goz Gozarkhani
gpa Gupa-Abawa
gpe Ghanaian Pidgin English
gpn Taiap
gqa Ga'anda
gqi Guiqiong
gqn Guana (Brazil)
gqr Gor
gqu Qau
gra Rajput Garasia
grb Grebo
grc Ancient Greek (to 1453)
grd Guruntum-Mbaaru
grg Madi
grh Gbiri-Niragu
gri Ghari
grj Southern Grebo
grm Kota Marudu Talantang
grn Guarani
gro Groma
grq Gorovu
grr Taznatit
grs Gresi
grt Garo
gru Kistane
grv Central Grebo
grw Gweda
grx Guriaso
gry Barclayville Grebo
grz Guramalum
gse Ghanaian Sign Language
gsg German Sign Language
gsl Gusilay
gsm Guatemalan Sign Language
gsn Nema Gusan
gso Southwest Gbaya
gsp Wasembo
gss Greek Sign Language
gsw Swiss German Alsatian,Alemannic
gta Guató
gtu Aghu-Tharnggala
gua Shiki
gub Guajajára
guc Wayuu
gud Yocoboué Dida
gue Gurindji
guf Gupapuyngu
gug Paraguayan Guaraní
guh Guahibo
gui Eastern Bolivian Guaraní
guj Gujarati
guk Gumuz
gul Sea Island Creole English
gum Guambiano
gun Mbyá Guaraní
guo Guayabero
gup Gunwinggu
guq Aché
gur Farefare
gus Guinean Sign Language
gut Maléku Jaíka
guu Yanomamö
guw Gun
gux Gourmanchéma
guz Gusii Ekegusii
gva Guana (Paraguay)
gvc Guanano
gve Duwet
gvf Golin
gvj Guajá
gvl Gulay
gvm Gurmana
gvn Kuku-Yalanji
gvo Gavião Do Jiparaná
gvp Pará Gavião
gvr Gurung
gvs Gumawana
gvy Guyani
gwa Mbato
gwb Gwa
gwc Gawri Kalami
gwd Gawwada
gwe Gweno
gwf Gowro
gwg Moo
gwi Gwichʼin
gwj ǀGwi
gwm Awngthim
gwn Gwandara
gwr Gwere
gwt Gawar-Bati
gwu Guwamu
gww Kwini
gwx Gua
gxx Wè Southern
gya Northwest Gbaya
gyb Garus
gyd Kayardild
gye Gyem
gyf Gungabula
gyg Gbayi
gyi Gyele
gyl Gayil
gym Ngäbere
gyn Guyanese Creole English
gyo Gyalsumdo
gyr Guarayu
gyy Gunya
gza Ganza
gzi Gazi
gzn Gane
haa Han
hab Hanoi Sign Language
hac Gurani
had Hatam
hae Eastern Oromo
haf Haiphong Sign Language
hag Hanga
hah Hahon
hai Haida
haj Hajong
hak Hakka Chinese
hal Halang
ham Hewa
han Hangaza
hao Hakö
hap Hupla
haq Ha
har Harari
has Haisla
hat Haitian Haitian Creole
hau Hausa
hav Havu
haw Hawaiian
hax Southern Haida
hay Haya
haz Hazaragi
hba Hamba
hbb Huba
hbn Heiban
hbo Ancient Hebrew
hbs Serbo-Croatian
hbu Habu
hca Andaman Creole Hindi
hch Huichol
hdn Northern Haida
hds Honduras Sign Language
hdy Hadiyya
hea Northern Qiandong Miao
heb Hebrew
hed Herdé
heg Helong
heh Hehe
hei Heiltsuk
hem Hemba
her Herero
hgm Haiǁom
hgw Haigwai
hhi Hoia Hoia
hhr Kerak
hhy Hoyahoya
hia Lamang
hib Hibito
hid Hidatsa
hif Fiji Hindi
hig Kamwe
hih Pamosu
hii Hinduri
hij Hijuk
hik Seit-Kaitetu
hil Hiligaynon
hin Hindi
hio Tsoa
hir Himarimã
hit Hittite
hiw Hiw
hix Hixkaryána
hji Haji
hka Kahe
hke Hunde
hkk Hunjara-Kaina Ke
hkn Mel-Khaonh
hks Hong Kong Sign Language Heung Kong Sau Yue
hla Halia
hlb Halbi
hld Halang Doan
hle Hlersu
hlt Matu Chin
hlu Hieroglyphic Luwian
hma Southern Mashan Hmong Southern Mashan Miao
hmb Humburi Senni Songhay
hmc Central Huishui Hmong Central Huishui Miao
hmd Large Flowery Miao Da-Hua Miao,A-hmaos
hme Eastern Huishui Hmong Eastern Huishui Miao
hmf Hmong Don
hmg Southwestern Guiyang Hmong
hmh Southwestern Huishui Hmong Southwestern Huishui Miao
hmi Northern Huishui Hmong Northern Huishui Miao
hmj Ge Gejia
hmk Maek
hml Luopohe Hmong Luopohe Miao
hmm Central Mashan Hmong Central Mashan Miao
hmn Hmong Mong
hmo Hiri Motu
hmp Northern Mashan Hmong Northern Mashan Miao
hmq Eastern Qiandong Miao
hmr Hmar
hms Southern Qiandong Miao
hmt Hamtai
hmu Hamap
hmv Hmong Dô
hmw Western Mashan Hmong Western Mashan Miao
hmy Southern Guiyang Hmong Southern Guiyang Miao
hmz Hmong Shua Sinicized Miao
hna Mina (Cameroon)
hnd Southern Hindko
hne Chhattisgarhi
hng Hungu
hnh ǁAni
hni Hani
hnj Hmong Njua Mong Njua,Mong Leng
hnn Hanunoo
hno Northern Hindko
hns Caribbean Hindustani
hnu Hung
hoa Hoava
hob Mari (Madang Province)
hoc Ho
hod Holma
hoe Horom
hoh Hobyót
hoi Holikachuk
hoj Hadothi Haroti
hol Holu
hom Homa
hoo Holoholo
hop Hopi
hor Horo
hos Ho Chi Minh City Sign Language
hot Hote Malê
hov Hovongan
how Honi
hoy Holiya
hoz Hozo
hpo Hpon
hps Hawai'i Sign Language (HSL) Hawai'i Pidgin Sign Language
hra Hrangkhol
hrc Niwer Mil
hre Hre
hrk Haruku
hrm Horned Miao
hro Haroi
hrp Nhirrpi
hrt Hértevin
hru Hruso
hrv Croatian
hrw Warwar Feni
hrx Hunsrik
hrz Harzani
hsb Upper Sorbian
hsh Hungarian Sign Language
hsl Hausa Sign Language
hsn Xiang Chinese
hss Harsusi
hti Hoti
hto Minica Huitoto
hts Hadza
htu Hitu
htx Middle Hittite
hub Huambisa
huc ǂHua ǂ'Amkhoe
hud Huaulu
hue San Francisco Del Mar Huave
huf Humene
hug Huachipaeri
huh Huilliche
hui Huli
huj Northern Guiyang Hmong Northern Guiyang Miao
huk Hulung
hul Hula
hum Hungana
hun Hungarian
huo Hu
hup Hupa
huq Tsat
hur Halkomelem
hus Huastec
hut Humla
huu Murui Huitoto
huv San Mateo Del Mar Huave
huw Hukumina
hux Nüpode Huitoto
huy Hulaulá
huz Hunzib
hvc Haitian Vodoun Culture Language
hve San Dionisio Del Mar Huave
hvk Haveke
hvn Sabu
hvv Santa María Del Mar Huave
hwa Wané
hwc Hawai'i Creole English Hawai'i Pidgin
hwo Hwana
hya Hya
hye Armenian
hyw Western Armenian
iai Iaai
ian Iatmul
iar Purari
iba Iban
ibb Ibibio
ibd Iwaidja
ibe Akpes
ibg Ibanag
ibh Bih
ibl Ibaloi
ibm Agoi
ibn Ibino
ibo Igbo
ibr Ibuoro
ibu Ibu
iby Ibani
ica Ede Ica
ich Etkywan
icl Icelandic Sign Language
icr Islander Creole English
ida Idakho-Isukha-Tiriki Luidakho-Luisukha-Lutirichi
idb Indo-Portuguese
idc Idon Ajiya
idd Ede Idaca
ide Idere
idi Idi
ido Ido
idr Indri
ids Idesa
idt Idaté
idu Idoma
ifa Amganad Ifugao
ifb Batad Ifugao Ayangan Ifugao
ife Ifè
iff Ifo
ifk Tuwali Ifugao
ifm Teke-Fuumu
ifu Mayoyao Ifugao
ify Keley-I Kallahan
igb Ebira
ige Igede
igg Igana
igl Igala
igm Kanggape
ign Ignaciano
igo Isebe
igs Interglossa
igw Igwe
ihb Iha Based Pidgin
ihi Ihievbe
ihp Iha
ihw Bidhawal
iii Sichuan Yi Nuosu
iin Thiin
ijc Izon
ije Biseni
ijj Ede Ije
ijn Kalabari
ijs Southeast Ijo
ike Eastern Canadian Inuktitut
iki Iko
ikk Ika
ikl Ikulu
iko Olulumo-Ikom
ikp Ikpeshi
ikr Ikaranggal
iks Inuit Sign Language
ikt Inuinnaqtun Western Canadian Inuktitut
iku Inuktitut
ikv Iku-Gora-Ankwa
ikw Ikwere
ikx Ik
ikz Ikizu
ila Ile Ape
ilb Ila
ile Interlingue Occidental
ilg Garig-Ilgar
ili Ili Turki
ilk Ilongot
ilm Iranun (Malaysia)
ilo Iloko
ilp Iranun (Philippines)
ils International Sign
ilu Ili'uun
ilv Ilue
ima Mala Malasar
imi Anamgura
iml Miluk
imn Imonda
imo Imbongu
imr Imroing
ims Marsian
imy Milyan
ina Interlingua (International Auxiliary Language Association)
inb Inga
ind Indonesian
ing Degexit'an
inh Ingush
inj Jungle Inga
inl Indonesian Sign Language
inm Minaean
inn Isinai
ino Inoke-Yate
inp Iñapari
ins Indian Sign Language
int Intha
inz Ineseño
ior Inor
iou Tuma-Irumu
iow Iowa-Oto
ipi Ipili
ipk Inupiaq
ipo Ipiko
iqu Iquito
iqw Ikwo
ire Iresim
irh Irarutu
iri Rigwe Irigwe
irk Iraqw
irn Irántxe
irr Ir
iru Irula
irx Kamberau
iry Iraya
isa Isabi
isc Isconahua
isd Isnag
ise Italian Sign Language
isg Irish Sign Language
ish Esan
isi Nkem-Nkum
isk Ishkashimi
isl Icelandic
ism Masimasi
isn Isanzu
iso Isoko
isr Israeli Sign Language
ist Istriot
isu Isu (Menchum Division)
ita Italian
itb Binongan Itneg
itd Southern Tidung
ite Itene
iti Inlaod Itneg
itk Judeo-Italian
itl Itelmen
itm Itu Mbon Uzo
ito Itonama
itr Iteri
its Isekiri
itt Maeng Itneg
itv Itawit
itw Ito
itx Itik
ity Moyadan Itneg
itz Itzá
ium Iu Mien
ivb Ibatan
ivv Ivatan
iwk I-Wak
iwm Iwam
iwo Iwur
iws Sepik Iwam
ixc Ixcatec
ixl Ixil
iya Iyayu
iyo Mesaka
iyx Yaka (Congo)
izh Ingrian
izr Izere
izz Izii
jaa Jamamadí
jab Hyam
jac Popti' Jakalteko
jad Jahanka
jae Yabem
jaf Jara
jah Jah Hut
jaj Zazao
jak Jakun
jal Yalahatan
jam Jamaican Creole English
jan Jandai
jao Yanyuwa
jaq Yaqay
jas New Caledonian Javanese
jat Jakati
jau Yaur
jav Javanese
jax Jambi Malay
jay Yan-nhangu Nhangu
jaz Jawe
jbe Judeo-Berber
jbi Badjiri
jbj Arandai
jbk Barikewa
jbn Nafusi
jbo Lojban
jbr Jofotek-Bromnya
jbt Jabutí
jbu Jukun Takum
jbw Yawijibaya
jcs Jamaican Country Sign Language
jct Krymchak
jda Jad
jdg Jadgali
jdt Judeo-Tat
jeb Jebero
jee Jerung
jeh Jeh
jei Yei
jek Jeri Kuo
jel Yelmek
jen Dza
jer Jere
jet Manem
jeu Jonkor Bourmataguil
jgb Ngbee
jge Judeo-Georgian
jgk Gwak
jgo Ngomba
jhi Jehai
jhs Jhankot Sign Language
jia Jina
jib Jibu
jic Tol
jid Bu
jie Jilbe
jig Jingulu Djingili
jih sTodsde Shangzhai
jii Jiiddu
jil Jilim
jim Jimi (Cameroon)
jio Jiamao
jiq Guanyinqiao Lavrung
jit Jita
jiu Youle Jinuo
jiv Shuar
jiy Buyuan Jinuo
jje Jejueo
jjr Bankal
jka Kaera
jkm Mobwa Karen
jko Kubo
jkp Paku Karen
jkr Koro (India)
jku Labir
jle Ngile
jls Jamaican Sign Language
jma Dima
jmb Zumbun
jmc Machame
jmd Yamdena
jmi Jimi (Nigeria)
jml Jumli
jmn Makuri Naga
jmr Kamara
jms Mashi (Nigeria)
jmw Mouwase
jmx Western Juxtlahuaca Mixtec
jna Jangshung
jnd Jandavra
jng Yangman
jni Janji
jnj Yemsa
jnl Rawat
jns Jaunsari
job Joba
jod Wojenaka
jog Jogi
jor Jorá
jos Jordanian Sign Language
jow Jowulu
jpa Jewish Palestinian Aramaic
jpn Japanese
jpr Judeo-Persian
jqr Jaqaru
jra Jarai
jrb Judeo-Arabic
jrr Jiru
jrt Jorto
jru Japrería
jsl Japanese Sign Language
jua Júma
jub Wannu
juc Jurchen
jud Worodougou
juh Hõne
jui Ngadjuri
juk Wapan
jul Jirel
jum Jumjum
jun Juang
juo Jiba
jup Hupdë
jur Jurúna
jus Jumla Sign Language
jut Jutish
juu Ju
juw Wãpha
juy Juray
jvd Javindo
jvn Caribbean Javanese
jwi Jwira-Pepesa
jya Jiarong
jye Judeo-Yemeni Arabic
jyy Jaya
kaa Kara-Kalpak Karakalpak
kab Kabyle
kac Kachin Jingpho
kad Adara
kae Ketangalan
kaf Katso
kag Kajaman
kah Kara (Central African Republic)
kai Karekare
kaj Jju
kak Kalanguya Kayapa Kallahan
kal Kalaallisut Greenlandic
kam Kamba (Kenya)
kan Kannada
kao Xaasongaxango
kap Bezhta
kaq Capanahua
kas Kashmiri
kat Georgian
kau Kanuri
kav Katukína
kaw Kawi
kax Kao
kay Kamayurá
kaz Kazakh
kba Kalarko
kbb Kaxuiâna
kbc Kadiwéu
kbd Kabardian
kbe Kanju
kbg Khamba
kbh Camsá
kbi Kaptiau
kbj Kari
kbk Grass Koiari
kbl Kanembu
kbm Iwal
kbn Kare (Central African Republic)
kbo Keliko
kbp Kabiyè
kbq Kamano
kbr Kafa
kbs Kande
kbt Abadi
kbu Kabutra
kbv Dera (Indonesia)
kbw Kaiep
kbx Ap Ma
kby Manga Kanuri
kbz Duhwa
kca Khanty
kcb Kawacha
kcc Lubila
kcd Ngkâlmpw Kanum
kce Kaivi
kcf Ukaan
kcg Tyap
kch Vono
kci Kamantan
kcj Kobiana
kck Kalanga
kcl Kela (Papua New Guinea) Kala
kcm Gula (Central African Republic)
kcn Nubi
kco Kinalakna
kcp Kanga
kcq Kamo
kcr Katla
kcs Koenoem
kct Kaian
kcu Kami (Tanzania)
kcv Kete
kcw Kabwari
kcx Kachama-Ganjule
kcy Korandje
kcz Konongo
kda Worimi
kdc Kutu
kdd Yankunytjatjara
kde Makonde
kdf Mamusi
kdg Seba
kdh Tem
kdi Kumam
kdj Karamojong
kdk Numèè Kwényi
kdl Tsikimba
kdm Kagoma
kdn Kunda
kdp Kaningdon-Nindem
kdq Koch
kdr Karaim
kdt Kuy
kdu Kadaru
kdw Koneraw
kdx Kam
kdy Keder Keijar
kdz Kwaja
kea Kabuverdianu
keb Kélé
kec Keiga
ked Kerewe
kee Eastern Keres
kef Kpessi
keg Tese
keh Keak
kei Kei
kej Kadar
kek Kekchí
kel Kela (Democratic Republic of Congo)
kem Kemak
ken Kenyang
keo Kakwa
kep Kaikadi
keq Kamar
ker Kera
kes Kugbo
ket Ket
keu Akebu
kev Kanikkaran
kew West Kewa
kex Kukna
key Kupia
kez Kukele
kfa Kodava
kfb Northwestern Kolami
kfc Konda-Dora
kfd Korra Koraga
kfe Kota (India)
kff Koya
kfg Kudiya
kfh Kurichiya
kfi Kannada Kurumba
kfj Kemiehua
kfk Kinnauri
kfl Kung
kfm Khunsari
kfn Kuk
kfo Koro (Côte d'Ivoire)
kfp Korwa
kfq Korku
kfr Kachhi Kutchi
kfs Bilaspuri
kft Kanjari
kfu Katkari
kfv Kurmukar
kfw Kharam Naga
kfx Kullu Pahari
kfy Kumaoni
kfz Koromfé
kga Koyaga
kgb Kawe
kge Komering
kgf Kube
kgg Kusunda
kgi Selangor Sign Language
kgj Gamale Kham
kgk Kaiwá
kgl Kunggari
kgm Karipúna
kgn Karingani
kgo Krongo
kgp Kaingang
kgq Kamoro
kgr Abun
kgs Kumbainggar
kgt Somyev
kgu Kobol
kgv Karas
kgw Karon Dori
kgx Kamaru
kgy Kyerung
kha Khasi
khb Lü
khc Tukang Besi North
khd Bädi Kanum
khe Korowai
khf Khuen
khg Khams Tibetan
khh Kehu
khj Kuturmi
khk Halh Mongolian
khl Lusi
khm Khmer Central Khmer
khn Khandesi
kho Khotanese Sakan
khp Kapori Kapauri
khq Koyra Chiini Songhay
khr Kharia
khs Kasua
kht Khamti
khu Nkhumbi
khv Khvarshi
khw Khowar
khx Kanu
khy Kele (Democratic Republic of Congo)
khz Keapara
kia Kim
kib Koalib
kic Kickapoo
kid Koshin
kie Kibet
kif Eastern Parbate Kham
kig Kimaama Kimaghima
kih Kilmeri
kii Kitsai
kij Kilivila
kik Kikuyu Gikuyu
kil Kariya
kim Karagas
kin Kinyarwanda
kio Kiowa
kip Sheshi Kham
kiq Kosadle Kosare
kir Kirghiz Kyrgyz
kis Kis
kit Agob
kiu Kirmanjki (individual language)
kiv Kimbu
kiw Northeast Kiwai
kix Khiamniungan Naga
kiy Kirikiri
kiz Kisi
kja Mlap
kjb Q'anjob'al Kanjobal
kjc Coastal Konjo
kjd Southern Kiwai
kje Kisar
kjg Khmu
kjh Khakas
kji Zabana
kjj Khinalugh
kjk Highland Konjo
kjl Western Parbate Kham
kjm Kháng
kjn Kunjen
kjo <NAME>
kjp Pwo Eastern Karen
kjq Western Keres
kjr Kurudu
kjs East Kewa
kjt Phrae Pwo Karen
kju Kashaya
kjv Kaikavian Literary Language
kjx Ramopa
kjy Erave
kjz Bumthangkha
kka Kakanda
kkb Kwerisa
kkc Odoodee
kkd Kinuku
kke Kakabe
kkf Kalaktang Monpa
kkg Mabaka Valley Kalinga
kkh Khün
kki Kagulu
kkj Kako
kkk Kokota
kkl Kosarek Yale
kkm Kiong
kkn Kon Keu
kko Karko
kkp Gugubera Koko-Bera
kkq Kaeku
kkr Kir-Balar
kks Giiwo
kkt Koi
kku Tumi
kkv Kangean
kkw Teke-Kukuya
kkx Kohin
kky Guugu Yimidhirr Guguyimidjir
kkz Kaska
kla Klamath-Modoc
klb Kiliwa
klc Kolbila
kld Gamilaraay
kle Kulung (Nepal)
klf Kendeje
klg Tagakaulo
klh Weliki
kli Kalumpang
klj Khalaj
klk Kono (Nigeria)
kll Kagan Kalagan
klm Migum
kln Kalenjin
klo Kapya
klp Kamasa
klq Rumu
klr Khaling
kls Kalasha
klt Nukna
klu Klao
klv Maskelynes
klw Tado Lindu
klx Koluwawa
kly Kalao
klz Kabola
kma Konni
kmb Kimbundu
kmc Southern Dong
kmd Majukayang Kalinga
kme Bakole
kmf Kare (Papua New Guinea)
kmg Kâte
kmh Kalam
kmi Kami (Nigeria)
kmj Kumarbhag Paharia
kmk Limos Kalinga
kml Tanudan Kalinga
kmm Kom (India)
kmn Awtuw
kmo Kwoma
kmp Gimme
kmq Kwama
kmr Northern Kurdish
kms Kamasau
kmt Kemtuik
kmu Kanite
kmv Karipúna Creole French
kmw Komo (Democratic Republic of Congo)
kmx Waboda
kmy Koma
kmz Khorasani Turkish
kna Dera (Nigeria)
knb Lubuagan Kalinga
knc Central Kanuri
knd Konda
kne Kankanaey
knf Mankanya
kng Koongo
kni Kanufi
knj Western Kanjobal
knk Kuranko
knl Keninjal
knm Kanamarí
knn Konkani (individual language)
kno Kono (Sierra Leone)
knp Kwanja
knq Kintaq
knr Kaningra
kns Kensiu
knt Panoan Katukína
knu Kono (Guinea)
knv Tabo
knw Kung-Ekoka
knx Kendayan Salako
kny Kanyok
knz Kalamsé
koa Konomala
koc Kpati
kod Kodi
koe Kacipo-Balesi
kof Kubi
kog Cogui Kogi
koh Koyo
koi Komi-Permyak
kok Konkani (macrolanguage)
kol Kol (Papua New Guinea)
kom Komi
kon Kongo
koo Konzo
kop Waube
koq Kota (Gabon)
kor Korean
kos Kosraean
kot Lagwan
kou Koke
kov Kudu-Camo
kow Kugama
koy Koyukon
koz Korak
kpa Kutto
kpb Mullu Kurumba
kpc Curripaco
kpd Koba
kpe Kpelle
kpf Komba
kpg Kapingamarangi
kph Kplang
kpi Kofei
kpj Karajá
kpk Kpan
kpl Kpala
kpm Koho
kpn Kepkiriwát
kpo Ikposo
kpq Korupun-Sela
kpr Korafe-Yegha
kps Tehit
kpt Karata
kpu Kafoa
kpv Komi-Zyrian
kpw Kobon
kpx Mountain Koiali
kpy Koryak
kpz Kupsabiny
kqa Mum
kqb Kovai
kqc Doromu-Koki
kqd Koy Sanjaq Surat
kqe Kalagan
kqf Kakabai
kqg Khe
kqh Kisankasa
kqi Koitabu
kqj Koromira
kqk Kotafon Gbe
kql Kyenele
kqm Khisa
kqn Kaonde
kqo Eastern Krahn
kqp Kimré
kqq Krenak
kqr Kimaragang
kqs Northern Kissi
kqt Klias River Kadazan
kqu Seroa
kqv Okolod
kqw Kandas
kqx Mser
kqy Koorete
kqz Korana
kra Kumhali
krb Karkin
krc Karachay-Balkar
krd Kairui-Midiki
kre Panará
krf Koro (Vanuatu)
krh Kurama
kri Krio
krj Kinaray-A
krk Kerek
krl Karelian
krn Sapo
krp Korop
krr Krung
krs Gbaya (Sudan)
krt Tumari Kanuri
kru Kurukh
krv Kavet
krw Western Krahn
krx Karon
kry Kryts
krz Sota Kanum
ksa Shuwa-Zamani
ksb Shambala
ksc Southern Kalinga
ksd Kuanua
kse Kuni
ksf Bafia
ksg Kusaghe
ksh Kölsch
ksi Krisa I'saka
ksj Uare
ksk Kansa
ksl Kumalu
ksm Kumba
ksn Kasiguranin
kso Kofa
ksp Kaba
ksq Kwaami
ksr Borong
kss Southern Kisi
kst Winyé
ksu Khamyang
ksv Kusu
ksw S'gaw Karen
ksx Kedang
ksy Kharia Thar
ksz Kodaku
kta Katua
ktb Kambaata
ktc Kholok
ktd Kokata Kukatha
kte Nubri
ktf Kwami
ktg Kalkutung
kth Karanga
kti North Muyu
ktj Plapo Krumen
ktk Kaniet
ktl Koroshi
ktm Kurti
ktn Karitiâna
kto Kuot
ktp Kaduo
ktq Katabaga
kts South Muyu
ktt Ketum
ktu Kituba (Democratic Republic of Congo)
ktv Eastern Katu
ktw Kato
ktx Kaxararí
kty Kango (Bas-Uélé District)
ktz Juǀʼhoan Juǀʼhoansi
kua Kuanyama Kwanyama
kub Kutep
kuc Kwinsu
kud 'Auhelawa
kue Kuman (Papua New Guinea)
kuf Western Katu
kug Kupa
kuh Kushi
kui Kuikúro-Kalapálo Kalapalo
kuj Kuria
kuk Kepo'
kul Kulere
kum Kumyk
kun Kunama
kuo Kumukio
kup Kunimaipa
kuq Karipuna
kur Kurdish
kus Kusaal
kut Kutenai
kuu Upper Kuskokwim
kuv Kur
kuw Kpagua
kux Kukatja
kuy Kuuku-Ya'u
kuz Kunza
kva Bagvalal
kvb Kubu
kvc Kove
kvd Kui (Indonesia)
kve Kalabakan
kvf Kabalai
kvg Kuni-Boazi
kvh Komodo
kvi Kwang
kvj Psikye
kvk Korean Sign Language
kvl Kayaw
kvm Kendem
kvn Border Kuna
kvo Dobel
kvp Kompane
kvq Geba Karen
kvr Kerinci
kvt Lahta Karen Lahta
kvu Yinbaw Karen
kvv Kola
kvw Wersing
kvx Parkari Koli
kvy Yintale Karen Yintale
kvz Tsakwambo Tsaukambo
kwa Dâw
kwb Kwa
kwc Likwala
kwd Kwaio
kwe Kwerba
kwf Kwara'ae
kwg Sara Kaba Deme
kwh Kowiai
kwi Awa-Cuaiquer
kwj Kwanga
kwk Kwakiutl
kwl Kofyar
kwm Kwambi
kwn Kwangali
kwo Kwomtari
kwp Kodia
kwr Kwer
kws Kwese
kwt Kwesten
kwu Kwakum
kwv Sara Kaba Náà
kww Kwinti
kwx Khirwar
kwy San Salvador Kongo
kwz Kwadi
kxa Kairiru
kxb Krobu
kxc Konso Khonso
kxd Brunei
kxf Manumanaw Karen Manumanaw
kxh Karo (Ethiopia)
kxi Keningau Murut
kxj Kulfa
kxk Zayein Karen
kxm Northern Khmer
kxn Kanowit-Tanjong Melanau
kxo Kanoé
kxp Wadiyara Koli
#Import packages
import tkinter as tk
import pandas as pd
#Build the main window and frame
window = tk.Tk() #create the main window
top_frame = tk.Frame(window) #create the Frame
window.title("SLOT RTP Calculator") #set the window title
window.geometry('440x1000') #set the window size
def dataimport(): #callback: read the entered symbol names
result1_str.set("{}".format(ICON1_entry0.get()))
result2_str.set("{}".format(ICON2_entry0.get()))
result3_str.set("{}".format(ICON3_entry0.get()))
result4_str.set("{}".format(ICON4_entry0.get()))
result5_str.set("{}".format(ICON5_entry0.get()))
result6_str.set("{}".format(ICON6_entry0.get()))
result7_str.set("{}".format(ICON7_entry0.get()))
SCAT_str.set("{}".format(SCAT_entry0.get()))
WILD_str.set("{}".format(WILD_entry0.get()))
#1 #read the entered counts
ICON1_entry1_num = int(ICON1_entry1.get())
ICON1_entry2_num = int(ICON1_entry2.get())
ICON1_entry3_num = int(ICON1_entry3.get())
ICON1_entry4_num = int(ICON1_entry4.get())
ICON1_entry5_num = int(ICON1_entry5.get())
#2
ICON2_entry1_num = int(ICON2_entry1.get())
ICON2_entry2_num = int(ICON2_entry2.get())
ICON2_entry3_num = int(ICON2_entry3.get())
ICON2_entry4_num = int(ICON2_entry4.get())
ICON2_entry5_num = int(ICON2_entry5.get())
#3
ICON3_entry1_num = int(ICON3_entry1.get())
ICON3_entry2_num = int(ICON3_entry2.get())
ICON3_entry3_num = int(ICON3_entry3.get())
ICON3_entry4_num = int(ICON3_entry4.get())
ICON3_entry5_num = int(ICON3_entry5.get())
#4
ICON4_entry1_num = int(ICON4_entry1.get())
ICON4_entry2_num = int(ICON4_entry2.get())
ICON4_entry3_num = int(ICON4_entry3.get())
ICON4_entry4_num = int(ICON4_entry4.get())
ICON4_entry5_num = int(ICON4_entry5.get())
#5
ICON5_entry1_num = int(ICON5_entry1.get())
ICON5_entry2_num = int(ICON5_entry2.get())
ICON5_entry3_num = int(ICON5_entry3.get())
ICON5_entry4_num = int(ICON5_entry4.get())
ICON5_entry5_num = int(ICON5_entry5.get())
#6
ICON6_entry1_num = int(ICON6_entry1.get())
ICON6_entry2_num = int(ICON6_entry2.get())
ICON6_entry3_num = int(ICON6_entry3.get())
ICON6_entry4_num = int(ICON6_entry4.get())
ICON6_entry5_num = int(ICON6_entry5.get())
#7
ICON7_entry1_num = int(ICON7_entry1.get())
ICON7_entry2_num = int(ICON7_entry2.get())
ICON7_entry3_num = int(ICON7_entry3.get())
ICON7_entry4_num = int(ICON7_entry4.get())
ICON7_entry5_num = int(ICON7_entry5.get())
#SCAT
SCAT_entry1_num = int(SCAT_entry1.get())
SCAT_entry2_num = int(SCAT_entry2.get())
SCAT_entry3_num = int(SCAT_entry3.get())
SCAT_entry4_num = int(SCAT_entry4.get())
SCAT_entry5_num = int(SCAT_entry5.get())
#WILD
WILD_entry1_num = int(WILD_entry1.get())
WILD_entry2_num = int(WILD_entry2.get())
WILD_entry3_num = int(WILD_entry3.get())
WILD_entry4_num = int(WILD_entry4.get())
WILD_entry5_num = int(WILD_entry5.get())
#Total symbol count on each reel
#Reel 1
wheel1=[]
wheel1.append(ICON1_entry1_num)
wheel1.append(ICON2_entry1_num)
wheel1.append(ICON3_entry1_num)
wheel1.append(ICON4_entry1_num)
wheel1.append(ICON5_entry1_num)
wheel1.append(ICON6_entry1_num)
wheel1.append(ICON7_entry1_num)
wheel1.append(SCAT_entry1_num)
wheel1.append(WILD_entry1_num)
wheel1_sum = sum(wheel1)
#Reel 2
wheel2=[]
wheel2.append(ICON1_entry2_num)
wheel2.append(ICON2_entry2_num)
wheel2.append(ICON3_entry2_num)
wheel2.append(ICON4_entry2_num)
wheel2.append(ICON5_entry2_num)
wheel2.append(ICON6_entry2_num)
wheel2.append(ICON7_entry2_num)
wheel2.append(SCAT_entry2_num)
wheel2.append(WILD_entry2_num)
wheel2_sum = sum(wheel2)
#Reel 3
wheel3=[]
wheel3.append(ICON1_entry3_num)
wheel3.append(ICON2_entry3_num)
wheel3.append(ICON3_entry3_num)
wheel3.append(ICON4_entry3_num)
wheel3.append(ICON5_entry3_num)
wheel3.append(ICON6_entry3_num)
wheel3.append(ICON7_entry3_num)
wheel3.append(SCAT_entry3_num)
wheel3.append(WILD_entry3_num)
wheel3_sum = sum(wheel3)
#Reel 4
wheel4=[]
wheel4.append(ICON1_entry4_num)
wheel4.append(ICON2_entry4_num)
wheel4.append(ICON3_entry4_num)
wheel4.append(ICON4_entry4_num)
wheel4.append(ICON5_entry4_num)
wheel4.append(ICON6_entry4_num)
wheel4.append(ICON7_entry4_num)
wheel4.append(SCAT_entry4_num)
wheel4.append(WILD_entry4_num)
wheel4_sum = sum(wheel4)
#Reel 5
wheel5=[]
wheel5.append(ICON1_entry5_num)
wheel5.append(ICON2_entry5_num)
wheel5.append(ICON3_entry5_num)
wheel5.append(ICON4_entry5_num)
wheel5.append(ICON5_entry5_num)
wheel5.append(ICON6_entry5_num)
wheel5.append(ICON7_entry5_num)
wheel5.append(SCAT_entry5_num)
wheel5.append(WILD_entry5_num)
wheel5_sum = sum(wheel5)
#DataFrame
wheel_table = pd.DataFrame({
"first": wheel1, "second":wheel2, "third":wheel3, "fourth":wheel4, "fivth":wheel5
}, index = [ICON1_entry0.get(),
ICON2_entry0.get(),
ICON3_entry0.get(),
ICON4_entry0.get(),
ICON5_entry0.get(),
ICON6_entry0.get(),
ICON7_entry0.get(),
SCAT_entry0.get(),
WILD_entry0.get() ])
wheelist1 = [ICON1_entry1_num + WILD_entry1_num , ICON2_entry1_num + WILD_entry1_num , ICON3_entry1_num + WILD_entry1_num ,ICON4_entry1_num + WILD_entry1_num,
ICON5_entry1_num + WILD_entry1_num , ICON6_entry1_num + WILD_entry1_num , ICON7_entry1_num + WILD_entry1_num]
wheelist2 = [ICON1_entry2_num + WILD_entry2_num , ICON2_entry2_num + WILD_entry2_num , ICON3_entry2_num + WILD_entry2_num ,ICON4_entry2_num + WILD_entry2_num,
ICON5_entry2_num + WILD_entry2_num , ICON6_entry2_num + WILD_entry2_num , ICON7_entry2_num + WILD_entry2_num]
wheelist3 = [ICON1_entry3_num + WILD_entry3_num , ICON2_entry3_num + WILD_entry3_num , ICON3_entry3_num + WILD_entry3_num ,ICON4_entry3_num + WILD_entry3_num,
ICON5_entry3_num + WILD_entry3_num , ICON6_entry3_num + WILD_entry3_num , ICON7_entry3_num + WILD_entry3_num]
wheelist4 = [ICON1_entry4_num + WILD_entry4_num , ICON2_entry4_num + WILD_entry4_num , ICON3_entry4_num + WILD_entry4_num ,ICON4_entry4_num + WILD_entry4_num,
ICON5_entry4_num + WILD_entry4_num , ICON6_entry4_num + WILD_entry4_num , ICON7_entry4_num + WILD_entry4_num]
wheelist5 = [ICON1_entry5_num + WILD_entry5_num , ICON2_entry5_num + WILD_entry5_num , ICON3_entry5_num + WILD_entry5_num ,ICON4_entry5_num + WILD_entry5_num,
ICON5_entry5_num + WILD_entry5_num , ICON6_entry5_num + WILD_entry5_num , ICON7_entry5_num + WILD_entry5_num]
#WILD count plus each symbol's count
wildsum = pd.DataFrame({
"Reel 1":wheelist1,
"Reel 2":wheelist2,
"Reel 3":wheelist3,
"Reel 4":wheelist4,
"Reel 5":wheelist5
}, index = [ICON1_entry0.get(),
ICON2_entry0.get(),
ICON3_entry0.get(),
ICON4_entry0.get(),
ICON5_entry0.get(),
ICON6_entry0.get(),
ICON7_entry0.get()
])
notlist1 = [ wheel1_sum-wheelist1[0], wheel1_sum-wheelist1[1] , wheel1_sum-wheelist1[2], wheel1_sum-wheelist1[3] , wheel1_sum-wheelist1[4] , wheel1_sum-wheelist1[5], wheel1_sum-wheelist1[6]]
notlist2 = [ wheel2_sum-wheelist2[0], wheel2_sum-wheelist2[1] , wheel2_sum-wheelist2[2], wheel2_sum-wheelist2[3] , wheel2_sum-wheelist2[4] , wheel2_sum-wheelist2[5], wheel2_sum-wheelist2[6]]
notlist3 = [ wheel3_sum-wheelist3[0], wheel3_sum-wheelist3[1] , wheel3_sum-wheelist3[2], wheel3_sum-wheelist3[3] , wheel3_sum-wheelist3[4] , wheel3_sum-wheelist3[5], wheel3_sum-wheelist3[6]]
notlist4 = [ wheel4_sum-wheelist4[0], wheel4_sum-wheelist4[1] , wheel4_sum-wheelist4[2], wheel4_sum-wheelist4[3] , wheel4_sum-wheelist4[4] , wheel4_sum-wheelist4[5], wheel4_sum-wheelist4[6]]
notlist5 = [ wheel5_sum-wheelist5[0], wheel5_sum-wheelist5[1] , wheel5_sum-wheelist5[2], wheel5_sum-wheelist5[3] , wheel5_sum-wheelist5[4] , wheel5_sum-wheelist5[5], wheel5_sum-wheelist5[6]]
#Neither WILD nor the given symbol
notwild = pd.DataFrame({
"Non-WILD reel 1":notlist1,
"Non-WILD reel 2":notlist2,
"Non-WILD reel 3":notlist3,
"Non-WILD reel 4":notlist4,
"Non-WILD reel 5":notlist5
}, index = [ICON1_entry0.get(),
ICON2_entry0.get(),
ICON3_entry0.get(),
ICON4_entry0.get(),
ICON5_entry0.get(),
ICON6_entry0.get(),
ICON7_entry0.get()
])
print(wheel_table)
print(wildsum)
print(notwild)
def datacalcul():
#1
ICON1_bet1_num = int(ICON1_bet1.get())
ICON1_bet2_num = int(ICON1_bet2.get())
ICON1_bet3_num = int(ICON1_bet3.get())
ICON1_bet4_num = int(ICON1_bet4.get())
ICON1_bet5_num = int(ICON1_bet5.get())
#2
ICON2_bet1_num = int(ICON2_bet1.get())
ICON2_bet2_num = int(ICON2_bet2.get())
ICON2_bet3_num = int(ICON2_bet3.get())
ICON2_bet4_num = int(ICON2_bet4.get())
ICON2_bet5_num = int(ICON2_bet5.get())
#3
ICON3_bet1_num = int(ICON3_bet1.get())
ICON3_bet2_num = int(ICON3_bet2.get())
ICON3_bet3_num = int(ICON3_bet3.get())
ICON3_bet4_num = int(ICON3_bet4.get())
ICON3_bet5_num = int(ICON3_bet5.get())
#4
ICON4_bet1_num = int(ICON4_bet1.get())
ICON4_bet2_num = int(ICON4_bet2.get())
ICON4_bet3_num = int(ICON4_bet3.get())
ICON4_bet4_num = int(ICON4_bet4.get())
ICON4_bet5_num = int(ICON4_bet5.get())
#5
ICON5_bet1_num = int(ICON5_bet1.get())
ICON5_bet2_num = int(ICON5_bet2.get())
ICON5_bet3_num = int(ICON5_bet3.get())
ICON5_bet4_num = int(ICON5_bet4.get())
ICON5_bet5_num = int(ICON5_bet5.get())
#6
ICON6_bet1_num = int(ICON6_bet1.get())
ICON6_bet2_num = int(ICON6_bet2.get())
ICON6_bet3_num = int(ICON6_bet3.get())
ICON6_bet4_num = int(ICON6_bet4.get())
ICON6_bet5_num = int(ICON6_bet5.get())
#7
ICON7_bet1_num = int(ICON7_bet1.get())
ICON7_bet2_num = int(ICON7_bet2.get())
ICON7_bet3_num = int(ICON7_bet3.get())
ICON7_bet4_num = int(ICON7_bet4.get())
ICON7_bet5_num = int(ICON7_bet5.get())
#SCAT
SCAT_bet1_num = int(SCAT_bet1.get())
SCAT_bet2_num = int(SCAT_bet2.get())
SCAT_bet3_num = int(SCAT_bet3.get())
SCAT_bet4_num = int(SCAT_bet4.get())
SCAT_bet5_num = int(SCAT_bet5.get())
#WILD
WILD_bet1_num = int(WILD_bet1.get())
WILD_bet2_num = int(WILD_bet2.get())
WILD_bet3_num = int(WILD_bet3.get())
WILD_bet4_num = int(WILD_bet4.get())
WILD_bet5_num = int(WILD_bet5.get())
#Build the combination table
#Symbol types
table_icon = []
for i in range(3):
table_icon.append(ICON1_entry0.get())
for i in range(3):
table_icon.append(ICON2_entry0.get())
for i in range(3):
table_icon.append(ICON3_entry0.get())
for i in range(3):
table_icon.append(ICON4_entry0.get())
for i in range(3):
table_icon.append(ICON5_entry0.get())
for i in range(3):
table_icon.append(ICON6_entry0.get())
for i in range(3):
table_icon.append(ICON7_entry0.get())
table_icon.append(SCAT_entry0.get())
#Match lengths: each of the 7 icons pays on 5, 4 or 3 of a kind; SCAT pays on 3
link = [5,4,3]*7+[3]
#Occurrence counts (to be computed)
appear = []
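#The occurrence counts are not computed above; the commented sketch below
#shows one way they could be filled in. It assumes the per-reel tallies built
#in dataimport() were shared with this function (e.g. via globals) and packed
#into the hypothetical lists wheelists (symbol+WILD counts per reel), notlists
#(non-matching counts per reel) and wheel_sums (total counts per reel).
#For a k-of-a-kind left-to-right line win of symbol s, the combination count is
#the product of matching counts on reels 1..k, times the non-matching count on
#reel k+1 (when k < 5), times the total count on every reel after that.
#for s in range(7):                        #7 regular icons
#    for k in (5, 4, 3):                   #match lengths, as in link above
#        hits = 1
#        for r in range(k):
#            hits *= wheelists[r][s]       #symbol or WILD on reels 1..k
#        if k < 5:
#            hits *= notlists[k][s]        #the run must break on reel k+1
#            for r in range(k + 1, 5):
#                hits *= wheel_sums[r]     #anything on the remaining reels
#        appear.append(hits)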
#DataFrame
wheel_betable = pd.DataFrame({
"圖案名稱":table_icon,
"連線數":link,
#"出現次數":appear
}, index = [ "ICON1","","","ICON2","","","ICON3","","",
"ICON4","","","ICON5","","","ICON6","","",
"ICON7","","","SCAT"])
#Input prompt message
tk.Label(window, text = "Enter a name for each ICON\nand the count of each ICON on each reel (default 0)",
font = ("Arial",10), width = 40 , height = 5).place(x=60,y = 0)
#In-window column headers
tk.Label(window, text = "Symbol name").place(x=70,y=70)
tk.Label(window, text = "Reel 1").place(x=150,y=70)
tk.Label(window, text = "Reel 2").place(x=200,y=70)
tk.Label(window, text = "Reel 3").place(x=250,y=70)
tk.Label(window, text = "Reel 4").place(x=300,y=70)
tk.Label(window, text = "Reel 5").place(x=350,y=70)
#Input fields
#ICON1 row
tk.Label(window,text = "ICON1").place(x=10, y=90)
#ICON1_entry0 = tk.StringVar() #would make the cell hold editable text
ICON1_entry0 = tk.Entry(window,width = 10)#,textvariable=ICON1_entry0) #would bind the Icon1_entry0 value
ICON1_entry0.place(x=60,y=90) #set coordinates
ICON1_entry1 = tk.Entry(window,width = 5)
ICON1_entry1.place(x=150,y=90)
ICON1_entry1.insert(0,"0") #insert default value 0
ICON1_entry2 = tk.Entry(window,width = 5)
ICON1_entry2.place(x=200,y=90)
ICON1_entry2.insert(1,"0")
ICON1_entry3 = tk.Entry(window,width = 5)
ICON1_entry3.place(x=250,y=90)
ICON1_entry3.insert(2,"0")
ICON1_entry4 = tk.Entry(window,width = 5)
ICON1_entry4.place(x=300,y=90)
ICON1_entry4.insert(3,"0")
ICON1_entry5 = tk.Entry(window,width = 5)
ICON1_entry5.place(x=350,y=90)
ICON1_entry5.insert(4,"0")
#ICON2 row
tk.Label(window,text = "ICON2").place(x=10, y=115)
ICON2_entry0 = tk.Entry(window,width = 10)
ICON2_entry0.place(x=60,y=115)
ICON2_entry1 = tk.Entry(window,width = 5)
ICON2_entry1.place(x=150,y=115)
ICON2_entry1.insert(0,"0")
ICON2_entry2 = tk.Entry(window,width = 5)
ICON2_entry2.place(x=200,y=115)
ICON2_entry2.insert(1,"0")
ICON2_entry3 = tk.Entry(window,width = 5)
ICON2_entry3.place(x=250,y=115)
ICON2_entry3.insert(2,"0")
ICON2_entry4 = tk.Entry(window,width = 5)
ICON2_entry4.place(x=300,y=115)
ICON2_entry4.insert(3,"0")
ICON2_entry5 = tk.Entry(window,width = 5)
ICON2_entry5.place(x=350,y=115)
ICON2_entry5.insert(4,"0")
#ICON3 row
tk.Label(window,text = "ICON3").place(x=10, y=140)
ICON3_entry0 = tk.Entry(window,width = 10)
ICON3_entry0.place(x=60,y=140)
ICON3_entry1 = tk.Entry(window,width = 5)
ICON3_entry1.place(x=150,y=140)
ICON3_entry1.insert(0,"0")
ICON3_entry2 = tk.Entry(window,width = 5)
ICON3_entry2.place(x=200,y=140)
ICON3_entry2.insert(1,"0")
ICON3_entry3 = tk.Entry(window,width = 5)
ICON3_entry3.place(x=250,y=140)
ICON3_entry3.insert(2,"0")
ICON3_entry4 = tk.Entry(window,width = 5)
ICON3_entry4.place(x=300,y=140)
ICON3_entry4.insert(3,"0")
ICON3_entry5 = tk.Entry(window,width = 5)
ICON3_entry5.place(x=350,y=140)
ICON3_entry5.insert(4,"0")
#ICON4 row
tk.Label(window,text = "ICON4").place(x=10, y=165)
ICON4_entry0 = tk.Entry(window,width = 10)
ICON4_entry0.place(x=60,y=165)
ICON4_entry1 = tk.Entry(window,width = 5)
ICON4_entry1.place(x=150,y=165)
ICON4_entry1.insert(0,"0")
ICON4_entry2 = tk.Entry(window,width = 5)
ICON4_entry2.place(x=200,y=165)
ICON4_entry2.insert(1,"0")
ICON4_entry3 = tk.Entry(window,width = 5)
ICON4_entry3.place(x=250,y=165)
ICON4_entry3.insert(2,"0")
ICON4_entry4 = tk.Entry(window,width = 5)
ICON4_entry4.place(x=300,y=165)
ICON4_entry4.insert(3,"0")
ICON4_entry5 = tk.Entry(window,width = 5)
ICON4_entry5.place(x=350,y=165)
ICON4_entry5.insert(4,"0")
#ICON5 row
tk.Label(window,text = "ICON5").place(x=10, y=190)
ICON5_entry0 = tk.Entry(window,width = 10)
ICON5_entry0.place(x=60,y=190)
ICON5_entry1 = tk.Entry(window,width = 5)
ICON5_entry1.place(x=150,y=190)
ICON5_entry1.insert(0,"0")
ICON5_entry2 = tk.Entry(window,width = 5)
ICON5_entry2.place(x=200,y=190)
ICON5_entry2.insert(1,"0")
ICON5_entry3 = tk.Entry(window,width = 5)
ICON5_entry3.place(x=250,y=190)
ICON5_entry3.insert(2,"0")
ICON5_entry4 = tk.Entry(window,width = 5)
ICON5_entry4.place(x=300,y=190)
ICON5_entry4.insert(3,"0")
ICON5_entry5 = tk.Entry(window,width = 5)
ICON5_entry5.place(x=350,y=190)
ICON5_entry5.insert(4,"0")
#ICON6 row
tk.Label(window,text = "ICON6").place(x=10, y=215)
ICON6_entry0 = tk.Entry(window,width = 10)
ICON6_entry0.place(x=60,y=215)
ICON6_entry1 = tk.Entry(window,width = 5)
ICON6_entry1.place(x=150,y=215)
ICON6_entry1.insert(0,"0")
ICON6_entry2 = tk.Entry(window,width = 5)
ICON6_entry2.place(x=200,y=215)
ICON6_entry2.insert(1,"0")
ICON6_entry3 = tk.Entry(window,width = 5)
ICON6_entry3.place(x=250,y=215)
ICON6_entry3.insert(2,"0")
ICON6_entry4 = tk.Entry(window,width = 5)
ICON6_entry4.place(x=300,y=215)
ICON6_entry4.insert(3,"0")
ICON6_entry5 = tk.Entry(window,width = 5)
ICON6_entry5.place(x=350,y=215)
ICON6_entry5.insert(4,"0")
#ICON7 row
tk.Label(window,text = "ICON7").place(x=10, y=240)
ICON7_entry0 = tk.Entry(window,width = 10)
ICON7_entry0.place(x=60,y=240)
ICON7_entry1 = tk.Entry(window,width = 5)
ICON7_entry1.place(x=150,y=240)
ICON7_entry1.insert(0,"0")
ICON7_entry2 = tk.Entry(window,width = 5)
ICON7_entry2.place(x=200,y=240)
ICON7_entry2.insert(1,"0")
ICON7_entry3 = tk.Entry(window,width = 5)
ICON7_entry3.place(x=250,y=240)
ICON7_entry3.insert(2,"0")
ICON7_entry4 = tk.Entry(window,width = 5)
ICON7_entry4.place(x=300,y=240)
ICON7_entry4.insert(3,"0")
ICON7_entry5 = tk.Entry(window,width = 5)
ICON7_entry5.place(x=350,y=240)
ICON7_entry5.insert(4,"0")
#SCAT row
tk.Label(window,text = "SCAT").place(x=10, y=265)
SCAT_entry0 = tk.Entry(window,width = 10)
SCAT_entry0.place(x=60,y=265)
SCAT_entry1 = tk.Entry(window,width = 5)
SCAT_entry1.place(x=150,y=265)
SCAT_entry1.insert(0,"0")
SCAT_entry2 = tk.Entry(window,width = 5)
SCAT_entry2.place(x=200,y=265)
SCAT_entry2.insert(1,"0")
SCAT_entry3 = tk.Entry(window,width = 5)
SCAT_entry3.place(x=250,y=265)
SCAT_entry3.insert(2,"0")
SCAT_entry4 = tk.Entry(window,width = 5)
SCAT_entry4.place(x=300,y=265)
SCAT_entry4.insert(3,"0")
SCAT_entry5 = tk.Entry(window,width = 5)
SCAT_entry5.place(x=350,y=265)
SCAT_entry5.insert(4,"0")
#WILD row
tk.Label(window,text = "WILD").place(x=10, y=290)
WILD_entry0 = tk.Entry(window,width = 10)
WILD_entry0.place(x=60,y=290)
WILD_entry1 = tk.Entry(window,width = 5)
WILD_entry1.place(x=150,y=290)
WILD_entry1.insert(0,"0")
WILD_entry2 = tk.Entry(window,width = 5)
WILD_entry2.place(x=200,y=290)
WILD_entry2.insert(1,"0")
WILD_entry3 = tk.Entry(window,width = 5)
WILD_entry3.place(x=250,y=290)
WILD_entry3.insert(2,"0")
WILD_entry4 = tk.Entry(window,width = 5)
WILD_entry4.place(x=300,y=290)
WILD_entry4.insert(3,"0")
WILD_entry5 = tk.Entry(window,width = 5)
WILD_entry5.place(x=350,y=290)
WILD_entry5.insert(4,"0")
#Data-entry button
button1 = tk.Button(window, text = "Load data",command = dataimport) #create the button, set its label and callback
button1.place(x=190,y=320)
#Separator line for visual layout
tk.Label(window, text = "-"*35+"separator"+"-"*35).place(x=10,y=350)
#Input prompt (payout odds)
tk.Label(window, text = "Enter the payout for each ICON (default 0)",
font = ("Arial",10), width = 30 , height = 2).place(x=95,y = 370)
#Payout table headers
tk.Label(window, text = "Symbol name").place(x=70,y=410)
tk.Label(window, text = "1-line").place(x=150,y=410)
tk.Label(window, text = "2-line").place(x=200,y=410)
tk.Label(window, text = "3-line").place(x=250,y=410)
tk.Label(window, text = "4-line").place(x=300,y=410)
tk.Label(window, text = "5-line").place(x=350,y=410)
#Payout row 1
tk.Label(window,text = "ICON1").place(x=10, y=440)
result1_str = tk.StringVar()
result1_label = tk.Label(window, textvariable=result1_str)
result1_label.place(x=70,y=440)
ICON1_bet1 = tk.Entry(window,width = 5)
ICON1_bet1.place(x=150,y=440)
ICON1_bet1.insert(0,"0")
ICON1_bet2 = tk.Entry(window,width = 5)
ICON1_bet2.place(x=200,y=440)
ICON1_bet2.insert(1,"0")
ICON1_bet3 = tk.Entry(window,width = 5)
ICON1_bet3.place(x=250,y=440)
ICON1_bet3.insert(2,"0")
ICON1_bet4 = tk.Entry(window,width = 5)
ICON1_bet4.place(x=300,y=440)
ICON1_bet4.insert(3,"0")
ICON1_bet5 = tk.Entry(window,width = 5)
ICON1_bet5.place(x=350,y=440)
ICON1_bet5.insert(4,"0")
#Payout row 2
tk.Label(window,text = "ICON2").place(x=10, y=465)
result2_str = tk.StringVar()
result2_label = tk.Label(window, textvariable=result2_str)
result2_label.place(x=70,y=465)
ICON2_bet1 = tk.Entry(window,width = 5)
ICON2_bet1.place(x=150,y=465)
ICON2_bet1.insert(0,"0")
ICON2_bet2 = tk.Entry(window,width = 5)
ICON2_bet2.place(x=200,y=465)
ICON2_bet2.insert(1,"0")
ICON2_bet3 = tk.Entry(window,width = 5)
ICON2_bet3.place(x=250,y=465)
ICON2_bet3.insert(2,"0")
ICON2_bet4 = tk.Entry(window,width = 5)
ICON2_bet4.place(x=300,y=465)
ICON2_bet4.insert(3,"0")
ICON2_bet5 = tk.Entry(window,width = 5)
ICON2_bet5.place(x=350,y=465)
ICON2_bet5.insert(4,"0")
#Payout row 3
tk.Label(window,text = "ICON3").place(x=10, y=490)
result3_str = tk.StringVar()
result3_label = tk.Label(window, textvariable=result3_str)
result3_label.place(x=70,y=490)
ICON3_bet1 = tk.Entry(window,width = 5)
ICON3_bet1.place(x=150,y=490)
ICON3_bet1.insert(0,"0")
ICON3_bet2 = tk.Entry(window,width = 5)
ICON3_bet2.place(x=200,y=490)
ICON3_bet2.insert(1,"0")
ICON3_bet3 = tk.Entry(window,width = 5)
ICON3_bet3.place(x=250,y=490)
ICON3_bet3.insert(2,"0")
ICON3_bet4 = tk.Entry(window,width = 5)
ICON3_bet4.place(x=300,y=490)
ICON3_bet4.insert(3,"0")
ICON3_bet5 = tk.Entry(window,width = 5)
ICON3_bet5.place(x=350,y=490)
ICON3_bet5.insert(4,"0")
#Payout row 4
tk.Label(window,text = "ICON4").place(x=10, y=515)
result4_str = tk.StringVar()
result4_label = tk.Label(window, textvariable=result4_str)
result4_label.place(x=70,y=515)
ICON4_bet1 = tk.Entry(window,width = 5)
ICON4_bet1.place(x=150,y=515)
ICON4_bet1.insert(0,"0")
ICON4_bet2 = tk.Entry(window,width = 5)
ICON4_bet2.place(x=200,y=515)
ICON4_bet2.insert(1,"0")
ICON4_bet3 = tk.Entry(window,width = 5)
ICON4_bet3.place(x=250,y=515)
ICON4_bet3.insert(2,"0")
ICON4_bet4 = tk.Entry(window,width = 5)
ICON4_bet4.place(x=300,y=515)
ICON4_bet4.insert(3,"0")
ICON4_bet5 = tk.Entry(window,width = 5)
ICON4_bet5.place(x=350,y=515)
ICON4_bet5.insert(4,"0")
#Payout row 5
tk.Label(window,text = "ICON5").place(x=10, y=540)
result5_str = tk.StringVar()
result5_label = tk.Label(window, textvariable=result5_str)
result5_label.place(x=70,y=540)
ICON5_bet1 = tk.Entry(window,width = 5)
ICON5_bet1.place(x=150,y=540)
ICON5_bet1.insert(0,"0")
ICON5_bet2 = tk.Entry(window,width = 5)
ICON5_bet2.place(x=200,y=540)
ICON5_bet2.insert(1,"0")
ICON5_bet3 = tk.Entry(window,width = 5)
ICON5_bet3.place(x=250,y=540)
ICON5_bet3.insert(2,"0")
ICON5_bet4 = tk.Entry(window,width = 5)
ICON5_bet4.place(x=300,y=540)
ICON5_bet4.insert(3,"0")
ICON5_bet5 = tk.Entry(window,width = 5)
ICON5_bet5.place(x=350,y=540)
ICON5_bet5.insert(4,"0")
#Payout row 6
tk.Label(window,text = "ICON6").place(x=10, y=565)
result6_str = tk.StringVar()
result6_label = tk.Label(window, textvariable=result6_str)
result6_label.place(x=70,y=565)
ICON6_bet1 = tk.Entry(window,width = 5)
ICON6_bet1.place(x=150,y=565)
ICON6_bet1.insert(0,"0")
ICON6_bet2 = tk.Entry(window,width = 5)
ICON6_bet2.place(x=200,y=565)
ICON6_bet2.insert(1,"0")
ICON6_bet3 = tk.Entry(window,width = 5)
ICON6_bet3.place(x=250,y=565)
ICON6_bet3.insert(2,"0")
ICON6_bet4 = tk.Entry(window,width = 5)
ICON6_bet4.place(x=300,y=565)
ICON6_bet4.insert(3,"0")
ICON6_bet5 = tk.Entry(window,width = 5)
ICON6_bet5.place(x=350,y=565)
ICON6_bet5.insert(4,"0")
#Payout row 7
tk.Label(window,text = "ICON7").place(x=10, y=590)
result7_str = tk.StringVar()
result7_label = tk.Label(window, textvariable=result7_str)
result7_label.place(x=70,y=590)
ICON7_bet1 = tk.Entry(window,width = 5)
ICON7_bet1.place(x=150,y=590)
ICON7_bet1.insert(0,"0")
ICON7_bet2 = tk.Entry(window,width = 5)
ICON7_bet2.place(x=200,y=590)
ICON7_bet2.insert(1,"0")
ICON7_bet3 = tk.Entry(window,width = 5)
ICON7_bet3.place(x=250,y=590)
ICON7_bet3.insert(2,"0")
ICON7_bet4 = tk.Entry(window,width = 5)
ICON7_bet4.place(x=300,y=590)
ICON7_bet4.insert(3,"0")
ICON7_bet5 = tk.Entry(window,width = 5)
ICON7_bet5.place(x=350,y=590)
ICON7_bet5.insert(4,"0")
#SCAT payout row
tk.Label(window,text = "SCAT").place(x=10, y=615)
SCAT_str = tk.StringVar()
SCAT_label = tk.Label(window, textvariable=SCAT_str)
SCAT_label.place(x=70,y=615)
SCAT_bet1 = tk.Entry(window,width = 5)
SCAT_bet1.place(x=150,y=615)
SCAT_bet1.insert(0,"0")
SCAT_bet2 = tk.Entry(window,width = 5)
SCAT_bet2.place(x=200,y=615)
SCAT_bet2.insert(1,"0")
SCAT_bet3 = tk.Entry(window,width = 5)
SCAT_bet3.place(x=250,y=615)
SCAT_bet3.insert(2,"0")
SCAT_bet4 = tk.Entry(window,width = 5)
SCAT_bet4.place(x=300,y=615)
SCAT_bet4.insert(3,"0")
SCAT_bet5 = tk.Entry(window,width = 5)
SCAT_bet5.place(x=350,y=615)
SCAT_bet5.insert(4,"0")
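#The source is truncated above; the lines below are a minimal sketch of the
#remainder, following the established row pattern. A WILD payout row must
#exist, since dataimport() sets WILD_str and datacalcul() reads
#WILD_bet1..WILD_bet5; the y-coordinates, button position and button label
#here are assumptions.
tk.Label(window,text = "WILD").place(x=10, y=640)
WILD_str = tk.StringVar()
WILD_label = tk.Label(window, textvariable=WILD_str)
WILD_label.place(x=70,y=640)
WILD_bet1 = tk.Entry(window,width = 5)
WILD_bet1.place(x=150,y=640)
WILD_bet1.insert(0,"0")
WILD_bet2 = tk.Entry(window,width = 5)
WILD_bet2.place(x=200,y=640)
WILD_bet2.insert(1,"0")
WILD_bet3 = tk.Entry(window,width = 5)
WILD_bet3.place(x=250,y=640)
WILD_bet3.insert(2,"0")
WILD_bet4 = tk.Entry(window,width = 5)
WILD_bet4.place(x=300,y=640)
WILD_bet4.insert(3,"0")
WILD_bet5 = tk.Entry(window,width = 5)
WILD_bet5.place(x=350,y=640)
WILD_bet5.insert(4,"0")
#Button that triggers the payout calculation, then the Tk event loop
button2 = tk.Button(window, text = "Calculate", command = datacalcul)
button2.place(x=190,y=670)
window.mainloop() #start the GUI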
# Source repo: dasxran/seleniumMachineLearning
MV_FLAG = 4096 # Multi-value flag
PT_UNSPECIFIED = 0
PT_NULL = 1
PT_I2 = 2
PT_LONG = 3
PT_R4 = 4
PT_DOUBLE = 5
PT_CURRENCY = 6
PT_APPTIME = 7
PT_ERROR = 10
PT_BOOLEAN = 11
PT_OBJECT = 13
PT_I8 = 20
PT_STRING8 = 30
PT_UNICODE = 31
PT_SYSTIME = 64
PT_CLSID = 72
PT_BINARY = 258
PT_SHORT = PT_I2
PT_I4 = PT_LONG
PT_FLOAT = PT_R4
PT_R8 = PT_DOUBLE
PT_LONGLONG = PT_I8
PT_MV_I2 = (MV_FLAG|PT_I2)
PT_MV_LONG = (MV_FLAG|PT_LONG)
PT_MV_R4 = (MV_FLAG|PT_R4)
PT_MV_DOUBLE = (MV_FLAG|PT_DOUBLE)
PT_MV_CURRENCY = (MV_FLAG|PT_CURRENCY)
PT_MV_APPTIME = (MV_FLAG|PT_APPTIME)
PT_MV_SYSTIME = (MV_FLAG|PT_SYSTIME)
PT_MV_STRING8 = (MV_FLAG|PT_STRING8)
PT_MV_BINARY = (MV_FLAG|PT_BINARY)
PT_MV_UNICODE = (MV_FLAG|PT_UNICODE)
PT_MV_CLSID = (MV_FLAG|PT_CLSID)
PT_MV_I8 = (MV_FLAG|PT_I8)
PT_MV_SHORT = PT_MV_I2
PT_MV_I4 = PT_MV_LONG
PT_MV_FLOAT = PT_MV_R4
PT_MV_R8 = PT_MV_DOUBLE
PT_MV_LONGLONG = PT_MV_I8
PT_TSTRING = PT_UNICODE # TSTRING maps to UNICODE here (STRING8 in non-Unicode builds)
PT_MV_TSTRING = (MV_FLAG|PT_UNICODE)
PROP_TYPE_MASK = 65535 # Mask for Property type
def PROP_TYPE(ulPropTag):
return ulPropTag & PROP_TYPE_MASK
def PROP_ID(ulPropTag):
return ulPropTag>>16
def PROP_TAG(ulPropType,ulPropID):
return (ulPropID<<16)|(ulPropType)
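#Example (sketch): a property tag packs a 16-bit property ID in the high word
#and a 16-bit type in the low word, so the three helpers round-trip:
#  PROP_TAG(PT_LONG, 23) == 0x00170003 == PR_IMPORTANCE (defined below)
#  PROP_TYPE(0x00170003) == PT_LONG    #low 16 bits
#  PROP_ID(0x00170003) == 23           #high 16 bits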
PROP_ID_NULL = 0
PROP_ID_INVALID = 65535
PR_NULL = PROP_TAG( PT_NULL, PROP_ID_NULL)
PR_ACKNOWLEDGEMENT_MODE = PROP_TAG( PT_LONG, 1)
PR_ALTERNATE_RECIPIENT_ALLOWED = PROP_TAG( PT_BOOLEAN, 2)
PR_AUTHORIZING_USERS = PROP_TAG( PT_BINARY, 3)
PR_AUTO_FORWARD_COMMENT = PROP_TAG( PT_TSTRING, 4)
PR_AUTO_FORWARD_COMMENT_W = PROP_TAG( PT_UNICODE, 4)
PR_AUTO_FORWARD_COMMENT_A = PROP_TAG( PT_STRING8, 4)
PR_AUTO_FORWARDED = PROP_TAG( PT_BOOLEAN, 5)
PR_CONTENT_CONFIDENTIALITY_ALGORITHM_ID = PROP_TAG( PT_BINARY, 6)
PR_CONTENT_CORRELATOR = PROP_TAG( PT_BINARY, 7)
PR_CONTENT_IDENTIFIER = PROP_TAG( PT_TSTRING, 8)
PR_CONTENT_IDENTIFIER_W = PROP_TAG( PT_UNICODE, 8)
PR_CONTENT_IDENTIFIER_A = PROP_TAG( PT_STRING8, 8)
PR_CONTENT_LENGTH = PROP_TAG( PT_LONG, 9)
PR_CONTENT_RETURN_REQUESTED = PROP_TAG( PT_BOOLEAN, 10)
PR_CONVERSATION_KEY = PROP_TAG( PT_BINARY, 11)
PR_CONVERSION_EITS = PROP_TAG( PT_BINARY, 12)
PR_CONVERSION_WITH_LOSS_PROHIBITED = PROP_TAG( PT_BOOLEAN, 13)
PR_CONVERTED_EITS = PROP_TAG( PT_BINARY, 14)
PR_DEFERRED_DELIVERY_TIME = PROP_TAG( PT_SYSTIME, 15)
PR_DELIVER_TIME = PROP_TAG( PT_SYSTIME, 16)
PR_DISCARD_REASON = PROP_TAG( PT_LONG, 17)
PR_DISCLOSURE_OF_RECIPIENTS = PROP_TAG( PT_BOOLEAN, 18)
PR_DL_EXPANSION_HISTORY = PROP_TAG( PT_BINARY, 19)
PR_DL_EXPANSION_PROHIBITED = PROP_TAG( PT_BOOLEAN, 20)
PR_EXPIRY_TIME = PROP_TAG( PT_SYSTIME, 21)
PR_IMPLICIT_CONVERSION_PROHIBITED = PROP_TAG( PT_BOOLEAN, 22)
PR_IMPORTANCE = PROP_TAG( PT_LONG, 23)
PR_IPM_ID = PROP_TAG( PT_BINARY, 24)
PR_LATEST_DELIVERY_TIME = PROP_TAG( PT_SYSTIME, 25)
PR_MESSAGE_CLASS = PROP_TAG( PT_TSTRING, 26)
PR_MESSAGE_CLASS_W = PROP_TAG( PT_UNICODE, 26)
PR_MESSAGE_CLASS_A = PROP_TAG( PT_STRING8, 26)
PR_MESSAGE_DELIVERY_ID = PROP_TAG( PT_BINARY, 27)
PR_MESSAGE_SECURITY_LABEL = PROP_TAG( PT_BINARY, 30)
PR_OBSOLETED_IPMS = PROP_TAG( PT_BINARY, 31)
PR_ORIGINALLY_INTENDED_RECIPIENT_NAME = PROP_TAG( PT_BINARY, 32)
PR_ORIGINAL_EITS = PROP_TAG( PT_BINARY, 33)
PR_ORIGINATOR_CERTIFICATE = PROP_TAG( PT_BINARY, 34)
PR_ORIGINATOR_DELIVERY_REPORT_REQUESTED = PROP_TAG( PT_BOOLEAN, 35)
PR_ORIGINATOR_RETURN_ADDRESS = PROP_TAG( PT_BINARY, 36)
PR_PARENT_KEY = PROP_TAG( PT_BINARY, 37)
PR_PRIORITY = PROP_TAG( PT_LONG, 38)
PR_ORIGIN_CHECK = PROP_TAG( PT_BINARY, 39)
PR_PROOF_OF_SUBMISSION_REQUESTED = PROP_TAG( PT_BOOLEAN, 40)
PR_READ_RECEIPT_REQUESTED = PROP_TAG( PT_BOOLEAN, 41)
PR_RECEIPT_TIME = PROP_TAG( PT_SYSTIME, 42)
PR_RECIPIENT_REASSIGNMENT_PROHIBITED = PROP_TAG( PT_BOOLEAN, 43)
PR_REDIRECTION_HISTORY = PROP_TAG( PT_BINARY, 44)
PR_RELATED_IPMS = PROP_TAG( PT_BINARY, 45)
PR_ORIGINAL_SENSITIVITY = PROP_TAG( PT_LONG, 46)
PR_LANGUAGES = PROP_TAG( PT_TSTRING, 47)
PR_LANGUAGES_W = PROP_TAG( PT_UNICODE, 47)
PR_LANGUAGES_A = PROP_TAG( PT_STRING8, 47)
PR_REPLY_TIME = PROP_TAG( PT_SYSTIME, 48)
PR_REPORT_TAG = PROP_TAG( PT_BINARY, 49)
PR_REPORT_TIME = PROP_TAG( PT_SYSTIME, 50)
PR_RETURNED_IPM = PROP_TAG( PT_BOOLEAN, 51)
PR_SECURITY = PROP_TAG( PT_LONG, 52)
PR_INCOMPLETE_COPY = PROP_TAG( PT_BOOLEAN, 53)
PR_SENSITIVITY = PROP_TAG( PT_LONG, 54)
PR_SUBJECT = PROP_TAG( PT_TSTRING, 55)
PR_SUBJECT_W = PROP_TAG( PT_UNICODE, 55)
PR_SUBJECT_A = PROP_TAG( PT_STRING8, 55)
PR_SUBJECT_IPM = PROP_TAG( PT_BINARY, 56)
PR_CLIENT_SUBMIT_TIME = PROP_TAG( PT_SYSTIME, 57)
PR_REPORT_NAME = PROP_TAG( PT_TSTRING, 58)
PR_REPORT_NAME_W = PROP_TAG( PT_UNICODE, 58)
PR_REPORT_NAME_A = PROP_TAG( PT_STRING8, 58)
PR_SENT_REPRESENTING_SEARCH_KEY = PROP_TAG( PT_BINARY, 59)
PR_X400_CONTENT_TYPE = PROP_TAG( PT_BINARY, 60)
PR_SUBJECT_PREFIX = PROP_TAG( PT_TSTRING, 61)
PR_SUBJECT_PREFIX_W = PROP_TAG( PT_UNICODE, 61)
PR_SUBJECT_PREFIX_A = PROP_TAG( PT_STRING8, 61)
PR_NON_RECEIPT_REASON = PROP_TAG( PT_LONG, 62)
PR_RECEIVED_BY_ENTRYID = PROP_TAG( PT_BINARY, 63)
PR_RECEIVED_BY_NAME = PROP_TAG( PT_TSTRING, 64)
PR_RECEIVED_BY_NAME_W = PROP_TAG( PT_UNICODE, 64)
PR_RECEIVED_BY_NAME_A = PROP_TAG( PT_STRING8, 64)
PR_SENT_REPRESENTING_ENTRYID = PROP_TAG( PT_BINARY, 65)
PR_SENT_REPRESENTING_NAME = PROP_TAG( PT_TSTRING, 66)
PR_SENT_REPRESENTING_NAME_W = PROP_TAG( PT_UNICODE, 66)
PR_SENT_REPRESENTING_NAME_A = PROP_TAG( PT_STRING8, 66)
PR_RCVD_REPRESENTING_ENTRYID = PROP_TAG( PT_BINARY, 67)
PR_RCVD_REPRESENTING_NAME = PROP_TAG( PT_TSTRING, 68)
PR_RCVD_REPRESENTING_NAME_W = PROP_TAG( PT_UNICODE, 68)
PR_RCVD_REPRESENTING_NAME_A = PROP_TAG( PT_STRING8, 68)
PR_REPORT_ENTRYID = PROP_TAG( PT_BINARY, 69)
PR_READ_RECEIPT_ENTRYID = PROP_TAG( PT_BINARY, 70)
PR_MESSAGE_SUBMISSION_ID = PROP_TAG( PT_BINARY, 71)
PR_PROVIDER_SUBMIT_TIME = PROP_TAG( PT_SYSTIME, 72)
PR_ORIGINAL_SUBJECT = PROP_TAG( PT_TSTRING, 73)
PR_ORIGINAL_SUBJECT_W = PROP_TAG( PT_UNICODE, 73)
PR_ORIGINAL_SUBJECT_A = PROP_TAG( PT_STRING8, 73)
PR_DISC_VAL = PROP_TAG( PT_BOOLEAN, 74)
PR_ORIG_MESSAGE_CLASS = PROP_TAG( PT_TSTRING, 75)
PR_ORIG_MESSAGE_CLASS_W = PROP_TAG( PT_UNICODE, 75)
PR_ORIG_MESSAGE_CLASS_A = PROP_TAG( PT_STRING8, 75)
PR_ORIGINAL_AUTHOR_ENTRYID = PROP_TAG( PT_BINARY, 76)
PR_ORIGINAL_AUTHOR_NAME = PROP_TAG( PT_TSTRING, 77)
PR_ORIGINAL_AUTHOR_NAME_W = PROP_TAG( PT_UNICODE, 77)
PR_ORIGINAL_AUTHOR_NAME_A = PROP_TAG( PT_STRING8, 77)
PR_ORIGINAL_SUBMIT_TIME = PROP_TAG( PT_SYSTIME, 78)
PR_REPLY_RECIPIENT_ENTRIES = PROP_TAG( PT_BINARY, 79)
PR_REPLY_RECIPIENT_NAMES = PROP_TAG( PT_TSTRING, 80)
PR_REPLY_RECIPIENT_NAMES_W = PROP_TAG( PT_UNICODE, 80)
PR_REPLY_RECIPIENT_NAMES_A = PROP_TAG( PT_STRING8, 80)
PR_RECEIVED_BY_SEARCH_KEY = PROP_TAG( PT_BINARY, 81)
PR_RCVD_REPRESENTING_SEARCH_KEY = PROP_TAG( PT_BINARY, 82)
PR_READ_RECEIPT_SEARCH_KEY = PROP_TAG( PT_BINARY, 83)
PR_REPORT_SEARCH_KEY = PROP_TAG( PT_BINARY, 84)
PR_ORIGINAL_DELIVERY_TIME = PROP_TAG( PT_SYSTIME, 85)
PR_ORIGINAL_AUTHOR_SEARCH_KEY = PROP_TAG( PT_BINARY, 86)
PR_MESSAGE_TO_ME = PROP_TAG( PT_BOOLEAN, 87)
PR_MESSAGE_CC_ME = PROP_TAG( PT_BOOLEAN, 88)
PR_MESSAGE_RECIP_ME = PROP_TAG( PT_BOOLEAN, 89)
PR_ORIGINAL_SENDER_NAME = PROP_TAG( PT_TSTRING, 90)
PR_ORIGINAL_SENDER_NAME_W = PROP_TAG( PT_UNICODE, 90)
PR_ORIGINAL_SENDER_NAME_A = PROP_TAG( PT_STRING8, 90)
PR_ORIGINAL_SENDER_ENTRYID = PROP_TAG( PT_BINARY, 91)
PR_ORIGINAL_SENDER_SEARCH_KEY = PROP_TAG( PT_BINARY, 92)
PR_ORIGINAL_SENT_REPRESENTING_NAME = PROP_TAG( PT_TSTRING, 93)
PR_ORIGINAL_SENT_REPRESENTING_NAME_W = PROP_TAG( PT_UNICODE, 93)
PR_ORIGINAL_SENT_REPRESENTING_NAME_A = PROP_TAG( PT_STRING8, 93)
PR_ORIGINAL_SENT_REPRESENTING_ENTRYID = PROP_TAG( PT_BINARY, 94)
PR_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY = PROP_TAG( PT_BINARY, 95)
PR_START_DATE = PROP_TAG( PT_SYSTIME, 96)
PR_END_DATE = PROP_TAG( PT_SYSTIME, 97)
PR_OWNER_APPT_ID = PROP_TAG( PT_LONG, 98)
PR_RESPONSE_REQUESTED = PROP_TAG( PT_BOOLEAN, 99)
PR_SENT_REPRESENTING_ADDRTYPE = PROP_TAG( PT_TSTRING, 100)
PR_SENT_REPRESENTING_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 100)
PR_SENT_REPRESENTING_ADDRTYPE_A = PROP_TAG( PT_STRING8, 100)
PR_SENT_REPRESENTING_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 101)
PR_SENT_REPRESENTING_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 101)
PR_SENT_REPRESENTING_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 101)
PR_ORIGINAL_SENDER_ADDRTYPE = PROP_TAG( PT_TSTRING, 102)
PR_ORIGINAL_SENDER_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 102)
PR_ORIGINAL_SENDER_ADDRTYPE_A = PROP_TAG( PT_STRING8, 102)
PR_ORIGINAL_SENDER_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 103)
PR_ORIGINAL_SENDER_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 103)
PR_ORIGINAL_SENDER_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 103)
PR_ORIGINAL_SENT_REPRESENTING_ADDRTYPE = PROP_TAG( PT_TSTRING, 104)
PR_ORIGINAL_SENT_REPRESENTING_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 104)
PR_ORIGINAL_SENT_REPRESENTING_ADDRTYPE_A = PROP_TAG( PT_STRING8, 104)
PR_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 105)
PR_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 105)
PR_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 105)
PR_CONVERSATION_TOPIC = PROP_TAG( PT_TSTRING, 112)
PR_CONVERSATION_TOPIC_W = PROP_TAG( PT_UNICODE, 112)
PR_CONVERSATION_TOPIC_A = PROP_TAG( PT_STRING8, 112)
PR_CONVERSATION_INDEX = PROP_TAG( PT_BINARY, 113)
PR_ORIGINAL_DISPLAY_BCC = PROP_TAG( PT_TSTRING, 114)
PR_ORIGINAL_DISPLAY_BCC_W = PROP_TAG( PT_UNICODE, 114)
PR_ORIGINAL_DISPLAY_BCC_A = PROP_TAG( PT_STRING8, 114)
PR_ORIGINAL_DISPLAY_CC = PROP_TAG( PT_TSTRING, 115)
PR_ORIGINAL_DISPLAY_CC_W = PROP_TAG( PT_UNICODE, 115)
PR_ORIGINAL_DISPLAY_CC_A = PROP_TAG( PT_STRING8, 115)
PR_ORIGINAL_DISPLAY_TO = PROP_TAG( PT_TSTRING, 116)
PR_ORIGINAL_DISPLAY_TO_W = PROP_TAG( PT_UNICODE, 116)
PR_ORIGINAL_DISPLAY_TO_A = PROP_TAG( PT_STRING8, 116)
PR_RECEIVED_BY_ADDRTYPE = PROP_TAG( PT_TSTRING, 117)
PR_RECEIVED_BY_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 117)
PR_RECEIVED_BY_ADDRTYPE_A = PROP_TAG( PT_STRING8, 117)
PR_RECEIVED_BY_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 118)
PR_RECEIVED_BY_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 118)
PR_RECEIVED_BY_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 118)
PR_RCVD_REPRESENTING_ADDRTYPE = PROP_TAG( PT_TSTRING, 119)
PR_RCVD_REPRESENTING_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 119)
PR_RCVD_REPRESENTING_ADDRTYPE_A = PROP_TAG( PT_STRING8, 119)
PR_RCVD_REPRESENTING_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 120)
PR_RCVD_REPRESENTING_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 120)
PR_RCVD_REPRESENTING_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 120)
PR_ORIGINAL_AUTHOR_ADDRTYPE = PROP_TAG( PT_TSTRING, 121)
PR_ORIGINAL_AUTHOR_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 121)
PR_ORIGINAL_AUTHOR_ADDRTYPE_A = PROP_TAG( PT_STRING8, 121)
PR_ORIGINAL_AUTHOR_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 122)
PR_ORIGINAL_AUTHOR_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 122)
PR_ORIGINAL_AUTHOR_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 122)
PR_ORIGINALLY_INTENDED_RECIP_ADDRTYPE = PROP_TAG( PT_TSTRING, 123)
PR_ORIGINALLY_INTENDED_RECIP_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 123)
PR_ORIGINALLY_INTENDED_RECIP_ADDRTYPE_A = PROP_TAG( PT_STRING8, 123)
PR_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 124)
PR_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 124)
PR_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 124)
PR_TRANSPORT_MESSAGE_HEADERS = PROP_TAG(PT_TSTRING, 125)
PR_TRANSPORT_MESSAGE_HEADERS_W = PROP_TAG(PT_UNICODE, 125)
PR_TRANSPORT_MESSAGE_HEADERS_A = PROP_TAG(PT_STRING8, 125)
PR_DELEGATION = PROP_TAG(PT_BINARY, 126)
PR_TNEF_CORRELATION_KEY = PROP_TAG(PT_BINARY, 127)
PR_BODY = PROP_TAG( PT_TSTRING, 4096)
PR_BODY_W = PROP_TAG( PT_UNICODE, 4096)
PR_BODY_A = PROP_TAG( PT_STRING8, 4096)
PR_BODY_HTML = PROP_TAG( PT_TSTRING, 4115)
PR_BODY_HTML_W = PROP_TAG( PT_UNICODE, 4115)
PR_BODY_HTML_A = PROP_TAG( PT_STRING8, 4115)
PR_REPORT_TEXT = PROP_TAG( PT_TSTRING, 4097)
PR_REPORT_TEXT_W = PROP_TAG( PT_UNICODE, 4097)
PR_REPORT_TEXT_A = PROP_TAG( PT_STRING8, 4097)
PR_ORIGINATOR_AND_DL_EXPANSION_HISTORY = PROP_TAG( PT_BINARY, 4098)
PR_REPORTING_DL_NAME = PROP_TAG( PT_BINARY, 4099)
PR_REPORTING_MTA_CERTIFICATE = PROP_TAG( PT_BINARY, 4100)
PR_RTF_SYNC_BODY_CRC = PROP_TAG( PT_LONG, 4102)
PR_RTF_SYNC_BODY_COUNT = PROP_TAG( PT_LONG, 4103)
PR_RTF_SYNC_BODY_TAG = PROP_TAG( PT_TSTRING, 4104)
PR_RTF_SYNC_BODY_TAG_W = PROP_TAG( PT_UNICODE, 4104)
PR_RTF_SYNC_BODY_TAG_A = PROP_TAG( PT_STRING8, 4104)
PR_RTF_COMPRESSED = PROP_TAG( PT_BINARY, 4105)
PR_RTF_SYNC_PREFIX_COUNT = PROP_TAG( PT_LONG, 4112)
PR_RTF_SYNC_TRAILING_COUNT = PROP_TAG( PT_LONG, 4113)
PR_ORIGINALLY_INTENDED_RECIP_ENTRYID = PROP_TAG( PT_BINARY, 4114)
PR_CONTENT_INTEGRITY_CHECK = PROP_TAG( PT_BINARY, 3072)
PR_EXPLICIT_CONVERSION = PROP_TAG( PT_LONG, 3073)
PR_IPM_RETURN_REQUESTED = PROP_TAG( PT_BOOLEAN, 3074)
PR_MESSAGE_TOKEN = PROP_TAG( PT_BINARY, 3075)
PR_NDR_REASON_CODE = PROP_TAG( PT_LONG, 3076)
PR_NDR_DIAG_CODE = PROP_TAG( PT_LONG, 3077)
PR_NON_RECEIPT_NOTIFICATION_REQUESTED = PROP_TAG( PT_BOOLEAN, 3078)
PR_DELIVERY_POINT = PROP_TAG( PT_LONG, 3079)
PR_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED = PROP_TAG( PT_BOOLEAN, 3080)
PR_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT = PROP_TAG( PT_BINARY, 3081)
PR_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY = PROP_TAG( PT_BOOLEAN, 3082)
PR_PHYSICAL_DELIVERY_MODE = PROP_TAG( PT_LONG, 3083)
PR_PHYSICAL_DELIVERY_REPORT_REQUEST = PROP_TAG( PT_LONG, 3084)
PR_PHYSICAL_FORWARDING_ADDRESS = PROP_TAG( PT_BINARY, 3085)
PR_PHYSICAL_FORWARDING_ADDRESS_REQUESTED = PROP_TAG( PT_BOOLEAN, 3086)
PR_PHYSICAL_FORWARDING_PROHIBITED = PROP_TAG( PT_BOOLEAN, 3087)
PR_PHYSICAL_RENDITION_ATTRIBUTES = PROP_TAG( PT_BINARY, 3088)
PR_PROOF_OF_DELIVERY = PROP_TAG( PT_BINARY, 3089)
PR_PROOF_OF_DELIVERY_REQUESTED = PROP_TAG( PT_BOOLEAN, 3090)
PR_RECIPIENT_CERTIFICATE = PROP_TAG( PT_BINARY, 3091)
PR_RECIPIENT_NUMBER_FOR_ADVICE = PROP_TAG( PT_TSTRING, 3092)
PR_RECIPIENT_NUMBER_FOR_ADVICE_W = PROP_TAG( PT_UNICODE, 3092)
PR_RECIPIENT_NUMBER_FOR_ADVICE_A = PROP_TAG( PT_STRING8, 3092)
PR_RECIPIENT_TYPE = PROP_TAG( PT_LONG, 3093)
PR_REGISTERED_MAIL_TYPE = PROP_TAG( PT_LONG, 3094)
PR_REPLY_REQUESTED = PROP_TAG( PT_BOOLEAN, 3095)
PR_REQUESTED_DELIVERY_METHOD = PROP_TAG( PT_LONG, 3096)
PR_SENDER_ENTRYID = PROP_TAG( PT_BINARY, 3097)
PR_SENDER_NAME = PROP_TAG( PT_TSTRING, 3098)
PR_SENDER_NAME_W = PROP_TAG( PT_UNICODE, 3098)
PR_SENDER_NAME_A = PROP_TAG( PT_STRING8, 3098)
PR_SUPPLEMENTARY_INFO = PROP_TAG( PT_TSTRING, 3099)
PR_SUPPLEMENTARY_INFO_W = PROP_TAG( PT_UNICODE, 3099)
PR_SUPPLEMENTARY_INFO_A = PROP_TAG( PT_STRING8, 3099)
PR_TYPE_OF_MTS_USER = PROP_TAG( PT_LONG, 3100)
PR_SENDER_SEARCH_KEY = PROP_TAG( PT_BINARY, 3101)
PR_SENDER_ADDRTYPE = PROP_TAG( PT_TSTRING, 3102)
PR_SENDER_ADDRTYPE_W = PROP_TAG( PT_UNICODE, 3102)
PR_SENDER_ADDRTYPE_A = PROP_TAG( PT_STRING8, 3102)
PR_SENDER_EMAIL_ADDRESS = PROP_TAG( PT_TSTRING, 3103)
PR_SENDER_EMAIL_ADDRESS_W = PROP_TAG( PT_UNICODE, 3103)
PR_SENDER_EMAIL_ADDRESS_A = PROP_TAG( PT_STRING8, 3103)
PR_CURRENT_VERSION = PROP_TAG( PT_I8, 3584)
PR_DELETE_AFTER_SUBMIT = PROP_TAG( PT_BOOLEAN, 3585)
PR_DISPLAY_BCC = PROP_TAG( PT_TSTRING, 3586)
PR_DISPLAY_BCC_W = PROP_TAG( PT_UNICODE, 3586)
PR_DISPLAY_BCC_A = PROP_TAG( PT_STRING8, 3586)
PR_DISPLAY_CC = PROP_TAG( PT_TSTRING, 3587)
PR_DISPLAY_CC_W = PROP_TAG( PT_UNICODE, 3587)
PR_DISPLAY_CC_A = PROP_TAG( PT_STRING8, 3587)
PR_DISPLAY_TO = PROP_TAG( PT_TSTRING, 3588)
PR_DISPLAY_TO_W = PROP_TAG( PT_UNICODE, 3588)
PR_DISPLAY_TO_A = PROP_TAG( PT_STRING8, 3588)
PR_PARENT_DISPLAY = PROP_TAG( PT_TSTRING, 3589)
PR_PARENT_DISPLAY_W = PROP_TAG( PT_UNICODE, 3589)
PR_PARENT_DISPLAY_A = PROP_TAG( PT_STRING8, 3589)
PR_MESSAGE_DELIVERY_TIME = PROP_TAG( PT_SYSTIME, 3590)
PR_MESSAGE_FLAGS = PROP_TAG( PT_LONG, 3591)
PR_MESSAGE_SIZE = PROP_TAG( PT_LONG, 3592)
PR_PARENT_ENTRYID = PROP_TAG( PT_BINARY, 3593)
PR_SENTMAIL_ENTRYID = PROP_TAG( PT_BINARY, 3594)
PR_CORRELATE = PROP_TAG( PT_BOOLEAN, 3596)
PR_CORRELATE_MTSID = PROP_TAG( PT_BINARY, 3597)
PR_DISCRETE_VALUES = PROP_TAG( PT_BOOLEAN, 3598)
PR_RESPONSIBILITY = PROP_TAG( PT_BOOLEAN, 3599)
PR_SPOOLER_STATUS = PROP_TAG( PT_LONG, 3600)
PR_TRANSPORT_STATUS = PROP_TAG( PT_LONG, 3601)
PR_MESSAGE_RECIPIENTS = PROP_TAG( PT_OBJECT, 3602)
PR_MESSAGE_ATTACHMENTS = PROP_TAG( PT_OBJECT, 3603)
PR_SUBMIT_FLAGS = PROP_TAG( PT_LONG, 3604)
PR_RECIPIENT_STATUS = PROP_TAG( PT_LONG, 3605)
PR_TRANSPORT_KEY = PROP_TAG( PT_LONG, 3606)
PR_MSG_STATUS = PROP_TAG( PT_LONG, 3607)
PR_MESSAGE_DOWNLOAD_TIME = PROP_TAG( PT_LONG, 3608)
PR_CREATION_VERSION = PROP_TAG( PT_I8, 3609)
PR_MODIFY_VERSION = PROP_TAG( PT_I8, 3610)
PR_HASATTACH = PROP_TAG( PT_BOOLEAN, 3611)
PR_BODY_CRC = PROP_TAG( PT_LONG, 3612)
PR_NORMALIZED_SUBJECT = PROP_TAG( PT_TSTRING, 3613)
PR_NORMALIZED_SUBJECT_W = PROP_TAG( PT_UNICODE, 3613)
PR_NORMALIZED_SUBJECT_A = PROP_TAG( PT_STRING8, 3613)
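# A MAPI property tag packs the property ID into the high 16 bits and the
# property type into the low 16 bits, i.e. PROP_TAG(ptype, pid) == (pid << 16) | ptype;
# for example, PR_IMPORTANCE == (23 << 16) | PT_LONG == 0x00170003 (PT_LONG is 3).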
# src/installer/src/tortuga/resourceAdapter/resourceAdapter.py
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=logging-not-lazy,no-self-use,no-member,maybe-no-member
import csv
import logging
import os.path
import re
import sys
import traceback
from typing import Any, Dict, List, Optional
import gevent
from sqlalchemy.orm.session import Session
from tortuga.addhost.addHostManager import AddHostManager
from tortuga.config.configManager import ConfigManager
from tortuga.db.nodesDbHandler import NodesDbHandler
from tortuga.db.models.hardwareProfile import HardwareProfile
from tortuga.db.models.network import Network
from tortuga.db.models.nic import Nic
from tortuga.db.models.node import Node
from tortuga.db.models.resourceAdapterConfig import ResourceAdapterConfig
from tortuga.db.models.softwareProfile import SoftwareProfile
from tortuga.db.resourceAdapterConfigDbHandler import \
ResourceAdapterConfigDbHandler
from tortuga.events.types.node import NodeStateChanged
from tortuga.exceptions.configurationError import ConfigurationError
from tortuga.exceptions.nicNotFound import NicNotFound
from tortuga.exceptions.resourceNotFound import ResourceNotFound
from tortuga.exceptions.unsupportedOperation import UnsupportedOperation
from tortuga.kit.actions.manager import KitActionsManager
from tortuga.logging import RESOURCE_ADAPTER_NAMESPACE
from tortuga.objects.node import Node as TortugaNode
from tortuga.parameter.parameterApi import ParameterApi
from tortuga.resourceAdapterConfiguration import settings
from tortuga.resourceAdapterConfiguration.validator import (ConfigurationValidator,
ValidationError)
from tortuga.schema import ResourceAdapterConfigSchema
from .userDataMixin import UserDataMixin
cm = ConfigManager()
DEFAULT_CONFIGURATION_PROFILE_NAME = 'Default'
class ResourceAdapter(UserDataMixin): \
# pylint: disable=too-many-public-methods
"""
This is the base class for all resource adapters to derive from.
The default actions simply print a debug message to show that the
subclass did not implement the action.
"""
settings = {
'tags': settings.TagListSetting(
display_name='Tags',
description='A comma-separated list of tags in the form of '
'key=value'
),
}
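    # Example (hypothetical adapter): a concrete subclass sets __adaptername__
    # and typically extends this settings dict; this sketch assumes a
    # StringSetting type exists alongside TagListSetting:
    #
    #     class ExampleAdapter(ResourceAdapter):
    #         __adaptername__ = 'example'
    #         settings = {
    #             **ResourceAdapter.settings,
    #             'region': settings.StringSetting(
    #                 display_name='Region', required=True),
    #         }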
__adaptername__ = None
def __init__(self, addHostSession: Optional[str] = None):
if not self.__adaptername__:
raise AttributeError(
'Subclasses of ResourceAdapter must have __adaptername__'
' defined')
self._logger = logging.getLogger(
'{}.{}'.format(RESOURCE_ADAPTER_NAMESPACE, self.__adaptername__))
self.__installer_public_hostname = None
self.__installer_public_ipaddress = None
self.__private_dns_zone = None
# Tags provided by the add nodes request
self.__tags_requested = {}
# Initialize caches
self.__addHostApi = None
self.__nodeApi = None
self.__osObject = None
self.__sanApi = None
self._cm = cm
self._addHostSession = addHostSession
self.session = None
@property
def addHostSession(self):
return self._addHostSession
@property
def cacheCfgFilePath(self):
return os.path.join(
self._cm.getRoot(), 'var', '%s-instance.conf' % (
self.__adaptername__))
@property
def cfgFileName(self):
return os.path.join(
self._cm.getKitConfigBase(),
'adapter-defaults-%s.conf' % (
self.__adaptername__))
def hookAction(self, action, nodes, args=None):
# Only the 'default' resource adapter overrides the hookAction()
# method.
pass
def start(self, addNodesRequest: dict, dbSession: Session,
dbHardwareProfile: HardwareProfile,
dbSoftwareProfile: Optional[SoftwareProfile] = None) -> List[Node]:
self.__tags_requested = addNodesRequest.get('tags', {})
return []
def set_node_tag(self, node: Node, tag_name: str, tag_value: str):
"""
Sets a tag on a node in the resource adapter/provider instance.
:param Node node: the Tortuga node
:param str tag_name: the name of the tag to set
:param str tag_value: the value to set the tag to
"""
        raise NotImplementedError()
def unset_node_tag(self, node: Node, tag_name: str):
"""
Removes a tag from a node in the resource adapter/provider instance.
:param Node node: the Tortuga node
:param str tag_name: the name of the tag to remove
"""
        raise NotImplementedError()
def fire_state_change_event(self, db_node: Node, previous_state: str):
"""
Fires a node state changed event. This is a "fake" operation allowing
resource adapters to fire events without having to actually take
the node through the actual state change. The node is assumed to
        have its current state set to the new state.
:param Node db_node: a database node instance
:param str previous_state: the previous state for the node
"""
node_dict = TortugaNode.getFromDbDict(db_node.__dict__).getCleanDict()
NodeStateChanged.fire(node=node_dict,
previous_state=previous_state)
def fire_provisioned_event(self, db_node: Node):
"""
Fires the node provisioned event. This is a fake operation that
assumes two things: that the node's current state is already set
as "Provisioned" and that the previous state was "Created". If you
        need it to behave differently from that, then you need to use the
"fire_state_change_event" method instead.
:param Node db_node: the database node instance
"""
self.fire_state_change_event(db_node=db_node,
previous_state='Created')
def validate_start_arguments(self, addNodesRequest: dict,
dbHardwareProfile: HardwareProfile,
dbSoftwareProfile: SoftwareProfile): \
# pylint: disable=unused-argument
"""
Validate arguments (eventually) passed to start() API
"""
cfgname = addNodesRequest.get('resource_adapter_configuration')
if cfgname is None:
# use default resource adapter configuration, if set
cfgname = dbHardwareProfile.default_resource_adapter_config.name \
if dbHardwareProfile.default_resource_adapter_config else \
DEFAULT_CONFIGURATION_PROFILE_NAME
# ensure addNodesRequest reflects resource adapter configuration
# profile being used
addNodesRequest['resource_adapter_configuration'] = cfgname
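        # Example: with addNodesRequest = {} and a hardware profile whose
        # default_resource_adapter_config is named 'aws-east' (hypothetical
        # name), cfgname resolves to 'aws-east'; with no default configured
        # it falls back to 'Default'.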
def stop(self, hardwareProfileName: str, deviceName: str):
self.__trace(hardwareProfileName, deviceName)
def updateNode(self, session: Session, node: Node,
updateNodeRequest: dict): \
# pylint: disable=unused-argument
self.__trace(session, node, updateNodeRequest)
def deleteNode(self, nodes: List[Node]) -> None:
"""Remove the given node(s) from the system"""
self.__trace(nodes)
def _async_delete_nodes(self, nodes):
"""
Asynchronously delete nodes; calls "ResourceAdapter._delete_node()"
        method for each node being deleted
        :param nodes: list of Node objects
:return: None
"""
greenlets = []
for node in nodes:
greenlets.append(gevent.spawn(self._delete_node, node))
# TODO: implement timeout
gevent.joinall(greenlets)
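    # A subclass's _delete_node(node) (a hypothetical sketch; the method is
    # adapter-specific) would release the backing provider resource, e.g.
    # terminate the instance, and runs in its own greenlet spawned by
    # _async_delete_nodes() above.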
def startupNode(self, nodes: List[Node],
remainingNodeList: Optional[str] = None,
tmpBootMethod: Optional[str] = 'n'): \
# pylint: disable=unused-argument
"""
Start nodes
"""
# By default raise unsupported operation
raise UnsupportedOperation('Node does not support starting')
def shutdownNode(self, nodes: List[Node],
bSoftReset: Optional[bool] = False): \
# pylint: disable=unused-argument
"""Shutdown the given node"""
# By default raise unsupported operation
raise UnsupportedOperation('Node does not support shutdown')
def rebootNode(self, nodes: List[Node],
bSoftReset: Optional[bool] = False): \
# pylint: disable=unused-argument
"""Reboot the given node"""
# By default raise unsupported operation
raise UnsupportedOperation('Node does not support rebooting')
def addVolumeToNode(self, node: Node, volume: str, isDirect: bool): \
# pylint: disable=unused-argument
"""Add a disk to a node"""
# By default raise unsupported operation
raise UnsupportedOperation(
'Node does not support dynamic disk addition')
def removeVolumeFromNode(self, node: Node, volume: str): \
# pylint: disable=unused-argument
"""Remove a disk from a node"""
# By default raise unsupported operation
raise UnsupportedOperation(
            'Node does not support dynamic disk deletion')
def __trace(self, *pargs, **kargs) -> None:
stack = traceback.extract_stack()
funcname = stack[-2][2]
self._logger.debug(
'-- (pass) %s::%s %s %s' % (
self.__adaptername__, funcname, pargs, kargs))
def validate_config(self,
profile: str = DEFAULT_CONFIGURATION_PROFILE_NAME
) -> ConfigurationValidator:
"""
Validates the configuration profile.
:param str profile: the name of the configuration profile to validate
:return ConfigurationValidator: the validator, loaded with the
validated data
:raises ValidationError:
"""
validator = ConfigurationValidator(self.settings)
#
# Load settings from class settings definitions if any of them
# have default values
#
validator.load(self._load_config_from_class())
#
# Load settings from default profile in database, if it exists
#
validator.load(self._load_config_from_database())
#
# Load settings from a specific profile, if one was specified
#
if profile and profile != DEFAULT_CONFIGURATION_PROFILE_NAME:
validator.load(self._load_config_from_database(profile))
#
# Validate the settings
#
validator.validate()
return validator
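    # Resolution order used by validate_config() above: class-level defaults,
    # then the 'Default' profile from the database, then the named profile
    # (if any), with later layers overriding earlier ones.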
def get_config(self,
profile: str = DEFAULT_CONFIGURATION_PROFILE_NAME
) -> Dict[str, Any]:
"""
Gets the resource adapter configuration for the specified profile.
        :param str profile: the resource adapter profile to get
:return Dict[str, Any]: the configuration
:raises ConfigurationError:
:raises ResourceNotFound:
"""
self._logger.debug('get_config(profile={})'.format(profile))
#
# Validate the settings and dump the config with transformed
# values
#
try:
validator = self.validate_config(profile)
processed_config: Dict[str, Any] = validator.dump()
except ValidationError as ex:
raise ConfigurationError(str(ex))
#
# Perform any required additional processing on the config
#
self.process_config(processed_config)
return processed_config
def _load_config_from_class(self) -> Dict[str, str]:
"""
Load the settings from the resource adapter class default values
into the config dict, overriding what is in there already.
:returns Dict[str, str]: the configuration
"""
config: Dict[str, str] = {}
for k, v in self.settings.items():
if v.default is not None:
config[k] = v.default
return config
def _load_config_from_database(
self,
profile: str = DEFAULT_CONFIGURATION_PROFILE_NAME
) -> Dict[str, str]:
"""
Loads a configuration profile from the database.
:param profile: the name of the configuration profile
:return Dict[str, str]: the configuration
"""
config = {}
db_handler = ResourceAdapterConfigDbHandler()
try:
db_config = db_handler.get(
self.session, self.__adaptername__, profile)
cfg_list = ResourceAdapterConfigSchema().dump(db_config).data
for s in cfg_list['configuration']:
config[s['key']] = s['value']
except ResourceNotFound:
pass
return config
def process_config(self, config: Dict[str, Any]):
"""
Override this method in subclasses to perform any additional
processing on the config. Changes to the config are performed
in-place (i.e. config is mutable).
:param Dict[str, Any] config: the configuration dict
"""
pass
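    # Example override (hypothetical 'zones' setting) showing in-place
    # post-processing:
    #
    #     def process_config(self, config):
    #         # normalize a comma-separated string into a list
    #         if isinstance(config.get('zones'), str):
    #             config['zones'] = [z.strip() for z in config['zones'].split(',')]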
def get_initial_tags(self, config: Dict[str, str], hwp_name: str,
swp_name: str) -> Dict[str, str]:
"""
Returns the list of tags that should be applied to one or more
nodes upon creation.
:param Dict[str, str] config: the resource adapter profile config
        :param str hwp_name: the node hardware profile name
        :param str swp_name: the node software profile name
        :return Dict[str, str]: the initial tags
        """
        # NOTE: the source is truncated at this point; the body below is a
        # minimal hedged reconstruction that merges tags from the resource
        # adapter configuration profile with tags from the add-nodes request.
        tags: Dict[str, str] = dict(config.get('tags') or {})
        tags.update(self.__tags_requested)
        return tags
from datetime import date
from unittest.mock import patch, call
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.test import TestCase, Client, override_settings, RequestFactory
from solenoid.people.models import Author, Liaison
from solenoid.records.models import Record
from .models import EmailMessage
from .views import _get_or_create_emails, EmailSend
@override_settings(LOGIN_REQUIRED=False,
USE_ELEMENTS=False,
# We need to set this or there won't be email recipients in
# EMAIL_TESTING_MODE, so emails won't send.
ADMINS=[('Admin', '<EMAIL>')])
class EmailCreatorTestCase(TestCase):
fixtures = ['testdata.yaml']
@patch('solenoid.emails.views._get_or_create_emails')
def test_posting_to_create_view_calls_creator(self, mock_create):
EmailMessage.objects.get(pk=2).delete()
mock_create.return_value = [1]
c = Client()
c.post(reverse('emails:create'), {'records': ['1']})
mock_create.assert_called_once_with(['1'])
mock_create.reset_mock()
c.post(reverse('emails:create'), {'records': ['1', '2']})
mock_create.assert_called_once_with(['1', '2'])
def test_posting_to_create_view_returns_email_eval(self):
EmailMessage.objects.get(pk=2).delete()
c = Client()
response = c.post(reverse('emails:create'), {'records': ['1']})
self.assertRedirects(response, reverse('emails:evaluate', args=(1,)))
def test_email_recipient(self):
"""The email created by _get_or_create_emails must be to: the relevant
liaison."""
# Expected to be a paper by Tonegawa, who belongs to BCS, whose
# liaison is Cutter. This record does not yet have an email.
email_pks = _get_or_create_emails([2])
email = EmailMessage.objects.get(pk=email_pks[0])
self.assertEqual(email.liaison.pk, 2)
def test_get_or_create_emails_returns_correctly(self):
"""When we pass in records to _get_or_create_emails, we should get back
one email per author in the recordset."""
EmailMessage.objects.get(pk=2).delete()
EmailMessage.objects.get(pk=4).delete()
email_pks = _get_or_create_emails([1])
self.assertEqual(len(email_pks), 1)
email_pks = _get_or_create_emails([1, 2])
self.assertEqual(len(email_pks), 2)
email_pks = _get_or_create_emails([1, 2, 3])
self.assertEqual(len(email_pks), 3)
email_pks = _get_or_create_emails([1, 2, 3, 4])
# Records 3 and 4 are by the same author.
self.assertEqual(len(email_pks), 3)
@override_settings(LOGIN_REQUIRED=False,
USE_ELEMENTS=False,
# We need to set this or there won't be email recipients in
# EMAIL_TESTING_MODE, so emails won't send.
ADMINS=[('Admin', '<EMAIL>')])
class EmailEvaluateTestCase(TestCase):
fixtures = ['testdata.yaml']
def setUp(self):
self.url = reverse('emails:evaluate', args=(1,))
self.client = Client()
def tearDown(self):
EmailMessage.objects.all().delete()
def test_latest_version_displays_on_unsent_page_if_not_blank(self):
response = self.client.get(self.url)
self.assertContains(response, "Most recent text of email 1")
def test_liaison_email_address_displays(self):
response = self.client.get(self.url)
self.assertContains(response, '<EMAIL>')
def test_users_can_save_changes_to_emails(self):
session = self.client.session
session['email_pks'] = [2, 3]
session['total_email'] = 3
session['current_email'] = 1
session.save()
new_text = 'This is what we change the email to'
self.client.post(self.url, {
'submit_save': 'save & next',
'latest_text': new_text
})
self.assertEqual(new_text, EmailMessage.objects.get(pk=1).latest_text)
def test_only_unsent_emails_are_editable_1(self):
"""The email evaluate page for a sent email shows its text as sent but
does not contain an HTML form element."""
sent_email = EmailMessage.objects.get(pk=1)
sent_email.date_sent = date.today()
sent_email.save()
response = self.client.get(self.url)
self.assertContains(response, "Most recent text of email 1")
self.assertNotContains(response, "</form>")
@patch('solenoid.emails.models.EmailMessage.send')
def test_only_unsent_emails_are_editable_2(self, mock_send):
"""On post, the email evaluate page does not re-send emails that have
already been sent."""
sent_email = EmailMessage.objects.get(pk=1)
sent_email.date_sent = date.today()
sent_email.save()
self.client.post(self.url, {'submit_send': 'send & next'})
assert not mock_send.called
def test_template_renders_form_media(self):
"""Make sure we remembered to include {{ form.media }}, which is
required for rendering the WYSIWYG editor.
It's hard to directly test that the template HTML contains that
string, but we can check that the rendered template contains part of
the expected output of form.media, which would be unlikely to get there
any other way."""
response = self.client.get(self.url)
self.assertContains(response, 'ckeditor/ckeditor.js')
def test_email_evaluate_workflow_1(self):
"""
Make sure that EmailEvaluate walks through the expected set of emails
when users are hitting 'cancel & next'.
It'd be nice to test that the session variables are set correctly, but
testing Django session is a pain.
"""
# Set up a path that should take us through the evaluate view 3 times.
# Implicitly, we entered the email evaluation workflow with the pks =
# [1, 2, 3], but 1 has already been popped by EmailCreate.
# See https://docs.djangoproject.com/en/1.8/topics/testing/tools/
# persistent-state
# for info on how to use sessions in testing.
session = self.client.session
session['email_pks'] = [2, 3]
session['total_email'] = 3
session['current_email'] = 1
session.save()
current_url = reverse('emails:evaluate', args=(1,))
self.client.get(current_url)
response = self.client.post(current_url,
data={'submit_cancel': 'submit_cancel'})
expected_url = reverse('emails:evaluate', args=(2,))
self.assertRedirects(response, expected_url)
response = self.client.post(expected_url,
data={'submit_cancel': 'submit_cancel'})
expected_url = reverse('emails:evaluate', args=(3,))
self.assertRedirects(response, expected_url)
response = self.client.post(expected_url,
data={'submit_cancel': 'submit_cancel'})
expected_url = reverse('home')
self.assertRedirects(response, expected_url)
def test_email_evaluate_workflow_2(self):
"""
Make sure that EmailEvaluate walks through the expected set of emails
when users are hitting 'save & next'.
"""
# Set up a path that should take us through the evaluate view 3 times.
# Implicitly, we entered the email evaluation workflow with the pks =
# [1, 2, 3], but 1 has already been popped by EmailCreate.
# See https://docs.djangoproject.com/en/1.8/topics/testing/tools/
# persistent-state
# for info on how to use sessions in testing.
session = self.client.session
session['email_pks'] = [2, 3]
session['total_email'] = 3
session['current_email'] = 1
session.save()
current_url = reverse('emails:evaluate', args=(1,))
self.client.get(current_url)
response = self.client.post(current_url,
data={'submit_save': 'submit_save'})
expected_url = reverse('emails:evaluate', args=(2,))
self.assertRedirects(response, expected_url)
response = self.client.post(expected_url,
data={'submit_save': 'submit_save'})
expected_url = reverse('emails:evaluate', args=(3,))
self.assertRedirects(response, expected_url)
response = self.client.post(expected_url,
data={'submit_save': 'submit_save'})
expected_url = reverse('home')
self.assertRedirects(response, expected_url)
@patch('django.contrib.auth.models.AnonymousUser')
def test_email_evaluate_workflow_3(self, mock_user):
"""
Make sure that EmailEvaluate walks through the expected set of emails
when users are hitting 'send & next'.
"""
# request.user.email needs to exist or this test will fail.
mock_user.email = '<EMAIL>'
# Set up a path that should take us through the evaluate view 3 times.
# Implicitly, we entered the email evaluation workflow with the pks =
# [1, 2, 3], but 1 has already been popped by EmailCreate.
# See https://docs.djangoproject.com/en/1.8/topics/testing/tools/
# persistent-state
# for info on how to use sessions in testing.
session = self.client.session
session['email_pks'] = [2, 3]
session['total_email'] = 3
session['current_email'] = 1
session.save()
# Make sure email 2 is sendable - in the test data it's missing a
# record, meaning its author can't be identified.
record = Record.objects.get(pk=2)
email = EmailMessage.objects.get(pk=2)
record.email = email
record.save()
current_url = reverse('emails:evaluate', args=(1,))
self.client.get(current_url)
response = self.client.post(current_url,
data={'submit_send': 'submit_send'})
expected_url = reverse('emails:evaluate', args=(2,))
self.assertRedirects(response, expected_url)
response = self.client.post(expected_url,
data={'submit_send': 'submit_send'})
expected_url = reverse('emails:evaluate', args=(3,))
self.assertRedirects(response, expected_url)
response = self.client.post(expected_url,
data={'submit_send': 'submit_send'})
expected_url = reverse('home')
self.assertRedirects(response, expected_url)
def test_saving_unsets_new_citations_flag(self):
email = EmailMessage.objects.get(pk=1)
email.new_citations = True
email.save()
session = self.client.session
session['email_pks'] = [2, 3]
session['total_email'] = 3
session['current_email'] = 1
session.save()
self.client.post(self.url, {
'submit_save': 'save & next',
'latest_text': email.latest_text
})
email.refresh_from_db()
self.assertEqual(email.new_citations, False)
@patch('django.contrib.auth.models.AnonymousUser')
def test_sending_unsets_new_citations_flag(self, mock_user):
# request.user.email needs to exist or this test will fail.
mock_user.email = '<EMAIL>'
email = EmailMessage.objects.get(pk=1)
email.new_citations = True
email.save()
session = self.client.session
session['email_pks'] = [2, 3]
session['total_email'] = 3
session['current_email'] = 1
session.save()
self.client.post(self.url, {
'submit_send': 'send & next',
'latest_text': email.latest_text
})
email.refresh_from_db()
self.assertEqual(email.new_citations, False)
def test_warning_message_shows_when_flag_set(self):
email = EmailMessage.objects.get(pk=1)
email.new_citations = True
email.save()
response = self.client.get(self.url)
expected = "New citations for this author have been imported since " \
"last time the email was edited. They've been added to this " \
"email automatically, but please proofread."
assert any([msg.message == expected and msg.level_tag == 'error'
for msg in response.context['messages']])
@override_settings(LOGIN_REQUIRED=False,
USE_ELEMENTS=False,
# We need to set this or there won't be email recipients in
# EMAIL_TESTING_MODE, so emails won't send.
ADMINS=[('Admin', '<EMAIL>')])
class EmailMessageModelTestCase(TestCase):
fixtures = ['testdata.yaml']
def tearDown(self):
EmailMessage.objects.all().delete()
def test_revert(self):
original_text = 'This is the original text'
latest_text = 'This is the subsequent text'
email = EmailMessage.objects.create(
original_text=original_text,
latest_text=latest_text,
author=Author.objects.latest('pk'),
_liaison=Liaison.objects.latest('pk'),
)
email.revert()
self.assertEqual(email.latest_text, original_text)
def test_latest_text_is_set_on_creation(self):
original_text = 'This is the original text'
email = EmailMessage.objects.create(
original_text=original_text,
# Note that latest_text is not set here, hence defaults blank.
author=Author.objects.latest('pk'),
_liaison=Liaison.objects.latest('pk'),
)
self.assertEqual(email.latest_text, original_text)
def test_email_has_all_expected_records(self):
"""The email text includes all expected records."""
records = Record.objects.filter(pk__in=[3, 4, 5])
email = EmailMessage.create_original_text(records)
assert Record.objects.get(pk=3).citation in email
assert Record.objects.get(pk=4).citation in email
def test_already_sent_records_do_not_get_emailed(self):
"""If the input set contains already-sent records, they do not make it
into the EmailMessage text."""
records = Record.objects.filter(pk__in=[3, 4, 6])
email = EmailMessage.create_original_text(records)
assert Record.objects.get(pk=6).citation not in email
def test_publisher_special_message_included(self):
"""The email text includes special messages for each publisher in its
record set with a special message."""
message = 'A very special message'
r3 = Record.objects.get(pk=3)
r3.message = message
r3.save()
records = Record.objects.filter(pk__in=[3, 4, 5])
text = EmailMessage.create_original_text(records)
self.assertEqual(text.count('A very special message'), 1)
def test_html_rendered_as_html(self):
"""Make sure that we see <p>, not <p>, and so forth, in our
constructed email text. (If we put {{ citations }} into the email
template without the |safe filter, we'll end up with escaped HTML,
which is no good for our purposes.)"""
records = Record.objects.filter(pk__in=[3, 4, 5])
email = EmailMessage.create_original_text(records)
        self.assertNotIn('&lt;p&gt;', email)
def test_fpv_accepted_message_included(self):
"""The Recruit from Author - FPV Accepted message is included if | |
    if isinstance(policy_assignment_id, six.string_types) and len(policy_assignment_id.strip()) == 0:
raise click.UsageError('Parameter --policy-assignment-id cannot be whitespace or empty string')
kwargs = {}
client = cli_util.build_client('blockstorage', ctx)
result = client.get_volume_backup_policy_assignment(
policy_assignment_id=policy_assignment_id,
**kwargs
)
cli_util.render_response(result, ctx)
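# Example invocation of the command above (hypothetical OCID shown); a sketch,
# not output from the generated help:
#   oci bv volume-backup-policy-assignment get \
#       --policy-assignment-id ocid1.volumebackuppolicyassignment.oc1..example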
@volume_group_group.command(name=cli_util.override('get_volume_group.command_name', 'get'), help=u"""Gets information for the specified volume group. For more information, see [Volume Groups].""")
@cli_util.option('--volume-group-id', required=True, help=u"""The Oracle Cloud ID (OCID) that uniquely identifies the volume group.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'core', 'class': 'VolumeGroup'})
@cli_util.wrap_exceptions
def get_volume_group(ctx, from_json, volume_group_id):
if isinstance(volume_group_id, six.string_types) and len(volume_group_id.strip()) == 0:
raise click.UsageError('Parameter --volume-group-id cannot be whitespace or empty string')
kwargs = {}
client = cli_util.build_client('blockstorage', ctx)
result = client.get_volume_group(
volume_group_id=volume_group_id,
**kwargs
)
cli_util.render_response(result, ctx)
@volume_group_backup_group.command(name=cli_util.override('get_volume_group_backup.command_name', 'get'), help=u"""Gets information for the specified volume group backup. For more information, see [Volume Groups].""")
@cli_util.option('--volume-group-backup-id', required=True, help=u"""The Oracle Cloud ID (OCID) that uniquely identifies the volume group backup.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'core', 'class': 'VolumeGroupBackup'})
@cli_util.wrap_exceptions
def get_volume_group_backup(ctx, from_json, volume_group_backup_id):
if isinstance(volume_group_backup_id, six.string_types) and len(volume_group_backup_id.strip()) == 0:
raise click.UsageError('Parameter --volume-group-backup-id cannot be whitespace or empty string')
kwargs = {}
client = cli_util.build_client('blockstorage', ctx)
result = client.get_volume_group_backup(
volume_group_backup_id=volume_group_backup_id,
**kwargs
)
cli_util.render_response(result, ctx)
@volume_kms_key_group.command(name=cli_util.override('get_volume_kms_key.command_name', 'get'), help=u"""Gets the KMS key ID for the specified volume.""")
@cli_util.option('--volume-id', required=True, help=u"""The OCID of the volume.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'core', 'class': 'VolumeKmsKey'})
@cli_util.wrap_exceptions
def get_volume_kms_key(ctx, from_json, volume_id, if_match):
if isinstance(volume_id, six.string_types) and len(volume_id.strip()) == 0:
raise click.UsageError('Parameter --volume-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
client = cli_util.build_client('blockstorage', ctx)
result = client.get_volume_kms_key(
volume_id=volume_id,
**kwargs
)
cli_util.render_response(result, ctx)
@boot_volume_backup_group.command(name=cli_util.override('list_boot_volume_backups.command_name', 'list'), help=u"""Lists the boot volume backups in the specified compartment. You can filter the results by boot volume.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment.""")
@cli_util.option('--boot-volume-id', help=u"""The OCID of the boot volume.""")
@cli_util.option('--limit', type=click.INT, help=u"""For list pagination. The maximum number of results per page, or items to return in a paginated \"List\" call. For important details about how pagination works, see [List Pagination].
Example: `50`""")
@cli_util.option('--page', help=u"""For list pagination. The value of the `opc-next-page` response header from the previous \"List\" call. For important details about how pagination works, see [List Pagination].""")
@cli_util.option('--display-name', help=u"""A filter to return only resources that match the given display name exactly.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["TIMECREATED", "DISPLAYNAME"]), help=u"""The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME sort order is case sensitive.
**Note:** In general, some \"List\" operations (for example, `ListInstances`) let you optionally filter by availability domain if the scope of the resource type is within a single availability domain. If you call one of these \"List\" operations without specifying an availability domain, the resources are grouped by availability domain, then sorted.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""The sort order to use, either ascending (`ASC`) or descending (`DESC`). The DISPLAYNAME sort order is case sensitive.""")
@cli_util.option('--lifecycle-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "AVAILABLE", "TERMINATING", "TERMINATED", "FAULTY", "REQUEST_RECEIVED"]), help=u"""A filter to only return resources that match the given lifecycle state. The state value is case-insensitive.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'core', 'class': 'list[BootVolumeBackup]'})
@cli_util.wrap_exceptions
def list_boot_volume_backups(ctx, from_json, all_pages, page_size, compartment_id, boot_volume_id, limit, page, display_name, sort_by, sort_order, lifecycle_state):
if all_pages and limit:
raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
kwargs = {}
if boot_volume_id is not None:
kwargs['boot_volume_id'] = boot_volume_id
if limit is not None:
kwargs['limit'] = limit
if page is not None:
kwargs['page'] = page
if display_name is not None:
kwargs['display_name'] = display_name
if sort_by is not None:
kwargs['sort_by'] = sort_by
if sort_order is not None:
kwargs['sort_order'] = sort_order
if lifecycle_state is not None:
kwargs['lifecycle_state'] = lifecycle_state
client = cli_util.build_client('blockstorage', ctx)
if all_pages:
if page_size:
kwargs['limit'] = page_size
result = cli_util.list_call_get_all_results(
client.list_boot_volume_backups,
compartment_id=compartment_id,
**kwargs
)
elif limit is not None:
result = cli_util.list_call_get_up_to_limit(
client.list_boot_volume_backups,
limit,
page_size,
compartment_id=compartment_id,
**kwargs
)
else:
result = client.list_boot_volume_backups(
compartment_id=compartment_id,
**kwargs
)
cli_util.render_response(result, ctx)
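# Example pagination usage for the list command above (hypothetical compartment
# OCID); --all fetches every page, so it cannot be combined with --limit:
#   oci bv boot-volume-backup list \
#       --compartment-id ocid1.compartment.oc1..example --all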
@boot_volume_group.command(name=cli_util.override('list_boot_volumes.command_name', 'list'), help=u"""Lists the boot volumes in the specified compartment and availability domain.""")
@cli_util.option('--availability-domain', required=True, help=u"""The name of the availability domain.
Example: `Uocm:PHX-AD-1`""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment.""")
@cli_util.option('--limit', type=click.INT, help=u"""For list pagination. The maximum number of results per page, or items to return in a paginated \"List\" call. For important details about how pagination works, see [List Pagination].
Example: `50`""")
@cli_util.option('--page', help=u"""For list pagination. The value of the `opc-next-page` response header from the previous \"List\" call. For important details about how pagination works, see [List Pagination].""")
@cli_util.option('--volume-group-id', help=u"""The OCID of the volume group.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'core', 'class': 'list[BootVolume]'})
@cli_util.wrap_exceptions
def list_boot_volumes(ctx, from_json, all_pages, page_size, availability_domain, compartment_id, limit, page, volume_group_id):
if all_pages and limit:
raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
kwargs = {}
if limit is not None:
kwargs['limit'] = limit
if page is not None:
kwargs['page'] = page
if volume_group_id is not None:
kwargs['volume_group_id'] = volume_group_id
client = cli_util.build_client('blockstorage', ctx)
if all_pages:
if page_size:
kwargs['limit'] = page_size
result = cli_util.list_call_get_all_results(
client.list_boot_volumes,
availability_domain=availability_domain,
compartment_id=compartment_id,
**kwargs
)
elif limit is not None:
result = cli_util.list_call_get_up_to_limit(
client.list_boot_volumes,
limit,
page_size,
availability_domain=availability_domain,
compartment_id=compartment_id,
**kwargs
)
else:
result = client.list_boot_volumes(
availability_domain=availability_domain,
compartment_id=compartment_id,
**kwargs
)
cli_util.render_response(result, ctx)
@volume_backup_policy_group.command(name=cli_util.override('list_volume_backup_policies.command_name', 'list'), help=u"""Lists all volume backup policies available to the caller.""")
@cli_util.option('--limit', type=click.INT, help=u"""For list pagination. The maximum number of results per page, or items to return in a paginated \"List\" call. For important details about how pagination works, see [List Pagination].
Example: `50`""")
@cli_util.option('--page', help=u"""For list pagination. The value of the `opc-next-page` response header from the previous \"List\" call. For important details about how pagination works, see [List Pagination].""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'core', 'class': 'list[VolumeBackupPolicy]'})
@cli_util.wrap_exceptions
def list_volume_backup_policies(ctx, from_json, all_pages, page_size, limit, page):
if all_pages and limit:
raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
kwargs = {}
if limit is not None:
kwargs['limit'] = limit
if page is not None:
kwargs['page'] = page
client = cli_util.build_client('blockstorage', ctx)
if all_pages:
if page_size:
kwargs['limit'] = page_size
result = cli_util.list_call_get_all_results(
client.list_volume_backup_policies,
**kwargs
)
elif limit is not None:
result = cli_util.list_call_get_up_to_limit(
client.list_volume_backup_policies,
limit,
page_size,
**kwargs
)
else:
result = client.list_volume_backup_policies(
**kwargs
)
cli_util.render_response(result, ctx)
@volume_backup_group.command(name=cli_util.override('list_volume_backups.command_name', 'list'), help=u"""Lists the volume backups in the specified compartment. You can filter the results by volume.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment.""")
@cli_util.option('--volume-id', help=u"""The OCID of the volume.""")
@cli_util.option('--limit', type=click.INT, help=u"""For list pagination. The maximum number of results per page, or items to return in a paginated \"List\" call. For important details about how pagination works, see [List Pagination].
Example: `50`""")
@cli_util.option('--page', help=u"""For list pagination. The value of the `opc-next-page` response header from the previous \"List\" call. For important details about how pagination works, see [List Pagination].""")
@cli_util.option('--display-name', help=u"""A filter to return only resources that match the given display name exactly.""")
@cli_util.option('--source-volume-backup-id', help=u"""A filter to return only resources that originated from the given source volume backup.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["TIMECREATED", "DISPLAYNAME"]), help=u"""The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME sort order is case sensitive.
**Note:** In general, some \"List\" operations (for example, `ListInstances`) let you optionally filter by availability domain if the scope of the resource type is within a single availability domain. If you call one of these \"List\" operations without specifying an availability domain, the resources are | |
import os
import sys
import warnings
import builtins
import numpy as np
import time
import torch
import utils
from tqdm import tqdm
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes as genotypes
import torch.utils
from torch.utils.tensorboard import SummaryWriter
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from thop import profile
from torch.autograd import Variable
from model import NetworkImageNet as Network
parser = argparse.ArgumentParser("training imagenet")
parser.add_argument('--workers', type=int, default=16, help='number of workers to load dataset')
parser.add_argument('--data', type=str, default='datapath', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=768, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.5, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
parser.add_argument('--epochs', type=int, default=250, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--save', type=str, default='exp', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DrNAS_imagenet', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--lr_scheduler', type=str, default='linear', help='lr scheduler, linear or cosine')
parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int, help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://192.168.127.12:23456', type=str, help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true', help='Use multi-processing distributed training to launch N processes per node, which has N GPUs. This is the fastest way to use PyTorch for either single node or multi node data parallel training')
# args, unparsed = parser.parse_known_args()
CLASSES = 1000
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (-targets * log_probs).mean(0).sum()
return loss
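# With the smoothing above, the target distribution is
# (1 - epsilon) * one_hot + epsilon / num_classes; e.g. with epsilon=0.1 and
# 1000 classes the true class gets probability 0.9001 and every other class
# 0.0001, which penalizes over-confident logits.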
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
PID = os.getpid()
# global best_acc1
args.gpu = gpu
print("<< ============== JOB (PID = %d) @ GPU %d ============== >>"%(PID, gpu))
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
else:
# set up logs
args.save = './experiments/imagenet/eval-{}-{}-{}-{}'.format(
args.save, time.strftime("%Y%m%d-%H%M%S"), args.arch, args.seed)
if args.auxiliary:
args.save += '-auxiliary-' + str(args.auxiliary_weight)
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
writer = SummaryWriter(args.save)
if not torch.cuda.is_available():
logging.info('No GPU device available')
sys.exit(1)
np.random.seed(args.seed)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info("args = %s", args)
num_gpus = torch.cuda.device_count()
genotype = eval("genotypes.%s" % args.arch)
print('---------Genotype---------')
logging.info(genotype)
print('--------------------------')
model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
model.drop_path_prob = 0
macs, params = profile(model, inputs=(torch.randn(1, 3, 224, 224), ), verbose=True)
logging.info("param = %f, flops = %f", params, macs)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
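# e.g. (hypothetical numbers) --batch-size 1024 and --workers 32 on an
# 8-GPU node become 128 images and 4 loader workers per process, keeping
# the effective global batch at 1024.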
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
criterion_smooth = criterion_smooth.cuda()
optimizer = torch.optim.SGD(
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay
)
traindir = os.path.join(args.data, 'train')
validdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_data = dset.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2),
transforms.ToTensor(),
normalize,
]))
valid_data = dset.ImageFolder(
validdir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
else:
train_sampler = None
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
train_acc = valid_acc_top1 = valid_acc_top5 = best_acc_top1 = best_acc_top5 = 0
lr = args.learning_rate
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
############ master process writes logs #####################
epoch_bar = tqdm(range(args.epochs), position=0, leave=True)
for epoch in epoch_bar:
logging.info("<< ============== JOB (PID = %d) %s ============== >>"%(PID, args.save))
if args.distributed:
train_sampler.set_epoch(epoch)
if args.lr_scheduler == 'cosine':
scheduler.step()
current_lr = scheduler.get_last_lr()[0]
elif args.lr_scheduler == 'linear':
current_lr = adjust_lr(args, optimizer, epoch)
else:
print('Wrong lr type, exit')
sys.exit(1)
if epoch < 5 and args.batch_size > 32:
current_lr = lr * (epoch + 1) / 5.0
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (epoch + 1) / 5.0
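# e.g. with a hypothetical base lr of 0.5 this warm-up yields
# 0.1, 0.2, 0.3, 0.4, 0.5 over epochs 0-4 before the scheduler takes over.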
# logging.info('Warming-up Epoch: %d, LR: %e', epoch, lr * (epoch + 1) / 5.0)
description = 'Epoch [{}/{}] | LR:{} | Train:{} | Validation:{}/{} | Best: {}/{}'.format(epoch+1, args.epochs, current_lr, train_acc, valid_acc_top1, valid_acc_top5, best_acc_top1, best_acc_top5)
epoch_bar.set_description(description)
if args.distributed or args.gpu is None:
model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
else:
model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
epoch_start = time.time()
train_acc, train_obj = train(args, train_queue, model, criterion_smooth, optimizer)
# logging.info('Train_acc: %f', train_acc)
description = 'Epoch [{}/{}] | LR:{} | Train:{} | Validation:{}/{} | Best: {}/{}'.format(epoch+1, args.epochs, current_lr, train_acc, valid_acc_top1, valid_acc_top5, best_acc_top1, best_acc_top5)
epoch_bar.set_description(description)
valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
# logging.info('Valid_acc_top1: %f', valid_acc_top1)
# logging.info('Valid_acc_top5: %f', valid_acc_top5)
description = 'Epoch [{}/{}] | LR:{} | Train:{} | Validation:{}/{} | Best: {}/{}'.format(epoch+1, args.epochs, current_lr, train_acc, valid_acc_top1, valid_acc_top5, best_acc_top1, best_acc_top5)
epoch_bar.set_description(description)
epoch_duration = time.time() - epoch_start
# logging.info('Epoch time: %ds.', epoch_duration)
is_best = False
if valid_acc_top5 > best_acc_top5:
best_acc_top5 = valid_acc_top5
if valid_acc_top1 > best_acc_top1:
best_acc_top1 = valid_acc_top1
is_best = True
writer.add_scalar("acc/train", train_acc, epoch)
writer.add_scalar("acc/valid_best_top1", best_acc_top1, epoch)
writer.add_scalar("acc/valid_best_top5", best_acc_top5, epoch)
writer.add_scalar("acc/valid_top1", valid_acc_top1, epoch)
writer.add_scalar("acc/valid_top5", valid_acc_top5, epoch)
description = 'Epoch [{}/{}] | LR:{} | Train:{} | Validation:{}/{} | Best: {}/{}'.format(epoch+1, args.epochs, current_lr, train_acc, valid_acc_top1, valid_acc_top5, best_acc_top1, best_acc_top5)
epoch_bar.set_description(description)
# logging.info('Best_acc_top1: %f', best_acc_top1)
# logging.info('Best_acc_top5: %f', best_acc_top5)
utils.save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_acc_top1': best_acc_top1,
'optimizer': optimizer.state_dict(),
}, is_best, args.save)
else:
############ non-master processes write no logs #####################
for epoch in range(args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
if args.lr_scheduler == 'cosine':
scheduler.step()
current_lr = scheduler.get_last_lr()[0]
elif args.lr_scheduler == 'linear':
current_lr = adjust_lr(args, optimizer, epoch)
else:
print('Wrong lr type, exit')
sys.exit(1)
if epoch < 5 and args.batch_size > 32:
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (epoch + 1) / 5.0
if args.distributed or args.gpu is None:
model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
else:
model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
train_acc, train_obj = train(args, train_queue, model, criterion_smooth, optimizer)
def adjust_lr(args, optimizer, epoch):
# Smaller slope for the last 5 epochs because lr * 1/250 is relatively large
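# NOTE: the rest of adjust_lr is cut off in the source. A hypothetical
# completion -- a linear decay that returns the current lr, as the
# 'linear' scheduler branch above expects -- would look like:
if args.epochs - epoch > 5:
lr = args.learning_rate * (args.epochs - 5 - epoch) / (args.epochs - 5)
else:
lr = args.learning_rate * (args.epochs - epoch) / ((args.epochs - 5) * 5)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr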
# -*- coding: utf-8 -*-
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import unittest
import inspect
from ask_sdk_runtime.skill_builder import AbstractSkillBuilder
from ask_sdk_runtime.dispatch_components import (
GenericHandlerAdapter, GenericRequestMapper, GenericRequestHandlerChain,
AbstractRequestHandler, AbstractExceptionHandler,
AbstractRequestInterceptor, AbstractResponseInterceptor,
GenericExceptionMapper)
from ask_sdk_runtime.exceptions import (
RuntimeConfigException, SkillBuilderException)
try:
import mock
except ImportError:
from unittest import mock
class CustomSkillBuilder(AbstractSkillBuilder):
# Implementing a mock skill builder, for Py2.7 tests
def create(self):
return None
class TestSkillBuilder(unittest.TestCase):
def setUp(self):
self.sb = CustomSkillBuilder()
def test_add_null_request_handler_throw_error(self):
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_request_handler(request_handler=None)
assert "Valid Request Handler instance to be provided" in str(
exc.exception), (
"Add Request Handler method didn't throw exception when a null "
"request handler is added")
def test_add_invalid_request_handler_throw_error(self):
invalid_request_handler = mock.Mock()
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_request_handler(
request_handler=invalid_request_handler)
assert "Input should be a RequestHandler instance" in str(
exc.exception), (
"Add Request Handler method didn't throw exception when an "
"invalid request handler is added")
def test_add_valid_request_handler(self):
mock_request_handler = mock.MagicMock(spec=AbstractRequestHandler)
self.sb.add_request_handler(request_handler=mock_request_handler)
options = self.sb.runtime_configuration_builder
assert options.request_handler_chains[0].request_handler == mock_request_handler, (
"Add Request Handler method didn't add valid request handler to "
"Skill Builder Request Handlers list")
def test_add_null_exception_handler_throw_error(self):
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_exception_handler(exception_handler=None)
assert "Valid Exception Handler instance to be provided" in str(
exc.exception), (
"Add Exception Handler method didn't throw exception when a null "
"exception handler is added")
def test_add_invalid_exception_handler_throw_error(self):
invalid_exception_handler = mock.Mock()
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_exception_handler(
exception_handler=invalid_exception_handler)
assert "Input should be an ExceptionHandler instance" in str(
exc.exception), (
"Add Exception Handler method didn't throw exception when an "
"invalid exception handler is added")
def test_add_valid_exception_handler(self):
mock_exception_handler = mock.MagicMock(spec=AbstractExceptionHandler)
self.sb.add_exception_handler(exception_handler=mock_exception_handler)
options = self.sb.runtime_configuration_builder
assert options.exception_handlers[0] == mock_exception_handler, (
"Add Exception Handler method didn't add valid exception handler "
"to Skill Builder Exception Handlers list")
def test_add_null_global_request_interceptor_throw_error(self):
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_global_request_interceptor(request_interceptor=None)
assert "Valid Request Interceptor instance to be provided" in str(
exc.exception), (
"Add Global Request Interceptor method didn't throw exception "
"when a null request interceptor is added")
def test_add_invalid_global_request_interceptor_throw_error(self):
invalid_request_interceptor = mock.Mock()
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_global_request_interceptor(
request_interceptor=invalid_request_interceptor)
assert "Input should be a RequestInterceptor instance" in str(
exc.exception), (
"Add Global Request Interceptor method didn't throw exception "
"when an invalid request interceptor is added")
def test_add_valid_global_request_interceptor(self):
mock_request_interceptor = mock.MagicMock(
spec=AbstractRequestInterceptor)
self.sb.add_global_request_interceptor(
request_interceptor=mock_request_interceptor)
options = self.sb.runtime_configuration_builder
assert (options.global_request_interceptors[0] ==
mock_request_interceptor), (
"Add Global Request Interceptor method didn't add valid request "
"interceptor to Skill Builder "
"Request Interceptors list")
def test_add_null_global_response_interceptor_throw_error(self):
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_global_response_interceptor(response_interceptor=None)
assert "Valid Response Interceptor instance to be provided" in str(
exc.exception), (
"Add Global Response Interceptor method didn't throw exception "
"when a null response interceptor is added")
def test_add_invalid_global_response_interceptor_throw_error(self):
invalid_response_interceptor = mock.Mock()
with self.assertRaises(RuntimeConfigException) as exc:
self.sb.add_global_response_interceptor(
response_interceptor=invalid_response_interceptor)
assert "Input should be a ResponseInterceptor instance" in str(
exc.exception), (
"Add Global Response Interceptor method didn't throw exception "
"when an invalid response interceptor "
"is added")
def test_add_valid_global_response_interceptor(self):
mock_response_interceptor = mock.MagicMock(
spec=AbstractResponseInterceptor)
self.sb.add_global_response_interceptor(
response_interceptor=mock_response_interceptor)
options = self.sb.runtime_configuration_builder
assert (options.global_response_interceptors[0] ==
mock_response_interceptor), (
"Add Global Response Interceptor method didn't add valid response "
"interceptor to Skill Builder "
"Response Interceptors list")
def test_skill_configuration_getter_no_registered_components(self):
actual_config = self.sb.runtime_configuration_builder.get_runtime_configuration()
assert actual_config.request_mappers is not None, (
"Skill Configuration getter in Skill Builder didn't set request "
"mappers correctly")
assert actual_config.request_mappers[0].request_handler_chains is not None, (
"Skill Configuration getter in Skill Builder didn't set handler "
"chains in request mappers correctly")
assert len(actual_config.request_mappers[0].request_handler_chains) == 0, (
"Skill Configuration getter in Skill Builder added invalid "
"handler in handler chain, "
"when no request handlers are registered")
assert actual_config.handler_adapters is not None, (
"Skill Configuration getter in Skill Builder didn't set handler "
"adapters correctly")
assert isinstance(actual_config.handler_adapters[0], GenericHandlerAdapter), (
"Skill Configuration getter in Skill Builder didn't set default "
"handler adapter")
assert actual_config.exception_mapper is not None, (
"Skill Configuration getter in Skill Builder created invalid "
"exception mapper, "
"when no exception handlers are registered")
assert actual_config.request_interceptors == [], (
"Skill Configuration getter in Skill Builder created invalid "
"request interceptors, "
"when no global request interceptors are registered")
assert actual_config.response_interceptors == [], (
"Skill Configuration getter in Skill Builder created invalid "
"response interceptors, "
"when no global response interceptors are registered")
def test_skill_configuration_getter_handlers_registered(self):
mock_request_handler = mock.MagicMock(spec=AbstractRequestHandler)
self.sb.add_request_handler(request_handler=mock_request_handler)
mock_exception_handler = mock.MagicMock(spec=AbstractExceptionHandler)
self.sb.add_exception_handler(exception_handler=mock_exception_handler)
actual_config = self.sb.runtime_configuration_builder.get_runtime_configuration()
assert actual_config.request_mappers is not None, (
"Skill Configuration getter in Skill Builder didn't set request "
"mappers correctly")
assert actual_config.request_mappers[0].request_handler_chains is not None, (
"Skill Configuration getter in Skill Builder didn't set handler "
"chains in request mappers correctly")
assert len(actual_config.request_mappers[0].request_handler_chains) == 1, (
"Skill Configuration getter in Skill Builder didn't add valid "
"handler in handler chain, "
"when request handlers are registered")
assert actual_config.request_mappers[0].request_handler_chains[0].request_handler == mock_request_handler, (
"Skill Configuration getter in Skill Builder added invalid "
"handler in handler chain, "
"when request handlers are registered")
assert actual_config.exception_mapper is not None, (
"Skill Configuration getter in Skill Builder didn't create "
"exception mapper, "
"when exception handlers are registered")
assert len(actual_config.exception_mapper.exception_handlers) == 1, (
"Skill Configuration getter in Skill Builder added additional "
"exception handlers than the registered ones "
"in exception mapper")
assert actual_config.exception_mapper.exception_handlers[0] == mock_exception_handler, (
"Skill Configuration getter in Skill Builder added invalid "
"handler in exception mapper, "
"when exception handlers are registered")
def test_request_handler_decorator_creation(self):
request_handler_wrapper = self.sb.request_handler(can_handle_func=None)
assert callable(request_handler_wrapper), (
"Skill Builder Request Handler decorator returned an invalid "
"wrapper object")
actual_arg_spec = inspect.getargspec(request_handler_wrapper)
assert len(actual_arg_spec.args) == 1, (
"Skill Builder Request Handler decorator created a wrapper of "
"different signature than expected")
assert "handle_func" in actual_arg_spec.args, (
"Skill Builder Request Handler decorator created a wrapper "
"without named parameter handler_func")
def test_request_handler_decorator_invalid_can_handle_func(self):
request_handler_wrapper = self.sb.request_handler(
can_handle_func=None)
with self.assertRaises(SkillBuilderException) as exc:
request_handler_wrapper(handle_func=None)
assert ("can_handle_func and handle_func input parameters should "
"be callable") in str(exc.exception), (
"Request Handler Decorator accepted invalid can_handle_func "
"parameter")
def test_request_handler_decorator_invalid_handle_func(self):
request_handler_wrapper = self.sb.request_handler(
can_handle_func=lambda x: True)
with self.assertRaises(SkillBuilderException) as exc:
request_handler_wrapper(handle_func=None)
assert ("can_handle_func and handle_func input parameters should "
"be callable") in str(exc.exception), (
"Request Handler Decorator was decorated on an invalid object")
def test_request_handler_decorator_on_valid_handle_func(self):
def test_can_handle(input):
return True
def test_handle(input):
return "something"
returned_request_handler = self.sb.request_handler(can_handle_func=test_can_handle)(
handle_func=test_handle)
options = self.sb.runtime_configuration_builder
actual_request_handler_chain = options.request_handler_chains[0]
actual_request_handler = actual_request_handler_chain.request_handler
assert (actual_request_handler.__class__.__name__
== "RequestHandlerTestHandle"), (
"Request Handler decorator created Request Handler of incorrect "
"name")
assert actual_request_handler.can_handle(None) is True, (
"Request Handler decorator created Request Handler with incorrect "
"can_handle function")
assert actual_request_handler.handle(None) == "something", (
"Request Handler decorator created Request Handler with incorrect "
"handle function")
assert returned_request_handler == test_handle, (
"Request Handler wrapper returned incorrect function"
)
def test_exception_handler_decorator_creation(self):
exception_handler_wrapper = self.sb.exception_handler(
can_handle_func=None)
assert callable(exception_handler_wrapper), (
"Skill Builder Exception Handler decorator returned an invalid "
"wrapper object")
actual_arg_spec = inspect.getargspec(exception_handler_wrapper)
assert len(actual_arg_spec.args) == 1, (
"Skill Builder Exception Handler decorator created a wrapper of "
"different signature than expected")
assert "handle_func" in actual_arg_spec.args, (
"Skill Builder Exception Handler decorator created a wrapper "
"without named parameter handler_func")
def test_exception_handler_decorator_invalid_can_handle_func(self):
exception_handler_wrapper = self.sb.exception_handler(
can_handle_func=None)
with self.assertRaises(SkillBuilderException) as exc:
exception_handler_wrapper(handle_func=None)
assert ("can_handle_func and handle_func input parameters should "
"be callable") in str(exc.exception), (
"Exception Handler Decorator accepted invalid can_handle_func "
"parameter")
def test_exception_handler_decorator_invalid_handle_func(self):
exception_handler_wrapper = self.sb.exception_handler(
can_handle_func=lambda x: True)
with self.assertRaises(SkillBuilderException) as exc:
exception_handler_wrapper(handle_func=None)
assert ("can_handle_func and handle_func input parameters should "
"be callable") in str(exc.exception), (
"Exception Handler Decorator was decorated on an invalid object")
def test_exception_handler_decorator_on_valid_handle_func(self):
def test_can_handle(input, exc):
return True
def test_handle(input, exc):
return "something"
returned_exception_handler = self.sb.exception_handler(can_handle_func=test_can_handle)(
handle_func=test_handle)
options = self.sb.runtime_configuration_builder
actual_exception_handler = options.exception_handlers[0]
assert (actual_exception_handler.__class__.__name__
== "ExceptionHandlerTestHandle"), (
"Exception Handler decorator created Exception Handler of "
"incorrect name")
assert actual_exception_handler.can_handle(None, None) is True, (
"Exception Handler decorator | |
event = CEventExit()
self.m_event_dispatcher.fire_event(event)
def send_events(self, event):
pass
def set_request_go_timer(self, timeout):
"""
Set timeout thread to release debugger from waiting for a client
to attach.
"""
self.cancel_request_go_timer()
if timeout is None:
return
_timeout = max(1.0, timeout)
f = lambda: (
self.record_client_heartbeat(0, False, True),
self.request_go()
)
self.m_timer_embedded_giveup = threading.Timer(_timeout, f)
self.m_timer_embedded_giveup.start()
#
# sleep() releases control and allow timer thread to actually start
# before this scope returns.
#
time.sleep(0.1)
def cancel_request_go_timer(self):
t = self.m_timer_embedded_giveup
if t is not None:
self.m_timer_embedded_giveup = None
t.cancel()
def setbreak(self, f):
"""
Set thread to break on next statement.
"""
if not self.m_ftrace:
return
tid = thread.get_ident()
if not tid in self.m_threads:
return self.settrace(f)
ctx = self.m_threads[tid]
f.f_trace = ctx.trace_dispatch_break
self.m_saved_next = self.m_next_frame
self.m_next_frame = f
def settrace(self, f = None, f_break_on_init = True, timeout = None, builtins_hack = None):
"""
Start tracing mechanism for thread.
"""
if not self.m_ftrace:
return
tid = thread.get_ident()
if tid in self.m_threads:
return
self.set_request_go_timer(timeout)
self.m_f_break_on_init = f_break_on_init
self.m_builtins_hack = builtins_hack
threading.settrace(self.trace_dispatch_init)
sys.settrace(self.trace_dispatch_init)
if f is not None:
f.f_trace = self.trace_dispatch_init
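# Background sketch of the tracing contract relied on above (standard CPython
# behaviour, not code from this module): a function registered with
# sys.settrace() receives (frame, event, arg), and whatever it returns
# becomes the local trace function for that frame:
#
#   import sys
#
#   def global_trace(frame, event, arg):
#       print(event, frame.f_code.co_name)
#       return local_trace          # used for 'line'/'return' in this frame
#
#   def local_trace(frame, event, arg):
#       return local_trace
#
#   sys.settrace(global_trace)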
def stoptrace(self):
"""
Stop tracing mechanism.
"""
global g_fignore_atexit
g_fignore_atexit = True
threading.settrace(None)
sys.settrace(None)
sys.setprofile(None)
self.m_ftrace = False
self.set_all_tracers()
try:
self.request_go()
except DebuggerNotBroken:
pass
#self.m_threads = {}
def get_code_context(self, frame):
try:
return self.m_code_contexts[frame.f_code]
except KeyError:
if self.m_builtins_hack != None:
if calc_frame_path(frame) == self.m_builtins_hack:
self.m_builtins_hack = None
frame.f_globals['__builtins__'] = g_builtins_module
code_context = CCodeContext(frame, self.m_bp_manager)
return self.m_code_contexts.setdefault(frame.f_code, code_context)
def get_current_ctx(self):
if len(self.m_threads) == 0:
raise NoThreads
return self.m_current_ctx
def get_ctx(self, tid):
ctx = self.m_threads.get(tid, None)
if ctx == None:
raise ThreadNotFound
return ctx
def wait_for_first_thread(self):
"""
Wait until at least one debuggee thread is alive.
Python can have 0 threads in some circumstances, such as
embedded Python or the interactive interpreter console.
"""
if self.m_current_ctx is not None:
return
try:
self.m_threads_lock.acquire()
while self.m_current_ctx is None:
safe_wait(self.m_threads_lock, 1.0)
finally:
self.m_threads_lock.release()
def notify_first_thread(self):
"""
Notify that first thread is available for tracing.
"""
try:
self.m_threads_lock.acquire()
self.m_threads_lock.notify()
finally:
self.m_threads_lock.release()
def set_exception_trap_frame(self, frame):
"""
Set trap for unhandled exceptions in relevant frame.
"""
while frame is not None:
code_context = self.get_code_context(frame)
if code_context.is_exception_trap_frame():
code_context.m_fExceptionTrap = True
return
frame = frame.f_back
def __set_signal_handler(self):
"""
Set rpdb2 to wrap all signal handlers.
"""
for key, value in list(vars(signal).items()):
if not key.startswith('SIG') or key in ['SIG_IGN', 'SIG_DFL', 'SIGRTMIN', 'SIGRTMAX']:
continue
handler = signal.getsignal(value)
if handler in [signal.SIG_IGN, signal.SIG_DFL]:
continue
try:
signal.signal(value, handler)
except:
print_debug('Failed to set signal handler for signal %s(%d)' % (key, value))
def clear_source_cache(self):
g_lines_cache.clear()
event = CEventClearSourceCache()
self.m_event_dispatcher.fire_event(event)
def trace_dispatch_init(self, frame, event, arg):
"""
Initial tracing method.
"""
if event not in ['call', 'line', 'return']:
return None
code_context = self.get_code_context(frame)
if event == 'call' and code_context.is_untraced():
return None
self.set_exception_trap_frame(frame)
try:
t = current_thread()
name = thread_get_name(t)
except:
name = ''
if name == 'MainThread':
self.__set_signal_handler()
ctx = CDebuggerCoreThread(name, self, frame, event)
ctx.set_tracers()
try:
self.m_threads_lock.acquire()
self.m_threads[ctx.m_thread_id] = ctx
nthreads = len(self.m_threads)
if nthreads == 1:
self.prepare_embedded_sync()
finally:
self.m_threads_lock.release()
if nthreads == 1:
self.clear_source_cache()
self.m_current_ctx = ctx
self.notify_first_thread()
if self.m_f_break_on_init:
self.m_f_break_on_init = False
self.request_break()
sys.settrace(ctx.trace_dispatch_call)
sys.setprofile(ctx.profile)
self.wait_embedded_sync(nthreads == 1)
if event == 'call':
return ctx.trace_dispatch_call(frame, event, arg)
elif hasattr(frame, 'f_trace') and (frame.f_trace is not None):
return frame.f_trace(frame, event, arg)
else:
return None
def prepare_embedded_sync(self):
if not self.m_fembedded:
return
t = time.time()
t0 = self.m_embedded_sync_t0
if t0 != 0:
self.fix_heartbeats(t - t0)
if self.get_clients_attached() == 0:
return
if t - t0 < EMBEDDED_SYNC_THRESHOLD:
return
self.m_embedded_sync_t1 = t
self.m_embedded_event.clear()
def wait_embedded_sync(self, ftrigger):
if not self.m_fembedded:
return
t = time.time()
t0 = self.m_embedded_sync_t0
t1 = self.m_embedded_sync_t1
if t - t0 < EMBEDDED_SYNC_THRESHOLD:
return
if t - t1 >= EMBEDDED_SYNC_TIMEOUT:
return
if ftrigger:
event = CEventEmbeddedSync()
self.m_event_dispatcher.fire_event(event)
safe_wait(self.m_embedded_event, EMBEDDED_SYNC_TIMEOUT - (t - t1))
if ftrigger:
self.m_embedded_sync_t1 = 0
def embedded_sync(self):
self.m_embedded_event.set()
def set_all_tracers(self):
"""
Set trace methods for all frames of all threads.
"""
for ctx in list(self.m_threads.values()):
ctx.set_tracers()
def remove_thread(self, thread_id):
try:
del self.m_threads[thread_id]
if self.m_current_ctx.m_thread_id == thread_id:
self.m_current_ctx = list(self.m_threads.values())[0]
except (KeyError, IndexError):
self.m_embedded_sync_t0 = time.time()
def set_break_flag(self):
self.m_fBreak = (self.m_state_manager.get_state() == STATE_BROKEN)
def is_break(self, ctx, frame, event = None):
if self.m_fBreak:
return True
if ctx.m_fUnhandledException:
return True
if self.m_step_tid == ctx.m_thread_id:
return True
if self.m_next_frame == frame:
return True
if (self.m_return_frame == frame) and (event == 'return'):
return True
return False
def record_client_heartbeat(self, id, finit, fdetach):
"""
Record that client id is still attached.
"""
if finit:
self.m_heartbeats.pop(0, None)
if fdetach:
self.m_heartbeats.pop(id, None)
return
if finit or id in self.m_heartbeats:
self.m_heartbeats[id] = time.time()
def fix_heartbeats(self, missing_pulse):
for k, v in list(self.m_heartbeats.items()):
self.m_heartbeats[k] = v + missing_pulse
def get_clients_attached(self):
n = 0
t = time.time()
for v in list(self.m_heartbeats.values()):
if t < v + HEARTBEAT_TIMEOUT:
n += 1
return n
def is_waiting_for_attach(self):
if self.get_clients_attached() != 1:
return False
if list(self.m_heartbeats.keys()) != [0]:
return False
return True
def _break(self, ctx, frame, event, arg):
"""
Main break logic.
"""
global g_fos_exit
global g_module_main
if not self.is_break(ctx, frame, event) and not ctx.is_breakpoint():
ctx.set_tracers()
return
ctx.m_fBroken = True
f_full_notification = False
f_uhe_notification = False
step_tid = self.m_step_tid
try:
self.m_state_manager.acquire()
if self.m_state_manager.get_state() != STATE_BROKEN:
self.set_break_dont_lock()
if g_module_main == -1:
try:
g_module_main = sys.modules['__main__']
except:
g_module_main = None
if not is_py3k() and not frame.f_exc_traceback is None:
ctx.set_exc_info((frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback))
if is_py3k() and ctx.get_exc_info() == None and sys.exc_info()[2] != None:
ctx.set_exc_info(sys.exc_info())
try:
t = current_thread()
ctx.m_thread_name = thread_get_name(t)
except:
pass
if ctx.m_fUnhandledException and not self.m_fUnhandledException:
self.m_fUnhandledException = True
f_uhe_notification = True
if self.is_auto_fork_first_stage(ctx.m_thread_id):
self.m_saved_step = (self.m_step_tid, self.m_saved_next, self.m_return_frame)
self.m_saved_next = None
self.m_bp_manager.m_fhard_tbp = True
if self.m_f_first_to_break or (self.m_current_ctx == ctx):
self.m_current_ctx = ctx
self.m_lastest_event = event
self.m_step_tid = None
self.m_next_frame = None
self.m_return_frame = None
self.m_saved_next = None
self.m_bp_manager.del_temp_breakpoint(breakpoint = ctx.get_breakpoint())
self.m_f_first_to_break = False
f_full_notification = True
finally:
self.m_state_manager.release()
ffork_second_stage = self.handle_fork(ctx)
self.handle_exec(ctx)
if self.is_auto_fork_first_stage(ctx.m_thread_id):
self.request_go_quiet()
elif self.m_ffork_auto and ffork_second_stage:
(self.m_step_tid, self.m_next_frame, self.m_return_frame) = self.m_saved_step
self.m_saved_step = (None, None, None)
self.m_bp_manager.m_fhard_tbp = False
self.request_go_quiet()
elif self.get_clients_attached() == 0:
#print_debug('state: %s' % self.m_state_manager.get_state())
self.request_go_quiet()
elif step_tid == ctx.m_thread_id and frame.f_code.co_name == 'rpdb2_import_wrapper':
self.request_step_quiet()
else:
if f_full_notification:
self.send_events(None)
else:
self.notify_thread_broken(ctx.m_thread_id, ctx.m_thread_name)
self.notify_namespace()
if f_uhe_notification:
self.send_unhandled_exception_event()
state = self.m_state_manager.wait_for_state([STATE_RUNNING])
self.prepare_fork_step(ctx.m_thread_id)
self.prepare_exec_step(ctx.m_thread_id)
ctx.m_fUnhandledException = False
ctx.m_fBroken = False
ctx.set_tracers()
ctx.reset_exc_info()
if g_fos_exit:
g_fos_exit = False
self.send_event_exit()
time.sleep(1.0)
self.stoptrace()
def is_auto_fork_first_stage(self, tid):
if not self.m_ffork_auto:
return False
return tid == g_forktid and g_forkpid == None
def prepare_fork_step(self, tid):
global g_forkpid
global g_ignore_broken_pipe
if tid != g_forktid:
return
self.m_step_tid = tid
g_forkpid = os.getpid()
if not self.m_ffork_into_child:
return
n = self.get_clients_attached()
self.send_fork_switch(n)
time.sleep(0.5)
g_server.shutdown()
CThread.joinAll()
g_ignore_broken_pipe = time.time()
def handle_fork(self, ctx):
global g_forktid
global g_forkpid
tid = ctx.m_thread_id
if g_forkpid == None or tid != g_forktid:
return False
forkpid = g_forkpid
g_forkpid = None
g_forktid = None
if os.getpid() == forkpid:
#
# Parent side of fork().
#
if not self.m_ffork_into_child:
#CThread.clearJoin()
#g_server.jumpstart()
return True
self.stoptrace()
return False
#
# Child side of fork().
#
if not self.m_ffork_into_child:
self.stoptrace()
return False
self.m_threads = {tid: ctx}
CThread.clearJoin()
g_server.jumpstart()
return True
def prepare_exec_step(self, tid):
global g_execpid
if tid != g_exectid:
return
self.m_step_tid = tid
g_execpid = os.getpid()
n = self.get_clients_attached()
self.send_exec_switch(n)
time.sleep(0.5)
g_server.shutdown()
CThread.joinAll()
def handle_exec(self, ctx):
global g_exectid
global g_execpid
tid = ctx.m_thread_id
if g_execpid == None or tid != g_exectid:
return False
g_execpid = None
g_exectid = None
#
# If we are here it means that the exec failed.
# Jumpstart the debugger to allow debugging to continue.
#
CThread.clearJoin()
g_server.jumpstart()
return True
def notify_thread_broken(self, tid, name):
"""
Notify that thread (tid) has broken.
This notification is sent for each thread that breaks after
the first one.
"""
_event = CEventThreadBroken(tid, name)
self.m_event_dispatcher.fire_event(_event)
# -*- coding: utf-8 -*-
##
# Copyright 2018 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
import asyncio
from collections import OrderedDict
from time import time
# from osm_common.dbbase import DbException
__author__ = "<NAME>"
class LcmException(Exception):
pass
class LcmExceptionNoMgmtIP(LcmException):
pass
class LcmExceptionExit(LcmException):
pass
def versiontuple(v):
"""utility for compare dot separate versions. Fills with zeros to proper number comparison
package version will be something like 4.0.1.post11+gb3f024d.dirty-1. Where 4.0.1 is the git tag, postXX is the
number of commits from this tag, and +XXXXXXX is the git commit short id. Total length is 16 with until 999 commits
"""
filled = []
for point in v.split("."):
point, _, _ = point.partition("+")
point, _, _ = point.partition("-")
filled.append(point.zfill(20))
return tuple(filled)
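# Illustrative comparison (hypothetical versions): zero-filling makes the
# ordering numeric, which a plain split would get wrong lexicographically:
#
#   versiontuple("4.10.0") > versiontuple("4.9.2")    # True
#   "4.10.0".split(".") > "4.9.2".split(".")          # False ("1" < "9")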
def deep_get(target_dict, key_list, default_value=None):
"""
Get a value from target_dict by following the nested keys in key_list. If a key does not exist, default_value is returned
Example target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both key_list=[a,b,c] and key_list=[f,h] return None
:param target_dict: dictionary to be read
:param key_list: list of keys to read from target_dict
:param default_value: value to return if key is not present in the nested dictionary
:return: The wanted value if it exists, default_value otherwise
"""
for key in key_list:
if not isinstance(target_dict, dict) or key not in target_dict:
return default_value
target_dict = target_dict[key]
return target_dict
def get_iterable(in_dict, in_key):
"""
Similar to <dict>.get(), but if the value is falsy (None, False, ...), an empty tuple is returned instead
:param in_dict: a dictionary
:param in_key: the key to look for at in_dict
:return: in_dict[in_key] or () if it is falsy or not present
"""
if not in_dict.get(in_key):
return ()
return in_dict[in_key]
def populate_dict(target_dict, key_list, value):
"""
Update target_dict creating nested dictionaries with the key_list. The last key_list item is assigned the value.
Example target_dict={K: J}; key_list=[a,b,c]; target_dict will be {K: J, a: {b: {c: value}}}
:param target_dict: dictionary to be changed
:param key_list: list of keys to insert at target_dict
:param value:
:return: None
"""
for key in key_list[0:-1]:
if key not in target_dict:
target_dict[key] = {}
target_dict = target_dict[key]
target_dict[key_list[-1]] = value
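# Quick illustration (hypothetical values):
#
#   d = {"K": "J"}
#   populate_dict(d, ["a", "b", "c"], 5)
#   # d is now {"K": "J", "a": {"b": {"c": 5}}}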
class LcmBase:
def __init__(self, db, msg, fs, logger):
"""
:param db: database connection
"""
self.db = db
self.msg = msg
self.fs = fs
self.logger = logger
def update_db_2(self, item, _id, _desc):
"""
Updates database with _desc information. If success _desc is cleared
:param item:
:param _id:
:param _desc: dictionary with the content to update. Keys are dot-separated paths into nested dictionaries
:return: None. Exception is raised on error
"""
if not _desc:
return
now = time()
_desc["_admin.modified"] = now
self.db.set_one(item, {"_id": _id}, _desc)
_desc.clear()
# except DbException as e:
# self.logger.error("Updating {} _id={} with '{}'. Error: {}".format(item, _id, _desc, e))
class TaskRegistry(LcmBase):
"""
Implements a registry of tasks, needed for later cancellation and for looking up related tasks that must
complete before others can start. It stores a four-level dict
First level is the topic, ns, vim_account, sdn
Second level is the _id
Third level is the operation id
Fourth level is a descriptive name, the value is the task class
The HA (High-Availability) methods are used when more than one LCM instance is running.
To register the current task in the external DB, use LcmBase as base class, to be able
to reuse LcmBase.update_db_2()
The DB registry uses the following fields to distinguish a task:
- op_type: operation type ("nslcmops" or "nsilcmops")
- op_id: operation ID
- worker: the worker ID for this process
"""
# NS/NSI: "services" VIM/WIM/SDN: "accounts"
topic_service_list = ['ns', 'nsi']
topic_account_list = ['vim', 'wim', 'sdn', 'k8scluster', 'k8srepo']
# Map topic to InstanceID
topic2instid_dict = {
'ns': 'nsInstanceId',
'nsi': 'netsliceInstanceId'}
# Map topic to DB table name
topic2dbtable_dict = {
'ns': 'nslcmops',
'nsi': 'nsilcmops',
'vim': 'vim_accounts',
'wim': 'wim_accounts',
'sdn': 'sdns',
'k8scluster': 'k8sclusters',
'k8srepo': 'k8srepos'}
def __init__(self, worker_id=None, db=None, logger=None):
self.task_registry = {
"ns": {},
"nsi": {},
"vim_account": {},
"wim_account": {},
"sdn": {},
"k8scluster": {},
"k8srepo": {},
}
self.worker_id = worker_id
self.db = db
self.logger = logger
def register(self, topic, _id, op_id, task_name, task):
"""
Register a new task
:param topic: Can be "ns", "nsi", "vim_account", "sdn"
:param _id: _id of the related item
:param op_id: id of the operation of the related item
:param task_name: Task descriptive name, as create, instantiate, terminate. Must be unique in this op_id
:param task: Task class
:return: none
"""
if _id not in self.task_registry[topic]:
self.task_registry[topic][_id] = OrderedDict()
if op_id not in self.task_registry[topic][_id]:
self.task_registry[topic][_id][op_id] = {task_name: task}
else:
self.task_registry[topic][_id][op_id][task_name] = task
# print("registering task", topic, _id, op_id, task_name, task)
def remove(self, topic, _id, op_id, task_name=None):
"""
When a task has ended it should be removed. Missing tasks are ignored. Completed tasks with the same _id are also removed
:param topic: Can be "ns", "nsi", "vim_account", "sdn"
:param _id: _id of the related item
:param op_id: id of the operation of the related item
:param task_name: Task descriptive name. If none it deletes all tasks with same _id and op_id
:return: None
"""
if not self.task_registry[topic].get(_id):
return
if not task_name:
self.task_registry[topic][_id].pop(op_id, None)
elif self.task_registry[topic][_id].get(op_id):
self.task_registry[topic][_id][op_id].pop(task_name, None)
# delete done tasks
for op_id_ in list(self.task_registry[topic][_id]):
for name, task in self.task_registry[topic][_id][op_id_].items():
if not task.done():
break
else:
del self.task_registry[topic][_id][op_id_]
if not self.task_registry[topic][_id]:
del self.task_registry[topic][_id]
def lookfor_related(self, topic, _id, my_op_id=None):
task_list = []
task_name_list = []
if _id not in self.task_registry[topic]:
return "", task_name_list
for op_id in reversed(self.task_registry[topic][_id]):
if my_op_id:
if my_op_id == op_id:
my_op_id = None # so that the next task is taken
continue
for task_name, task in self.task_registry[topic][_id][op_id].items():
if not task.done():
task_list.append(task)
task_name_list.append(task_name)
break
return ", ".join(task_name_list), task_list
def cancel(self, topic, _id, target_op_id=None, target_task_name=None):
"""
Cancel all active tasks of a concrete ns, nsi, vim_account or sdn identified by _id. If target_op_id is
supplied only that operation is cancelled, and likewise for target_task_name
"""
if not self.task_registry[topic].get(_id):
return
for op_id in reversed(self.task_registry[topic][_id]):
if target_op_id and target_op_id != op_id:
continue
for task_name, task in self.task_registry[topic][_id][op_id].items():
if target_task_name and target_task_name != task_name:
continue
# result =
task.cancel()
# if result:
# self.logger.debug("{} _id={} order_id={} task={} cancelled".format(topic, _id, op_id, task_name))
# Is topic NS/NSI?
def _is_service_type_HA(self, topic):
return topic in self.topic_service_list
# Is topic VIM/WIM/SDN?
def _is_account_type_HA(self, topic):
return topic in self.topic_account_list
# Input: op_id, example: 'abc123def:3' Output: account_id='abc123def', op_index=3
def _get_account_and_op_HA(self, op_id):
if not op_id:
return None, None
account_id, _, op_index = op_id.rpartition(':')
if not account_id or not op_index.isdigit():
return None, None
return account_id, op_index
# Get '_id' for any topic and operation
def _get_instance_id_HA(self, topic, op_type, op_id):
_id = None
# Special operation 'ANY', for SDN account associated to a VIM account: op_id as '_id'
if op_type == 'ANY':
_id = op_id
# NS/NSI: Use op_id as '_id'
elif self._is_service_type_HA(topic):
_id = op_id
# VIM/SDN/WIM/K8SCLUSTER: Split op_id to get Account ID and Operation Index, use Account ID as '_id'
elif self._is_account_type_HA(topic):
_id, _ = self._get_account_and_op_HA(op_id)
return _id
# Set DB _filter for querying any related process state
def _get_waitfor_filter_HA(self, db_lcmop, topic, op_type, op_id):
_filter = {}
# Special operation 'ANY', for SDN account associated to a VIM account: op_id as '_id'
# In this special case, the timestamp is ignored
if op_type == 'ANY':
_filter = {'operationState': 'PROCESSING'}
# Otherwise, get 'startTime' timestamp for this operation
else:
# NS/NSI
if self._is_service_type_HA(topic):
now = time()
starttime_this_op = db_lcmop.get("startTime")
instance_id_label = self.topic2instid_dict.get(topic)
instance_id = db_lcmop.get(instance_id_label)
_filter = {instance_id_label: instance_id,
'operationState': 'PROCESSING',
'startTime.lt': starttime_this_op,
"_admin.modified.gt": now - 2*3600, # ignore if tow hours of inactivity
}
# VIM/WIM/SDN/K8scluster
elif self._is_account_type_HA(topic):
_, op_index = self._get_account_and_op_HA(op_id)
_ops = db_lcmop['_admin']['operations']
_this_op = _ops[int(op_index)]
starttime_this_op = _this_op.get('startTime', None)
_filter = {'operationState': 'PROCESSING',
'startTime.lt': starttime_this_op}
return _filter
s3.dataTable.js and the values used.
config = Storage()
config.id = id
config.lengthMenu = attr.get("dt_lengthMenu",
[[ 25, 50, -1], [ 25, 50, str(current.T("All"))]]
)
config.displayLength = attr.get("dt_displayLength", s3.ROWSPERPAGE)
config.sDom = attr.get("dt_sDom", 'fril<"dataTable_table"t>pi')
config.pagination = attr.get("dt_pagination", "true")
config.paginationType = attr.get("dt_pagination_type", "full_numbers")
config.bFilter = attr.get("dt_bFilter", "true")
url = URL(c=request.controller,
f=request.function,
args=request.args,
vars=request.get_vars,
)
_ajaxUrl = s3_set_extension( url, "aadata")
config.ajaxUrl = attr.get("dt_ajax_url", _ajaxUrl)
config.rowStyles = attr.get("dt_styles", [])
rowActions = attr.get("dt_row_actions", s3.actions)
if rowActions:
config.rowActions = rowActions
else:
config.rowActions = []
bulkActions = attr.get("dt_bulk_actions", None)
if bulkActions and not isinstance(bulkActions, list):
bulkActions = [bulkActions]
config.bulkActions = bulkActions
config.bulkCol = bulkCol = attr.get("dt_bulk_col", 0)
action_col = attr.get("dt_action_col", 0)
if bulkActions and bulkCol <= action_col:
action_col += 1
config.actionCol = action_col
group_list = attr.get("dt_group", [])
if not isinstance(group_list, list):
group_list = [group_list]
dt_group = []
for group in group_list:
if bulkActions and bulkCol <= group:
group += 1
if action_col >= group:
group -= 1
dt_group.append([group, "asc"])
config.group = dt_group
config.groupTotals = attr.get("dt_group_totals", [])
config.groupTitles = attr.get("dt_group_titles", [])
config.groupSpacing = attr.get("dt_group_space", "false")
for order in orderby:
if bulkActions:
if bulkCol <= order[0]:
order[0] += 1
if action_col >= order[0]:
order[0] -= 1
config.aaSort = orderby
config.textMaxLength = attr.get("dt_text_maximum_len", 80)
config.textShrinkLength = attr.get("dt_text_condense_len", 75)
config.shrinkGroupedRows = attr.get("dt_shrink_groups", "false")
config.groupIcon = attr.get("dt_group_types", [])
# Wrap the table in a form and add some data in hidden fields
form = FORM(_class="dt-wrapper")
if not s3.no_formats and len(html) > 0:
# @todo: always *render* both export options and permalink,
# even if the initial table is empty, so that
# Ajax-update can unhide them once there are results
# @todo: move export-format update into fnDrawCallback
# @todo: poor UX with onclick-JS, better to render real
# links which can be bookmarked, and then update them
# in fnDrawCallback
permalink = attr.get("dt_permalink", None)
base_url = attr.get("dt_base_url", None)
form.append(S3DataTable.listFormats(rfields,
permalink=permalink,
base_url=base_url))
form.append(html)
# Add the configuration details for this dataTable
form.append(INPUT(_type="hidden",
_id="%s_configurations" % id,
_name="config",
_value=jsons(config)))
# If we have a cache set up then pass it in
if cache:
form.append(INPUT(_type="hidden",
_id="%s_dataTable_cache" %id,
_name="cache",
_value=jsons(cache)))
# If we have bulk actions then add the hidden fields
if bulkActions:
form.append(INPUT(_type="hidden",
_id="%s_dataTable_bulkMode" % id,
_name="mode",
_value="Inclusive"))
bulk_selected = attr.get("dt_bulk_selected", "")
if isinstance(bulk_selected, list):
bulk_selected = ",".join(bulk_selected)
form.append(INPUT(_type="hidden",
_id="%s_dataTable_bulkSelection" % id,
_name="selected",
_value="[%s]" % bulk_selected))
form.append(INPUT(_type="hidden",
_id="%s_dataTable_filterURL" % id,
_class="dataTable_filterURL",
_name="filterURL",
_value="%s" % config.ajaxUrl))
return form
# -------------------------------------------------------------------------
# Helper methods
# -------------------------------------------------------------------------
def table(self, id, flist=None, action_col=0):
"""
Method to render the data as an HTML table. This is of use if
a plain HTML table is required without the dataTable goodness. However,
if you want HTML for a dataTable then use the html() method
@param id: The id of the table
@param flist: The list of fields
@param action_col: The column where action columns will be displayed
(this is required by dataTables)
"""
data = self.data
heading = self.heading
start = self.start
end = self.end
if not flist:
flist = self.colnames
# Build the header row
header = THEAD()
tr = TR()
for field in flist:
if field == "BULK":
tr.append(TH(""))
else:
tr.append(TH(heading[field]))
header.append(tr)
body = TBODY()
if data:
# Build the body rows (the actual data)
rc = 0
for i in xrange(start, end):
row = data[i]
if rc % 2 == 0:
_class = "even"
else:
_class = "odd"
rc += 1
tr = TR(_class=_class)
for field in flist:
# Insert a checkbox for bulk select
if field == "BULK":
tr.append(TD(INPUT(_id="select%s" % row[flist[action_col]],
_type="checkbox",
_class="bulkcheckbox",
)))
else:
tr.append(TD(row[field]))
body.append(tr)
table = TABLE([header, body], _id=id, _class="dataTable display")
return table
# -------------------------------------------------------------------------
def aadata(self,
totalrows,
displayrows,
id,
sEcho,
flist,
stringify=True,
action_col=None,
**attr
):
"""
Method to render the data into a json object
@param totalrows: The total rows in the unfiltered query.
@param displayrows: The total rows in the filtered query.
@param id: The id of the table to which this ajax call will
respond.
@param sEcho: An unaltered copy of sEcho sent from the client used
by dataTables as a draw count.
@param flist: The list of fields
@param attr: dictionary of attributes which can be passed in
dt_action_col: The column where the action buttons will be placed
dt_bulk_actions: list of labels for the bulk actions.
dt_bulk_col: The column in which the checkboxes will appear,
by default it will be the column immediately
before the first data item
dt_group_totals: The number of records in each group.
This will be displayed in parentheses
after the group title.
"""
data = self.data
if not flist:
flist = self.colnames
start = self.start
end = self.end
if action_col is None:
action_col = attr.get("dt_action_col", 0)
structure = {}
aadata = []
for i in xrange(start, end):
row = data[i]
details = []
for field in flist:
if field == "BULK":
details.append("<INPUT id='select%s' type='checkbox' class='bulkcheckbox'>" % \
row[flist[action_col]])
else:
details.append(s3_unicode(row[field]))
aadata.append(details)
structure["dataTable_id"] = id
structure["dataTable_filter"] = self.filterString
structure["dataTable_groupTotals"] = attr.get("dt_group_totals", [])
structure["dataTable_sort"] = self.orderby
structure["aaData"] = aadata
structure["iTotalRecords"] = totalrows
structure["iTotalDisplayRecords"] = displayrows
structure["sEcho"] = sEcho
if stringify:
from gluon.serializers import json as jsons
return jsons(structure)
else:
return structure
# =============================================================================
class S3DataList(object):
""" Class representing a data list """
# -------------------------------------------------------------------------
# Standard API
# -------------------------------------------------------------------------
def __init__(self,
resource,
list_fields,
records,
start=None,
limit=None,
total=None,
list_id=None,
layout=None,
row_layout=None):
"""
Constructor
@param resource: the S3Resource
@param list_fields: the list fields
(list of field selector strings)
@param records: the records
@param start: index of the first item
@param limit: maximum number of items
@param total: total number of available items
@param list_id: the HTML ID for this list
@param layout: item renderer (optional) as function
(list_id, item_id, resource, rfields, record)
@param row_layout: row renderer (optional) as
function(list_id, resource, rowsize, items)
"""
self.resource = resource
self.list_fields = list_fields
self.records = records
if list_id is None:
self.list_id = "datalist"
else:
self.list_id = list_id
if layout is not None:
self.layout = layout
else:
self.layout = S3DataListLayout()
self.row_layout = row_layout
self.start = start if start else 0
self.limit = limit if limit else 0
self.total = total if total else 0
# ---------------------------------------------------------------------
def html(self,
start=None,
limit=None,
pagesize=None,
rowsize=None,
ajaxurl=None,
empty=None,
popup_url=None,
popup_title=None,
):
"""
Render list data as HTML (nested DIVs)
@param start: index of the first item (in this page)
@param limit: (actual) number of items (in this page)
@param pagesize: maximum number of items per page
@param rowsize: number of items per row
@param ajaxurl: the URL to Ajax-update the datalist
@param popup_url: the URL for the modal used for the 'more'
button (=> we deactivate InfiniteScroll)
@param popup_title: the title for the modal
"""
T = current.T
resource = self.resource
list_fields = self.list_fields
rfields = resource.resolve_selectors(list_fields)[0]
list_id = self.list_id
render = self.layout
render_row = self.row_layout
if not rowsize:
rowsize = 1
pkey = str(resource._id)
records = self.records
if records is not None:
# Call prep if present
if hasattr(render, "prep"):
render.prep(resource, records)
items = [
DIV(T("Total Records: %(numrows)s") % {"numrows": self.total},
_class="dl-header",
_id="%s-header" % list_id)
]
if empty is None:
empty = resource.crud.crud_string(resource.tablename,
"msg_no_match")
empty = DIV(empty, _class="dl-empty")
if self.total > 0:
empty.update(_style="display:none;")
items.append(empty)
row_idx = int(self.start / rowsize) + 1
for group in self.groups(records, rowsize):
row = []
col_idx = 0
for record in group:
if pkey in record:
item_id = "%s-%s" % (list_id, record[pkey])
else:
# template
item_id = "%s-[id]" % list_id
item = render(list_id,
item_id,
resource,
rfields,
record)
if hasattr(item, "add_class"):
_class = "dl-item dl-%s-cols dl-col-%s" % (rowsize, col_idx)
item.add_class(_class)
row.append(item)
col_idx += 1
_class = "dl-row %s" % ((row_idx % 2) and "even" or "odd")
if render_row:
row = render_row(list_id,
resource,
rowsize,
row)
if hasattr(row, "add_class"):
row.add_class(_class)
else:
row = DIV(row, _class=_class)
items.append(row)
row_idx += 1
else:
# template
raise NotImplementedError
dl = DIV(items,
_class="dl",
_id=list_id,
)
dl_data = {"startindex": start,
"maxitems": limit,
"totalitems": self.total,
"pagesize": pagesize,
"rowsize": rowsize,
"ajaxurl": ajaxurl,
}
if popup_url:
input_class = "dl-pagination"
a_class = "s3_modal"
#dl_data["popup_url"] = popup_url
#dl_data["popup_title"] = popup_title
else:
input_class = "dl-pagination | |
return X.asformat("csr")
return X
def kronpow(a, p, **kron_opts):
"""Returns `a` tensored with itself `p` times
Equivalent to ``reduce(lambda x, y: x & y, [a] * p)``.
Parameters
----------
a : dense or sparse vector or operator
Object to tensor power.
p : int
Tensor power.
kron_opts :
Supplied to :func:`~quimb.kron`.
Returns
-------
dense or sparse vector or operator
"""
ops = (a,) * p
return kron(*ops, **kron_opts)
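# Illustrative equivalence for a hypothetical operator A:
#
#   kronpow(A, 3)    # same as kron(A, A, A)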
def _find_shape_of_nested_int_array(x):
"""Take a n-nested list/tuple of integers and find its array shape.
"""
shape = [len(x)]
sub_x = x[0]
while not np.issubdtype(type(sub_x), np.integer):
shape.append(len(sub_x))
sub_x = sub_x[0]
return tuple(shape)
def _dim_map_1d(sza, coos):
for coo in coos:
if 0 <= coo < sza:
yield coo
else:
raise ValueError("One or more coordinates out of range.")
def _dim_map_1dtrim(sza, coos):
return (coo for coo in coos if (0 <= coo < sza))
def _dim_map_1dcyclic(sza, coos):
return (coo % sza for coo in coos)
def _dim_map_2dcyclic(sza, szb, coos):
return (szb * (coo[0] % sza) + coo[1] % szb for coo in coos)
def _dim_map_2dtrim(sza, szb, coos):
for coo in coos:
x, y = coo
if 0 <= x < sza and 0 <= y < szb:
yield szb * x + y
def _dim_map_2d(sza, szb, coos):
for coo in coos:
x, y = coo
if 0 <= x < sza and 0 <= y < szb:
yield szb * x + y
else:
raise ValueError("One or more coordinates out of range.")
def _dim_map_nd(szs, coos, cyclic=False, trim=False):
strides = [1]
for sz in szs[-1:0:-1]:
strides.insert(0, sz * strides[0])
if cyclic:
coos = ((c % sz for c, sz in zip(coo, szs)) for coo in coos)
elif trim:
coos = (c for c in coos if all(x == x % sz for x, sz in zip(c, szs)))
elif not all(all(c == c % sz for c, sz in zip(coo, szs)) for coo in coos):
raise ValueError("One or more coordinates out of range.")
return (sum(c * m for c, m in zip(coo, strides)) for coo in coos)
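# Worked example (hypothetical sizes): for szs = (2, 3, 4) the strides come
# out as [12, 4, 1], so coordinate (1, 2, 3) maps to 1*12 + 2*4 + 3*1 = 23.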
_dim_mapper_methods = {(1, False, False): _dim_map_1d,
(1, False, True): _dim_map_1dtrim,
(1, True, False): _dim_map_1dcyclic,
(2, False, False): _dim_map_2d,
(2, False, True): _dim_map_2dtrim,
(2, True, False): _dim_map_2dcyclic}
def dim_map(dims, coos, cyclic=False, trim=False):
"""Flatten 2d+ dimensions and coordinates.
Maps multi-dimensional coordinates and indices to flat arrays in a
regular way. Wraps or deletes coordinates beyond the system size
depending on parameters ``cyclic`` and ``trim``.
Parameters
----------
dims : nested tuple of int
Multi-dim array of systems' internal dimensions.
coos : list of tuples of int
Array of coordinate tuples to convert
cyclic : bool, optional
Whether to automatically wrap coordinates beyond system size or
delete them.
trim : bool, optional
If True, any coordinates beyond dimensions will be deleted,
overridden by cyclic.
Returns
-------
flat_dims : tuple
Flattened version of ``dims``.
inds : tuple
Indices corresponding to the original coordinates.
Examples
--------
>>> dims = [[2, 3], [4, 5]]
>>> coords = [(0, 0), (1, 1)]
>>> flat_dims, inds = dim_map(dims, coords)
>>> flat_dims
(2, 3, 4, 5)
>>> inds
(0, 3)
>>> dim_map(dims, [(2, 0), (-1, 1)], cyclic=True)
((2, 3, 4, 5), (0, 3))
"""
# Figure out shape of dimensions given
if isinstance(dims, np.ndarray):
szs = dims.shape
ndim = dims.ndim
else:
szs = _find_shape_of_nested_int_array(dims)
ndim = len(szs)
# Ensure `coos` is in the right format for 1d (i.e. not single tuples)
if ndim == 1:
if isinstance(coos, np.ndarray):
coos = coos.ravel()
elif not isinstance(coos[0], Integral):
coos = (c[0] for c in coos)
# Map coordinates to indices
try:
inds = _dim_mapper_methods[(ndim, cyclic, trim)](*szs, coos)
except KeyError:
inds = _dim_map_nd(szs, coos, cyclic, trim)
# Ravel dims
while ndim > 1:
dims = itertools.chain.from_iterable(dims)
ndim -= 1
return tuple(dims), tuple(inds)
# numba decorator can't cache generator
@njit_nocache # pragma: no cover
def _dim_compressor(dims, inds): # pragma: no cover
"""Helper function for ``dim_compress`` that does the heavy lifting.
Parameters
----------
dims : sequence of int
The subsystem dimensions.
inds : sequence of int
The indices of the 'marked' subsystems.
Returns
-------
generator of (int, int)
Sequence of (size, flag) pairs describing the merged blocks, where the
flag marks whether the block is a target (1) or an identity (0).
"""
blocksize_id = blocksize_op = 1
autoplace_count = 0
for i, dim in enumerate(dims):
if dim < 0:
if blocksize_op > 1:
yield (blocksize_op, 1)
blocksize_op = 1
elif blocksize_id > 1:
yield (blocksize_id, 0)
blocksize_id = 1
autoplace_count += dim
elif i in inds:
if blocksize_id > 1:
yield (blocksize_id, 0)
blocksize_id = 1
elif autoplace_count < 0:
yield (autoplace_count, 1)
autoplace_count = 0
blocksize_op *= dim
else:
if blocksize_op > 1:
yield (blocksize_op, 1)
blocksize_op = 1
elif autoplace_count < 0:
yield (autoplace_count, 1)
autoplace_count = 0
blocksize_id *= dim
yield ((blocksize_op, 1) if blocksize_op > 1 else
(blocksize_id, 0) if blocksize_id > 1 else
(autoplace_count, 1))
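# Worked example (illustrative): dims = (2, 2, 3) with inds = (1,) yields
# (2, 0), (2, 1), (3, 0): an identity block of size 2, the marked site of
# size 2, then an identity block of size 3.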
def dim_compress(dims, inds):
"""Compress neighbouring subsytem dimensions.
Take some dimensions and target indices and compress both, i.e.
merge adjacent dimensions that are both either in ``dims`` or not. For
example, if tensoring an operator onto a single site, with many sites
the identity, treat these as single large identities.
Parameters
----------
dims : tuple of int
List of system's dimensions - 1d or flattened (e.g. with
``dim_map``).
inds: tuple of int
List of target indices, i.e. dimensions not to merge.
Returns
-------
dims : tuple of int
New compressed dimensions.
inds : tuple of int
New indices corresponding to the compressed dimensions. These are
guaranteed to now be alternating, i.e. either (0, 2, ...) or
(1, 3, ...).
Examples
--------
>>> dims = [2] * 10
>>> inds = [3, 4]
>>> compressed_dims, compressed_inds = dim_compress(dims, inds)
>>> compressed_dims
(8, 4, 32)
>>> compressed_inds
(1,)
"""
if isinstance(inds, Integral):
inds = (inds,)
dims, inds = zip(*_dim_compressor(dims, inds))
inds = tuple(i for i, b in enumerate(inds) if b)
return dims, inds
def ikron(ops, dims, inds, sparse=None, stype=None,
coo_build=False, parallel=False, ownership=None):
"""Tensor an operator into a larger space by padding with identities.
Automatically placing a large operator over several dimensions is allowed
and a list of operators can be given which are then placed cyclically.
Parameters
----------
ops : operator or sequence of operators
Operator(s) to place into the tensor space. If more than one, these
are cyclically placed at each of the ``dims`` specified by ``inds``.
dims : sequence of int or nested sequences of int
The subsystem dimensions. If treated as an array, should have the same
number of dimensions as the system.
inds : tuple of int, or sequence of tuple of int
Indices, or coordinates, of the dimensions to place operator(s) on.
Each dimension specified can be smaller than the size of ``op`` (as
long as it factorizes it).
sparse : bool, optional
Whether to construct the new operator in sparse form.
stype : str, optional
If sparse, which format to use for the output.
coo_build : bool, optional
Whether to build the intermediary matrices using the ``'coo'``
format - can be faster to build sparse in this way, then
convert to chosen format, including dense.
parallel : bool, optional
Whether to build the operator in parallel using threads (only good
for big (d > 2**16) operators).
ownership : (int, int), optional
If given, only construct the rows in ``range(*ownership)``. Such that
the final operator is actually ``X[slice(*ownership), :]``. Useful for
constructing operators in parallel, e.g. for MPI.
Returns
-------
qarray or sparse matrix
Operator such that ops act on ``dims[inds]``.
See Also
--------
kron, pkron
Examples
--------
Place an operator between two identities:
>>> IZI = ikron(pauli('z'), [2, 2, 2], 1)
>>> np.allclose(IZI, eye(2) & pauli('z') & eye(2))
True
Overlay a large operator on several sites:
>>> rho_ab = rand_rho(4)
>>> rho_abc = ikron(rho_ab, [5, 2, 2, 7], [1, 2]) # overlay both 2s
>>> rho_abc.shape
(140, 140)
Place an operator at specified sites, regardless of size:
>>> A = rand_herm(5)
>>> ikron(A, [2, -1, 2, -1, 2, -1], [1, 3, 5]).shape
(1000, 1000)
"""
10,
(39, '<'): 10,
(39, '='): 10,
(39, '>'): 10,
(39, '?'): 10,
(39, '@'): 10,
(39, 'A'): 10,
(39, 'B'): 10,
(39, 'C'): 10,
(39, 'D'): 10,
(39, 'E'): 10,
(39, 'F'): 10,
(39, 'G'): 10,
(39, 'H'): 10,
(39, 'I'): 10,
(39, 'J'): 10,
(39, 'K'): 10,
(39, 'L'): 10,
(39, 'M'): 10,
(39, 'N'): 10,
(39, 'O'): 10,
(39, 'P'): 10,
(39, 'Q'): 10,
(39, 'R'): 10,
(39, 'S'): 10,
(39, 'T'): 10,
(39, 'U'): 10,
(39, 'V'): 10,
(39, 'W'): 10,
(39, 'X'): 10,
(39, 'Y'): 10,
(39, 'Z'): 10,
(39, '['): 10,
(39, '\\'): 10,
(39, ']'): 10,
(39, '^'): 10,
(39, '_'): 10,
(39, '`'): 10,
(39, 'a'): 10,
(39, 'b'): 10,
(39, 'c'): 10,
(39, 'd'): 10,
(39, 'e'): 10,
(39, 'f'): 10,
(39, 'g'): 10,
(39, 'h'): 10,
(39, 'i'): 10,
(39, 'j'): 10,
(39, 'k'): 10,
(39, 'l'): 10,
(39, 'm'): 10,
(39, 'n'): 10,
(39, 'o'): 10,
(39, 'p'): 10,
(39, 'q'): 10,
(39, 'r'): 10,
(39, 's'): 10,
(39, 't'): 10,
(39, 'u'): 10,
(39, 'v'): 10,
(39, 'w'): 10,
(39, 'x'): 10,
(39, 'y'): 10,
(39, 'z'): 10,
(39, '{'): 10,
(39, '|'): 10,
(39, '}'): 10,
(39, '~'): 10,
(39, '\x7f'): 10,
(39, '\x80'): 10,
(39, '\x81'): 10,
(39, '\x82'): 10,
(39, '\x83'): 10,
(39, '\x84'): 10,
(39, '\x85'): 10,
(39, '\x86'): 10,
(39, '\x87'): 10,
(39, '\x88'): 10,
(39, '\x89'): 10,
(39, '\x8a'): 10,
(39, '\x8b'): 10,
(39, '\x8c'): 10,
(39, '\x8d'): 10,
(39, '\x8e'): 10,
(39, '\x8f'): 10,
(39, '\x90'): 10,
(39, '\x91'): 10,
(39, '\x92'): 10,
(39, '\x93'): 10,
(39, '\x94'): 10,
(39, '\x95'): 10,
(39, '\x96'): 10,
(39, '\x97'): 10,
(39, '\x98'): 10,
(39, '\x99'): 10,
(39, '\x9a'): 10,
(39, '\x9b'): 10,
(39, '\x9c'): 10,
(39, '\x9d'): 10,
(39, '\x9e'): 10,
(39, '\xa0'): 10,
(39, '\xa1'): 10,
(39, '\xa2'): 10,
(39, '\xa3'): 10,
(39, '\xa4'): 10,
(39, '\xa5'): 10,
(39, '\xa6'): 10,
(39, '\xa7'): 10,
(39, '\xa8'): 10,
(39, '\xa9'): 10,
(39, '\xac'): 10,
(39, '\xad'): 10,
(39, '\xae'): 10,
(39, '\xaf'): 10,
(39, '\xb0'): 10,
(39, '\xb1'): 10,
(39, '\xb2'): 10,
(39, '\xb3'): 10,
(39, '\xb4'): 10,
(39, '\xb5'): 10,
(39, '\xb6'): 10,
(39, '\xb7'): 10,
(39, '\xb8'): 10,
(39, '\xb9'): 10,
(39, '\xba'): 10,
(39, '\xbb'): 10,
(39, '\xbc'): 10,
(39, '\xbd'): 10,
(39, '\xbe'): 10,
(39, '\xbf'): 10,
(39, '\xc0'): 10,
(39, '\xc1'): 10,
(39, '\xc2'): 10,
(39, '\xc3'): 10,
(39, '\xc4'): 10,
(39, '\xc5'): 10,
(39, '\xc6'): 10,
(39, '\xc7'): 10,
(39, '\xc8'): 10,
(39, '\xc9'): 10,
(39, '\xca'): 10,
(39, '\xcb'): 10,
(39, '\xcc'): 10,
(39, '\xcd'): 10,
(39, '\xce'): 10,
(39, '\xcf'): 10,
(39, '\xd0'): 10,
(39, '\xd1'): 10,
(39, '\xd2'): 10,
(39, '\xd3'): 10,
(39, '\xd4'): 10,
(39, '\xd5'): 10,
(39, '\xd6'): 10,
(39, '\xd7'): 10,
(39, '\xd8'): 10,
(39, '\xd9'): 10,
(39, '\xda'): 10,
(39, '\xdb'): 10,
(39, '\xdc'): 10,
(39, '\xdd'): 10,
(39, '\xde'): 10,
(39, '\xdf'): 10,
(39, '\xe0'): 10,
(39, '\xe1'): 10,
(39, '\xe3'): 10,
(39, '\xe4'): 10,
(39, '\xe5'): 10,
(39, '\xe6'): 10,
(39, '\xe7'): 10,
(39, '\xe8'): 10,
(39, '\xe9'): 10,
(39, '\xea'): 10,
(39, '\xeb'): 10,
(39, '\xec'): 10,
(39, '\xed'): 10,
(39, '\xee'): 10,
(39, '\xef'): 10,
(39, '\xf0'): 10,
(39, '\xf1'): 10,
(39, '\xf2'): 10,
(39, '\xf3'): 10,
(39, '\xf4'): 10,
(39, '\xf5'): 10,
(39, '\xf6'): 10,
(39, '\xf7'): 10,
(39, '\xf8'): 10,
(39, '\xf9'): 10,
(39, '\xfa'): 10,
(39, '\xfb'): 10,
(39, '\xfc'): 10,
(39, '\xfd'): 10,
(39, '\xfe'): 10,
(39, '\xff'): 10,
(41, '\x00'): 10,
(41, '\x01'): 10,
(41, '\x02'): 10,
(41, '\x03'): 10,
(41, '\x04'): 10,
(41, '\x05'): 10,
(41, '\x06'): 10,
(41, '\x07'): 10,
(41, '\x08'): 10,
(41, '\x0b'): 10,
(41, '\x0e'): 10,
(41, '\x0f'): 10,
(41, '\x10'): 10,
(41, '\x11'): 10,
(41, '\x12'): 10,
(41, '\x13'): 10,
(41, '\x14'): 10,
(41, '\x15'): 10,
(41, '\x16'): 10,
(41, '\x17'): 10,
(41, '\x18'): 10,
(41, '\x19'): 10,
(41, '\x1a'): 10,
(41, '\x1b'): 10,
(41, '\x1c'): 10,
(41, '\x1d'): 10,
(41, '\x1e'): 10,
(41, '\x1f'): 10,
(41, '!'): 10,
(41, '"'): 10,
(41, '$'): 10,
(41, '%'): 10,
(41, '&'): 10,
(41, "'"): 10,
(41, '*'): 10,
(41, '+'): 10,
(41, '-'): 10,
(41, '.'): 10,
(41, '/'): 10,
(41, '0'): 10,
(41, '1'): 10,
(41, '2'): 10,
(41, '3'): 10,
(41, '4'): 10,
(41, '5'): 10,
(41, '6'): 10,
(41, '7'): 10,
(41, '8'): 10,
(41, '9'): 10,
(41, ':'): 10,
(41, ';'): 10,
(41, '<'): 10,
(41, '='): 10,
(41, '>'): 10,
(41, '?'): 10,
(41, '@'): 10,
(41, 'A'): 10,
(41, 'B'): 10,
(41, 'C'): 10,
(41, 'D'): 10,
(41, 'E'): 10,
(41, 'F'): 10,
(41, 'G'): 10,
(41, 'H'): 10,
(41, 'I'): 10,
(41, 'J'): 10,
(41, 'K'): 10,
(41, 'L'): 10,
(41, 'M'): 10,
(41, 'N'): 10,
(41, 'O'): 10,
(41, 'P'): 10,
(41, 'Q'): 10,
(41, 'R'): 10,
(41, 'S'): 10,
(41, 'T'): 10,
(41, 'U'): 10,
(41, 'V'): 10,
(41, 'W'): 10,
(41, 'X'): 10,
(41, 'Y'): 10,
(41, 'Z'): 10,
(41, '['): 10,
(41, '\\'): 10,
(41, ']'): 10,
(41, '^'): 10,
(41, '_'): 10,
(41, '`'): 10,
(41, 'a'): 10,
(41, 'b'): 10,
(41, 'c'): 10,
(41, 'd'): 10,
(41, 'e'): 10,
(41, 'f'): 10,
(41, 'g'): 10,
(41, 'h'): 10,
(41, 'i'): 10,
(41, 'j'): 10,
(41, 'k'): 10,
(41, 'l'): 10,
(41, 'm'): 10,
(41, 'n'): 10,
(41, 'o'): 10,
(41, 'p'): 10,
(41, 'q'): 10,
(41, 'r'): 10,
(41, 's'): 10,
(41, 't'): 10,
(41, 'u'): 10,
(41, 'v'): 10,
(41, 'w'): 10,
(41, 'x'): 10,
(41, 'y'): 10,
(41, 'z'): 10,
(41, '{'): 10,
(41, '|'): 10,
(41, '}'): 10,
(41, '~'): 10,
(41, '\x7f'): 10,
(41, '\x80'): 10,
(41, '\x81'): 10,
(41, '\x82'): 10,
(41, '\x83'): 10,
(41, '\x84'): 10,
(41, '\x85'): 10,
(41, '\x86'): 10,
(41, '\x87'): 10,
(41, '\x88'): 10,
(41, '\x89'): 10,
(41, '\x8a'): 10,
(41, '\x8b'): 10,
(41, '\x8c'): 10,
(41, '\x8d'): 10,
(41, '\x8e'): 10,
(41, '\x8f'): 10,
(41, '\x90'): 10,
(41, '\x91'): 10,
(41, '\x92'): 10,
(41, '\x93'): 10,
(41, '\x94'): 10,
(41, '\x95'): 10,
(41, '\x96'): 10,
(41, '\x97'): 10,
(41, '\x98'): 10,
(41, '\x99'): 10,
(41, '\x9a'): 10,
(41, '\x9b'): 10,
(41, '\x9c'): 10,
(41, '\x9d'): 10,
(41, '\x9e'): 10,
(41, '\xa0'): 10,
(41, '\xa1'): 10,
(41, '\xa2'): 10,
(41, '\xa3'): 10,
(41, '\xa4'): 10,
(41, '\xa5'): 10,
(41, '\xa6'): 10,
(41, '\xa7'): 10,
(41, '\xa8'): 10,
(41, '\xa9'): 10,
(41, '\xac'): 10,
(41, '\xad'): 10,
(41, '\xae'): 10,
(41, '\xaf'): 10,
(41, '\xb0'): 10,
(41, '\xb1'): 10,
(41, '\xb2'): 10,
(41, '\xb3'): 10,
(41, '\xb4'): 10,
(41, '\xb5'): 10,
(41, '\xb6'): 10,
(41, '\xb7'): 10,
(41, '\xb8'): 10,
(41, '\xb9'): 10,
(41, '\xba'): 10,
(41, '\xbb'): 10,
(41, '\xbc'): 10,
(41, '\xbd'): 10,
(41, '\xbe'): 10,
(41, '\xbf'): 10,
(41, '\xc0'): 10,
(41, '\xc1'): 10,
(41, '\xc2'): 10,
(41, '\xc3'): 10,
(41, '\xc4'): 10,
(41, '\xc5'): 10,
(41, '\xc6'): 10,
(41, '\xc7'): 10,
(41, '\xc8'): 10,
(41, '\xc9'): 10,
(41, '\xca'): 10,
(41, '\xcb'): 10,
(41, '\xcc'): 10,
(41, '\xcd'): 10,
(41, '\xce'): 10,
(41, '\xcf'): 10,
(41, '\xd0'): 10,
(41, '\xd1'): 10,
(41, '\xd2'): 10,
(41, '\xd3'): 10,
(41, '\xd4'): 10,
(41, '\xd5'): 10,
(41, '\xd6'): 10,
(41, '\xd7'): 10,
(41, '\xd8'): 10,
(41, '\xd9'): 10,
(41, '\xda'): 10,
(41, '\xdb'): 10,
(41, '\xdc'): 10,
(41, '\xdd'): 10,
(41, '\xde'): 10,
(41, '\xdf'): 10,
(41, '\xe0'): 10,
(41, '\xe1'): 10,
(41, '\xe3'): 10,
(41, '\xe4'): 10,
(41, '\xe5'): 10,
(41, '\xe6'): 10,
(41, '\xe7'): 10,
(41, '\xe8'): 10,
(41, '\xe9'): 10,
(41, '\xea'): 10,
(41, '\xeb'): 10,
(41, '\xec'): 10,
(41, '\xed'): 10,
(41, '\xee'): 10,
(41, '\xef'): 10,
(41, '\xf0'): 10,
(41, '\xf1'): 10,
(41, '\xf2'): 10,
(41, '\xf3'): 10,
(41, '\xf4'): 10,
(41, '\xf5'): 10,
(41, '\xf6'): 10,
<filename>localstack/aws/api/ssm/__init__.py
import sys
from datetime import datetime
from typing import Dict, List, Optional
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
from localstack.aws.api import RequestContext, ServiceException, ServiceRequest, handler
Account = str
AccountId = str
ActivationCode = str
ActivationDescription = str
ActivationId = str
AgentErrorCode = str
AggregatorSchemaOnly = bool
AllowedPattern = str
ApplyOnlyAtCronInterval = bool
ApproveAfterDays = int
AssociationExecutionFilterValue = str
AssociationExecutionId = str
AssociationExecutionTargetsFilterValue = str
AssociationFilterValue = str
AssociationId = str
AssociationName = str
AssociationResourceId = str
AssociationResourceType = str
AssociationVersion = str
AttachmentHash = str
AttachmentIdentifier = str
AttachmentName = str
AttachmentUrl = str
AttachmentsSourceValue = str
AttributeName = str
AttributeValue = str
AutomationActionName = str
AutomationExecutionFilterValue = str
AutomationExecutionId = str
AutomationParameterKey = str
AutomationParameterValue = str
AutomationTargetParameterName = str
BaselineDescription = str
BaselineId = str
BaselineName = str
BatchErrorMessage = str
Boolean = bool
CalendarNameOrARN = str
Category = str
ChangeDetailsValue = str
ChangeRequestName = str
ClientToken = str
CloudWatchLogGroupName = str
CloudWatchOutputEnabled = bool
CommandFilterValue = str
CommandId = str
CommandMaxResults = int
CommandPluginName = str
CommandPluginOutput = str
Comment = str
CompletedCount = int
ComplianceExecutionId = str
ComplianceExecutionType = str
ComplianceFilterValue = str
ComplianceItemContentHash = str
ComplianceItemId = str
ComplianceItemTitle = str
ComplianceResourceId = str
ComplianceResourceType = str
ComplianceStringFilterKey = str
ComplianceSummaryCount = int
ComplianceTypeName = str
ComputerName = str
DefaultBaseline = bool
DefaultInstanceName = str
DeliveryTimedOutCount = int
DescriptionInDocument = str
DocumentARN = str
DocumentAuthor = str
DocumentContent = str
DocumentDisplayName = str
DocumentFilterValue = str
DocumentHash = str
DocumentKeyValuesFilterKey = str
DocumentKeyValuesFilterValue = str
DocumentName = str
DocumentOwner = str
DocumentParameterDefaultValue = str
DocumentParameterDescrption = str
DocumentParameterName = str
DocumentPermissionMaxResults = int
DocumentReviewComment = str
DocumentSchemaVersion = str
DocumentSha1 = str
DocumentStatusInformation = str
DocumentVersion = str
DocumentVersionName = str
DocumentVersionNumber = str
DryRun = bool
EffectiveInstanceAssociationMaxResults = int
ErrorCount = int
ExecutionRoleName = str
GetInventorySchemaMaxResults = int
GetOpsMetadataMaxResults = int
GetParametersByPathMaxResults = int
IPAddress = str
ISO8601String = str
IamRole = str
IdempotencyToken = str
InstallOverrideList = str
InstanceAssociationExecutionSummary = str
InstanceCount = int
InstanceId = str
InstanceInformationFilterValue = str
InstanceInformationStringFilterKey = str
InstancePatchStateFilterKey = str
InstancePatchStateFilterValue = str
InstanceTagName = str
InstancesCount = int
Integer = int
InventoryAggregatorExpression = str
InventoryDeletionLastStatusMessage = str
InventoryFilterKey = str
InventoryFilterValue = str
InventoryGroupName = str
InventoryItemAttributeName = str
InventoryItemCaptureTime = str
InventoryItemContentHash = str
InventoryItemSchemaVersion = str
InventoryItemTypeName = str
InventoryItemTypeNameFilter = str
InventoryResultEntityId = str
InventoryResultItemKey = str
InventoryTypeDisplayName = str
InvocationTraceOutput = str
IsSubTypeSchema = bool
LastResourceDataSyncMessage = str
ListOpsMetadataMaxResults = int
MaintenanceWindowAllowUnassociatedTargets = bool
MaintenanceWindowCutoff = int
MaintenanceWindowDescription = str
MaintenanceWindowDurationHours = int
MaintenanceWindowEnabled = bool
MaintenanceWindowExecutionId = str
MaintenanceWindowExecutionStatusDetails = str
MaintenanceWindowExecutionTaskExecutionId = str
MaintenanceWindowExecutionTaskId = str
MaintenanceWindowExecutionTaskInvocationId = str
MaintenanceWindowExecutionTaskInvocationParameters = str
MaintenanceWindowFilterKey = str
MaintenanceWindowFilterValue = str
MaintenanceWindowId = str
MaintenanceWindowLambdaClientContext = str
MaintenanceWindowLambdaQualifier = str
MaintenanceWindowMaxResults = int
MaintenanceWindowName = str
MaintenanceWindowOffset = int
MaintenanceWindowSchedule = str
MaintenanceWindowSearchMaxResults = int
MaintenanceWindowStepFunctionsInput = str
MaintenanceWindowStepFunctionsName = str
MaintenanceWindowStringDateTime = str
MaintenanceWindowTargetId = str
MaintenanceWindowTaskArn = str
MaintenanceWindowTaskId = str
MaintenanceWindowTaskParameterName = str
MaintenanceWindowTaskParameterValue = str
MaintenanceWindowTaskPriority = int
MaintenanceWindowTaskTargetId = str
MaintenanceWindowTimezone = str
ManagedInstanceId = str
MaxConcurrency = str
MaxErrors = str
MaxResults = int
MaxResultsEC2Compatible = int
MaxSessionDuration = str
MetadataKey = str
MetadataValueString = str
NextToken = str
NotificationArn = str
OpsAggregatorType = str
OpsAggregatorValue = str
OpsAggregatorValueKey = str
OpsDataAttributeName = str
OpsDataTypeName = str
OpsEntityId = str
OpsEntityItemCaptureTime = str
OpsEntityItemKey = str
OpsFilterKey = str
OpsFilterValue = str
OpsItemCategory = str
OpsItemDataKey = str
OpsItemDataValueString = str
OpsItemDescription = str
OpsItemEventFilterValue = str
OpsItemEventMaxResults = int
OpsItemFilterValue = str
OpsItemId = str
OpsItemMaxResults = int
OpsItemPriority = int
OpsItemRelatedItemAssociationId = str
OpsItemRelatedItemAssociationResourceType = str
OpsItemRelatedItemAssociationResourceUri = str
OpsItemRelatedItemAssociationType = str
OpsItemRelatedItemsFilterValue = str
OpsItemRelatedItemsMaxResults = int
OpsItemSeverity = str
OpsItemSource = str
OpsItemTitle = str
OpsItemType = str
OpsMetadataArn = str
OpsMetadataFilterKey = str
OpsMetadataFilterValue = str
OpsMetadataResourceId = str
OutputSourceId = str
OutputSourceType = str
OwnerInformation = str
PSParameterName = str
PSParameterSelector = str
PSParameterValue = str
ParameterDataType = str
ParameterDescription = str
ParameterKeyId = str
ParameterLabel = str
ParameterName = str
ParameterPolicies = str
ParameterStringFilterKey = str
ParameterStringFilterValue = str
ParameterStringQueryOption = str
ParameterValue = str
ParametersFilterValue = str
PatchAdvisoryId = str
PatchArch = str
PatchBaselineMaxResults = int
PatchBugzillaId = str
PatchCVEId = str
PatchCVEIds = str
PatchClassification = str
PatchComplianceMaxResults = int
PatchContentUrl = str
PatchCriticalNonCompliantCount = int
PatchDescription = str
PatchEpoch = int
PatchFailedCount = int
PatchFilterValue = str
PatchGroup = str
PatchId = str
PatchInstalledCount = int
PatchInstalledOtherCount = int
PatchInstalledPendingRebootCount = int
PatchInstalledRejectedCount = int
PatchKbNumber = str
PatchLanguage = str
PatchMissingCount = int
PatchMsrcNumber = str
PatchMsrcSeverity = str
PatchName = str
PatchNotApplicableCount = int
PatchOrchestratorFilterKey = str
PatchOrchestratorFilterValue = str
PatchOtherNonCompliantCount = int
PatchProduct = str
PatchProductFamily = str
PatchRelease = str
PatchRepository = str
PatchSecurityNonCompliantCount = int
PatchSeverity = str
PatchSourceConfiguration = str
PatchSourceName = str
PatchSourceProduct = str
PatchStringDateTime = str
PatchTitle = str
PatchUnreportedNotApplicableCount = int
PatchVendor = str
PatchVersion = str
Product = str
PutInventoryMessage = str
Region = str
RegistrationLimit = int
RegistrationMetadataKey = str
RegistrationMetadataValue = str
RegistrationsCount = int
RemainingCount = int
ResourceCount = int
ResourceCountByStatus = str
ResourceDataSyncAWSKMSKeyARN = str
ResourceDataSyncDestinationDataSharingType = str
ResourceDataSyncEnableAllOpsDataSources = bool
ResourceDataSyncIncludeFutureRegions = bool
ResourceDataSyncName = str
ResourceDataSyncOrganizationSourceType = str
ResourceDataSyncOrganizationalUnitId = str
ResourceDataSyncS3BucketName = str
ResourceDataSyncS3Prefix = str
ResourceDataSyncS3Region = str
ResourceDataSyncSourceRegion = str
ResourceDataSyncSourceType = str
ResourceDataSyncState = str
ResourceDataSyncType = str
ResourceId = str
ResponseCode = int
Reviewer = str
S3BucketName = str
S3KeyPrefix = str
S3Region = str
ScheduleExpression = str
ScheduleOffset = int
ServiceRole = str
ServiceSettingId = str
ServiceSettingValue = str
SessionDetails = str
SessionFilterValue = str
SessionId = str
SessionManagerCloudWatchOutputUrl = str
SessionManagerParameterName = str
SessionManagerParameterValue = str
SessionManagerS3OutputUrl = str
SessionMaxResults = int
SessionOwner = str
SessionReason = str
SessionTarget = str
SharedDocumentVersion = str
SnapshotDownloadUrl = str
SnapshotId = str
SourceId = str
StandardErrorContent = str
StandardOutputContent = str
StatusAdditionalInfo = str
StatusDetails = str
StatusMessage = str
StatusName = str
StepExecutionFilterValue = str
StreamUrl = str
String = str
StringDateTime = str
TagKey = str
TagValue = str
TargetCount = int
TargetKey = str
TargetMapKey = str
TargetMapValue = str
TargetType = str
TargetValue = str
TimeoutSeconds = int
TokenValue = str
TotalCount = int
UUID = str
Url = str
ValidNextStep = str
Version = str
class AssociationComplianceSeverity(str):
CRITICAL = "CRITICAL"
HIGH = "HIGH"
MEDIUM = "MEDIUM"
LOW = "LOW"
UNSPECIFIED = "UNSPECIFIED"
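# Note: these enum-style classes subclass ``str``, so their members are plain
# strings and compare directly with wire values, e.g. (illustrative):
#
#     AssociationComplianceSeverity.HIGH == "HIGH"             # True
#     isinstance(AssociationComplianceSeverity.CRITICAL, str)  # True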
class AssociationExecutionFilterKey(str):
ExecutionId = "ExecutionId"
Status = "Status"
CreatedTime = "CreatedTime"
class AssociationExecutionTargetsFilterKey(str):
Status = "Status"
ResourceId = "ResourceId"
ResourceType = "ResourceType"
class AssociationFilterKey(str):
InstanceId = "InstanceId"
Name = "Name"
AssociationId = "AssociationId"
AssociationStatusName = "AssociationStatusName"
LastExecutedBefore = "LastExecutedBefore"
LastExecutedAfter = "LastExecutedAfter"
AssociationName = "AssociationName"
ResourceGroupName = "ResourceGroupName"
class AssociationFilterOperatorType(str):
EQUAL = "EQUAL"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
class AssociationStatusName(str):
Pending = "Pending"
Success = "Success"
Failed = "Failed"
class AssociationSyncCompliance(str):
AUTO = "AUTO"
MANUAL = "MANUAL"
class AttachmentHashType(str):
Sha256 = "Sha256"
class AttachmentsSourceKey(str):
SourceUrl = "SourceUrl"
S3FileUrl = "S3FileUrl"
AttachmentReference = "AttachmentReference"
class AutomationExecutionFilterKey(str):
DocumentNamePrefix = "DocumentNamePrefix"
ExecutionStatus = "ExecutionStatus"
ExecutionId = "ExecutionId"
ParentExecutionId = "ParentExecutionId"
CurrentAction = "CurrentAction"
StartTimeBefore = "StartTimeBefore"
StartTimeAfter = "StartTimeAfter"
AutomationType = "AutomationType"
TagKey = "TagKey"
TargetResourceGroup = "TargetResourceGroup"
AutomationSubtype = "AutomationSubtype"
OpsItemId = "OpsItemId"
class AutomationExecutionStatus(str):
Pending = "Pending"
InProgress = "InProgress"
Waiting = "Waiting"
Success = "Success"
TimedOut = "TimedOut"
Cancelling = "Cancelling"
Cancelled = "Cancelled"
Failed = "Failed"
PendingApproval = "PendingApproval"
Approved = "Approved"
Rejected = "Rejected"
Scheduled = "Scheduled"
RunbookInProgress = "RunbookInProgress"
PendingChangeCalendarOverride = "PendingChangeCalendarOverride"
ChangeCalendarOverrideApproved = "ChangeCalendarOverrideApproved"
ChangeCalendarOverrideRejected = "ChangeCalendarOverrideRejected"
CompletedWithSuccess = "CompletedWithSuccess"
CompletedWithFailure = "CompletedWithFailure"
class AutomationSubtype(str):
ChangeRequest = "ChangeRequest"
class AutomationType(str):
CrossAccount = "CrossAccount"
Local = "Local"
class CalendarState(str):
OPEN = "OPEN"
CLOSED = "CLOSED"
class CommandFilterKey(str):
InvokedAfter = "InvokedAfter"
InvokedBefore = "InvokedBefore"
Status = "Status"
ExecutionStage = "ExecutionStage"
DocumentName = "DocumentName"
class CommandInvocationStatus(str):
Pending = "Pending"
InProgress = "InProgress"
Delayed = "Delayed"
Success = "Success"
Cancelled = "Cancelled"
TimedOut = "TimedOut"
Failed = "Failed"
Cancelling = "Cancelling"
class CommandPluginStatus(str):
Pending = "Pending"
InProgress = "InProgress"
Success = "Success"
TimedOut = "TimedOut"
Cancelled = "Cancelled"
Failed = "Failed"
class CommandStatus(str):
Pending = "Pending"
InProgress = "InProgress"
Success = "Success"
Cancelled = "Cancelled"
Failed = "Failed"
TimedOut = "TimedOut"
Cancelling = "Cancelling"
class ComplianceQueryOperatorType(str):
EQUAL = "EQUAL"
NOT_EQUAL = "NOT_EQUAL"
BEGIN_WITH = "BEGIN_WITH"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
class ComplianceSeverity(str):
CRITICAL = "CRITICAL"
HIGH = "HIGH"
MEDIUM = "MEDIUM"
LOW = "LOW"
INFORMATIONAL = "INFORMATIONAL"
UNSPECIFIED = "UNSPECIFIED"
class ComplianceStatus(str):
COMPLIANT = "COMPLIANT"
NON_COMPLIANT = "NON_COMPLIANT"
class ComplianceUploadType(str):
COMPLETE = "COMPLETE"
PARTIAL = "PARTIAL"
class ConnectionStatus(str):
Connected = "Connected"
NotConnected = "NotConnected"
class DescribeActivationsFilterKeys(str):
ActivationIds = "ActivationIds"
DefaultInstanceName = "DefaultInstanceName"
IamRole = "IamRole"
class DocumentFilterKey(str):
Name = "Name"
Owner = "Owner"
PlatformTypes = "PlatformTypes"
DocumentType = "DocumentType"
class DocumentFormat(str):
YAML = "YAML"
JSON = "JSON"
TEXT = "TEXT"
class DocumentHashType(str):
Sha256 = "Sha256"
Sha1 = "Sha1"
class DocumentMetadataEnum(str):
DocumentReviews = "DocumentReviews"
class DocumentParameterType(str):
String = "String"
StringList = "StringList"
class DocumentPermissionType(str):
Share = "Share"
class DocumentReviewAction(str):
SendForReview = "SendForReview"
UpdateReview = "UpdateReview"
Approve = "Approve"
Reject = "Reject"
class DocumentReviewCommentType(str):
Comment = "Comment"
class DocumentStatus(str):
Creating = "Creating"
Active = "Active"
Updating = "Updating"
Deleting = "Deleting"
Failed = "Failed"
class DocumentType(str):
Command = "Command"
Policy = "Policy"
Automation = "Automation"
Session = "Session"
Package = "Package"
ApplicationConfiguration = "ApplicationConfiguration"
ApplicationConfigurationSchema = "ApplicationConfigurationSchema"
DeploymentStrategy = "DeploymentStrategy"
ChangeCalendar = "ChangeCalendar"
Automation_ChangeTemplate = "Automation.ChangeTemplate"
ProblemAnalysis = "ProblemAnalysis"
ProblemAnalysisTemplate = "ProblemAnalysisTemplate"
class ExecutionMode(str):
Auto = "Auto"
Interactive = "Interactive"
class Fault(str):
Client = "Client"
Server = "Server"
Unknown = "Unknown"
class InstanceInformationFilterKey(str):
InstanceIds = "InstanceIds"
AgentVersion = "AgentVersion"
PingStatus = "PingStatus"
PlatformTypes = "PlatformTypes"
ActivationIds = "ActivationIds"
IamRole = "IamRole"
ResourceType = "ResourceType"
AssociationStatus = "AssociationStatus"
class InstancePatchStateOperatorType(str):
Equal = "Equal"
NotEqual = "NotEqual"
LessThan = "LessThan"
GreaterThan = "GreaterThan"
class InventoryAttributeDataType(str):
string = "string"
number = "number"
class InventoryDeletionStatus(str):
InProgress = "InProgress"
Complete = "Complete"
class InventoryQueryOperatorType(str):
Equal = "Equal"
NotEqual = "NotEqual"
BeginWith = "BeginWith"
LessThan = "LessThan"
GreaterThan = "GreaterThan"
Exists = "Exists"
class InventorySchemaDeleteOption(str):
DisableSchema = "DisableSchema"
DeleteSchema = "DeleteSchema"
class LastResourceDataSyncStatus(str):
Successful = "Successful"
# by Viewport.
# Bounding Box.
self.add_argument('-B', '--bbox', dest='viewport_bbox_in',
action='store', default=None,
help='the bounding box, or viewport, for a read query')
# Exclusion Box.
self.add_argument('-E', '--bbex', dest='viewport_bbox_ex',
action='store', default=None,
help='the exclusionary bounding box')
# query_filter.filter_by_regions
self.add_argument('--region', dest='viewport_region',
action='store', default=None,
help='name of region to use as bounding box for a -read')
# *** Query_Filters
# Most/all query_filters are supported via --filter.
# Query_Filter keyword, value pairs.
self.add_argument('-f', '--filter', dest='filters',
action='append', default=[], nargs=2,
help='query_filter value pair, e.g., -f filter_by_username landonb')
# But some query_filters also have a baked-in switch, too:
# query_filter.pagin_count
self.add_argument('-C', '--count', dest='filter_count',
action='store', default=None, type=int,
help='the number of records to retrieve')
# query_filter.pagin_offset
self.add_argument('-O', '--offset', dest='filter_offset',
action='store', default=None, type=int,
help='the page number of records to retrieve')
# MAYBE: centerx and centery should prob. just be filters.
# query_filter.centerx
self.add_argument('-X', '--centerx', dest='centerx',
action='store', default=None, type=float,
help='full text search query centerx')
# query_filter.centery
self.add_argument('-Y', '--centery', dest='centery',
action='store', default=None, type=int,
help='full text search query centery')
# Search by Full Text Search Query.
# query_filter.filter_by_text_smart
self.add_argument('-q', '--query', dest='query',
action='store', default=None,
help='full text search query string')
# FIXME: Not implemented.
self.add_argument('--center', dest='center',
action='store', default=(0,0), nargs=2, type=int,
help='full text search query center point')
# Search by Stack ID.
# FIXME: This can be specified by --filters, too, which seems silly.
# Choose one.
# query_filter.only_stack_ids
self.add_argument('-I', '--stack_ids', dest='only_stack_ids',
action='store', default='', type=str,
help='only get items with the specified stack ID(s)')
# query_filter.only_associate_ids
self.add_argument('--link_id', dest='only_associate_ids',
action='append', default=[], type=int,
help='filter: only get links to specified stack ID(s)')
# Thread ID for posts.
# NOTE: thread_stack_id is really context_id. So is really just a
# shortcut for --filter context_stack_id {thread_stack_id}
# query_filter.context_stack_id
self.add_argument('--thread_stack_id', dest='filter_thread_id',
action='store', default=None, type=int,
help='The thread ID of the requested posts')
# ***
# Send a file.
# To send a file, we need multipart encoding, which the Python standard
# library does not support.
# http://atlee.ca/software/poster/index.html
self.add_argument('-F', '--sendfile', dest='sendfile',
action='store', default=None,
help='upload a file with your GWIS request (used with -c(reate))')
# BUG nnnn: Until we install and utilize the poster library to send
# files across GWIS to pyserver, we can at least fake an upload and
# pyserver will wink, wink at us and just copy the file to its staging
# area. So we can test from ccp.py and get good code coverage and take
# advantage of the jobs queue and not have to waste time firing up
# Firefox, loading flashclient, and testing thatuh'way. See the option,
# -e download_fake /some/file/path/shps.zip
self.add_argument('--verbose', dest='verbose_details',
action='store_true', default=False,
help='display verbose fetch details')
self.add_argument('--allow_deleted', dest='allow_deleted',
action='store_true', default=False,
help='sets revision.allow_deleted')
# ***
self.add_argument('-W', '--wide-log', dest='wide_log',
action='store_true', default=False,
help='for cron, do not restrict log line width')
self.add_argument('--log-cleanly', dest='log_cleanly',
action='store_true', default=False,
help='do not emit newlines to log, e.g., from GWIS, for logcheck')
self.add_argument('--ignore-job-fail', dest='ignore_job_fail',
action='store_true', default=False,
help='if this is a work item job that fails, do not care')
# ***
#
def verify_handler(self):
ok = True
# *** For key_val, so user doesn't have to specify two switches,
# key_val action is implied when key is supplied.
if self.cli_opts.kval_keys:
if self.cli_opts.action and (self.cli_opts.action != 'key_val'):
log.error('Specified action does not recognize key_val')
ok = False
self.cli_opts.action = 'key_val'
# *** Check the action
if not (self.cli_opts.action in Ccp_Tool_Parser.actions):
log.error('Please specify an action.')
ok = False
# *** Check the gwisness: we only support direct sql via read action
# (all other do_* commands send a gwis request to apache/pyserver).
always_gwis_actions = ('read', 'key_val',)
if (self.cli_opts.always_gwis
and (self.cli_opts.action not in always_gwis_actions)):
log.error('--always_gwis only works with the actions: %s.'
% (always_gwis_actions,))
ok = False
# *** Check the item type
if self.cli_opts.action not in ('interact',
'key_val',
'search',
'find_route'):
if not self.cli_opts.item_type:
log.error('Please specify an item type.')
ok = False
else:
# Verify the item type
if not item_factory.is_item_valid(self.cli_opts.item_type):
log.error('Invalid item type: %s.' % (self.cli_opts.item_type,))
ok = False
if not (self.cli_opts.item_type
in item_type.Item_Type.lookup_id_by_str):
log.error('Unknown item type: %s.' % (self.cli_opts.item_type,))
ok = False
# *** Check the GrAC context
if ok and (self.cli_opts.action == 'read'):
item_module = item_factory.get_item_module(self.cli_opts.item_type)
if isinstance(item_module.One(), grac_record.One):
if not self.cli_opts.grac_context:
log.error('Please specify a grac context.')
ok = False
else:
if not (self.cli_opts.grac_context
in grac_record.Many.context_types):
log.error('Unknown grac context: %s.'
% (self.cli_opts.grac_context,))
ok = False
# *** Check that only one viewport is specified
if (self.cli_opts.viewport_bbox_in and self.cli_opts.viewport_region):
log.error(
'Please specify at most one of viewport_bbox and viewport_region.')
ok = False
# *** Check that an exclude bbox is only given alongside an include bbox
if (self.cli_opts.viewport_bbox_ex
and not self.cli_opts.viewport_bbox_in):
log.error('Please specify include bbox when specifying exclude bbox.')
ok = False
# *** Check the Query Filters
ok &= ( self.verify_cli_pairs_filters()
and self.verify_cli_pairs_edit_cols()
and self.verify_cli_pairs_gia_cols()
and self.verify_qf_synonyms()
and self.verify_query_filters()
and self.verify_miscellany())
# *** Noneify the branch if the user wants a branch list. This *must*
# come after self.verify_qf_synonyms().
if not self.cli_opts.branch:
if ((self.cli_opts.action == 'read')
and (self.cli_opts.item_type == 'branch')):
log.debug('verify_handler: no branch ID: none needed.')
self.cli_opts.branch = None
else:
if self.cli_opts.branch is None:
log.debug('verify_handler: no branch ID: using None.')
else:
log.debug('verify_handler: no branch ID: using public.')
#
if (self.cli_opts.allow_deleted and (self.cli_opts.action != 'read')):
log.error('Please only use allow_deleted with the read action.')
ok = False
#
if self.cli_opts.wide_log:
logging2.config_line_format(conf.log_frmat_len, ' # ', 999)
# Finally check the base class. Do this last since we may have overridden
# self.cli_opts.branch.
ok &= Ccp_Script_Args.verify_handler(self)
#
return ok
#
def verify_cli_pairs_filters(self):
ok = True
kwords = Ccp_Tool_Parser.opts_to_dict(self.cli_opts.filters)
# Check strings
kwords.setdefault('filter_by_username', '')
kwords.setdefault('filter_by_watch_item', '')
kwords.setdefault('filter_by_names_exact', '')
kwords.setdefault('filter_by_text_exact', '')
kwords.setdefault('filter_by_text_loose', '')
kwords.setdefault('filter_by_text_smart', '')
kwords.setdefault('filter_by_thread_type', '')
kwords.setdefault('filter_by_creator_include', '')
kwords.setdefault('filter_by_creator_exclude', '')
kwords.setdefault('only_stack_ids', '')
kwords.setdefault('about_stack_ids', '')
kwords.setdefault('only_lhs_stack_ids', '')
kwords.setdefault('only_rhs_stack_ids', '')
kwords.setdefault('filter_by_value_text', '')
kwords.setdefault('only_item_type_ids', '')
kwords.setdefault('use_stealth_secret', '')
kwords.setdefault('results_style', '')
# Check boolean values
try:
# NOTE: An option that's 0 might really be "0", so be sure to cast to
# int first, lest we accidentally just bool a str, which is True if
# the str is not empty (e.g., bool("0") is true).
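# For example (illustrative):
#   bool("0")       -> True   (any non-empty string is truthy)
#   bool(int("0"))  -> False  (cast to int first, as below)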
kwords['pagin_total'] = bool(int(
kwords.get('pagin_total', False)))
kwords['filter_by_watch_geom'] = bool(int(
kwords.get('filter_by_watch_geom', False)))
kwords['filter_by_watch_feat'] = bool(int(
kwords.get('filter_by_watch_feat', False)))
kwords['filter_by_unread'] = bool(int(
kwords.get('filter_by_unread', False)))
kwords['filter_by_nearby_edits'] = bool(int(
kwords.get('filter_by_nearby_edits', False)))
kwords['include_item_stack'] = bool(int(
kwords.get('include_item_stack', False)))
kwords['include_lhs_name'] = bool(int(
kwords.get('include_lhs_name', False)))
kwords['include_rhs_name'] = bool(int(
kwords.get('include_rhs_name', False)))
kwords['include_geosummary'] = bool(int(
kwords.get('include_geosummary', False)))
kwords['rating_restrict'] = bool(int(
kwords.get('rating_restrict', False)))
kwords['gia_use_sessid'] = bool(int(
kwords.get('gia_use_sessid', False)))
kwords['skip_tag_counts'] = bool(int(
kwords.get('skip_tag_counts', False)))
kwords['dont_load_feat_attcs'] = bool(int(
kwords.get('dont_load_feat_attcs', False)))
kwords['do_load_lval_counts'] = bool(int(
kwords.get('do_load_lval_counts', False)))
kwords['include_item_aux'] = bool(int(
kwords.get('include_item_aux', False)))
kwords['findability_ignore'] = bool(int(
kwords.get('findability_ignore', False)))
kwords['findability_ignore_include_deleted'] = bool(int(
kwords.get('findability_ignore_include_deleted', False)))
kwords['findability_recent'] = bool(int(
kwords.get('findability_recent', False)))
kwords['do_load_latest_note'] = bool(int(
kwords.get('do_load_latest_note', False)))
# This one isn't technically in qb.filters (it's just in qb).
kwords['request_is_a_test'] = bool(int(
kwords.get('request_is_a_test', False)))
except ValueError, e:
log.error('Expected boolean, got something else: %s.' % (str(e),))
ok = False
# Check integer values
try:
kwords['pagin_count'] = int(kwords.get('pagin_count', 0))
kwords['pagin_offset'] = int(kwords.get('pagin_offset', 0))
kwords['context_stack_id'] = int(kwords.get('context_stack_id', 0))
kwords['min_access_level'] = int(kwords.get('min_access_level', 0))
kwords['max_access_level'] = int(kwords.get('max_access_level', 0))
kwords['only_system_id'] = int(kwords.get('only_system_id', 0))
kwords['only_lhs_stack_id'] = int(kwords.get('only_lhs_stack_id', 0))
kwords['only_rhs_stack_id'] = int(kwords.get('only_rhs_stack_id', 0))
kwords['rev_min'] = int(kwords.get('rev_min', 0))
kwords['rev_max'] = int(kwords.get('rev_max', 0))
except ValueError, e:
log.error('Expected integer, got something else: %s.' % (str(e),))
ok = False
self.cli_opts.filters = kwords
return ok
#
def verify_cli_pairs_edit_cols(self):
ok = True
opts_dict = Ccp_Tool_Parser.opts_to_dict(self.cli_opts.edit_cols)
self.cli_opts.edit_cols = opts_dict
return ok
#
def verify_cli_pairs_gia_cols(self):
ok = True
opts_dict = Ccp_Tool_Parser.opts_to_dict(self.cli_opts.gia_cols)
self.cli_opts.gia_cols = opts_dict
return ok
#
def verify_qf_parse_synonymns(self, opts_name, keyw_name):
if (getattr(self.cli_opts, opts_name)
and self.cli_opts.filters.get(keyw_name)):
log.error(
'ERROR: Please specify only the option, %s, or the filter, %s, but not both'
% (opts_name, keyw_name,))
raise Exception()
else:
value = getattr(self.cli_opts, opts_name)
if value is None:
value = self.cli_opts.filters.get(keyw_name)
# NOTE: value might just be None...
# Set both self.cli_opts and self.cli_opts.filters.
setattr(self.cli_opts, opts_name, value)
self.cli_opts.filters[keyw_name] = value
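# E.g. (illustrative): ``-C 10`` on the command line ends up setting both
# self.cli_opts.filter_count and self.cli_opts.filters['pagin_count'] to 10.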
#
def verify_qf_synonyms(self):
ok = True
# Check value synonyms
try:
#self.verify_qf_parse_synonymns('branch', 'branch_id')
self.verify_qf_parse_synonymns('access_level', 'min_access_level')
self.verify_qf_parse_synonymns('filter_count', 'pagin_count')
self.verify_qf_parse_synonymns('filter_offset', 'pagin_offset')
self.verify_qf_parse_synonymns('filter_thread_id', 'context_stack_id')
self.verify_qf_parse_synonymns('query', 'filter_by_text_smart')
self.verify_qf_parse_synonymns('viewport_region', 'filter_by_regions')
except Exception, e:
# NOTE: Something has already been said, so no need to say it.
#log.error('Exception: "%s" / %s' % (str(e), traceback.format_exc(),))
ok = False
return ok
#
def verify_query_filters(self):
ok = True
<gh_stars>1-10
from datetime import datetime
from contextlib import contextmanager
import os
import os.path
import re
import selectors
import shlex
import subprocess
import sys
import textwrap
import types
import pytest
import warnings
BINDIR = os.path.join(os.path.abspath(os.environ['PWD']))
class HlwmBridge:
HC_PATH = os.path.join(BINDIR, 'herbstclient')
def __init__(self, display, hlwm_process):
self.client_procs = []
self.next_client_id = 0
self.env = {
'DISPLAY': display,
}
self.hlwm_process = hlwm_process
self.hc_idle = subprocess.Popen(
[self.HC_PATH, '--idle', 'rule', 'here_is_.*'],
bufsize=1, # line buffered
universal_newlines=True,
env=self.env,
stdout=subprocess.PIPE
)
# a dictionary mapping wmclasses to window ids as reported
# by self.hc_idle
self.wmclass2winid = {}
def _parse_command(self, cmd):
"""
Parse a command (a string using shell quotes or
a string list) to a string list.
"""
if isinstance(cmd, list):
args = [str(x) for x in cmd]
assert args
else:
args = shlex.split(cmd)
return args
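# For example (illustrative):
#   self._parse_command('attr theme.border_width')   # -> ['attr', 'theme.border_width']
#   self._parse_command(['set_attr', 'my_attr', 2])  # -> ['set_attr', 'my_attr', '2']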
def unchecked_call(self, cmd, log_output=True):
"""call the command but do not check exit code or stderr"""
args = self._parse_command(cmd)
try:
proc = subprocess.run([self.HC_PATH, '-n'] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=self.env,
universal_newlines=True,
# Kill hc when it hangs due to crashed server:
timeout=2
)
except subprocess.TimeoutExpired:
self.hlwm_process.investigate_timeout('calling ' + str(args))
outcome = 'succeeded' if proc.returncode == 0 else 'failed'
allout = proc.stdout + proc.stderr
if allout:
if log_output:
print(f'Client command {args} {outcome} with output:\n{allout}')
else:
print(f'Client command {args} {outcome} with output', end='')
print(' (output suppressed).')
else:
print(f'Client command {args} {outcome} (no output)')
# Take this opportunity read and echo any hlwm output captured in the
# meantime:
self.hlwm_process.read_and_echo_output()
return proc
def call(self, cmd):
"""call the command and expect it to have exit code zero
and no output on stderr"""
proc = self.unchecked_call(cmd)
assert proc.returncode == 0
assert not proc.stderr
return proc
def call_xfail(self, cmd):
"""call the command and expect it to have non-zero exit code
and some output on stderr. The returned finished process handle is
extended by a match() method that runs a regex against the process
stderr
"""
proc = self.unchecked_call(cmd)
assert proc.returncode != 0
assert proc.stderr != ""
def f(self2, reg):
assert re.search(reg, self2.stderr)
proc.expect_stderr = types.MethodType(f, proc)
return proc
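# Typical usage in a test (illustrative attribute name and error message):
#   hlwm.call_xfail('get_attr no.such.attribute').expect_stderr('not found')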
def call_xfail_no_output(self, cmd):
proc = self.unchecked_call(cmd)
assert proc.returncode != 0
assert proc.stderr == ""
return proc
def get_attr(self, attribute_path, check=True):
return self.call(['get_attr', attribute_path]).stdout
def create_client(self, term_command='sleep infinity', title=None, keep_running=False):
"""
Launch a client that will be terminated on shutdown.
"""
self.next_client_id += 1
wmclass = 'client_{}'.format(self.next_client_id)
title = ['-title', str(title)] if title else []
command = ['xterm'] + title + ['-class', wmclass, '-e', 'bash', '-c', term_command]
# enforce a hook when the window appears
self.call(['rule', 'once', 'class=' + wmclass, 'hook=here_is_' + wmclass])
proc = subprocess.Popen(command, env=self.env)
# once the window appears, the hook is fired:
winid = self.wait_for_window_of(wmclass)
if not keep_running:
# Add to list of processes to be killed on shutdown:
self.client_procs.append(proc)
return winid, proc
def complete(self, cmd, partial=False, position=None):
"""
Return a sorted list of all completions for the next argument for the
given command, if position=None. If position is given, then the
argument of the given position is completed.
Set 'partial' if some of the completions for the given command are
partial. If not in 'partial' mode, trailing spaces are stripped.
"""
args = self._parse_command(cmd)
if position is None:
position = len(args)
proc = self.call(['complete_shell', position] + args)
items = []
for i in proc.stdout.splitlines(False):
if partial:
items.append(i)
else:
if not i.endswith(' '):
raise Exception(("completion for \"{}\" returned the partial "
+ "completion item \"{}\"").format(cmd, i)
) from None
else:
items.append(i[0:-1])
return sorted(items)
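# For example (illustrative), listing completions for the first argument
# of ``set_attr``:
#   hlwm.complete('set_attr', partial=True)  # e.g. ['monitors.', 'settings.', ...]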
def list_children_via_attr(self, object_path):
"""
List the names of children of the
given object, using the attr command internally.
"""
# regexes for list_children:
children_re = \
re.compile(r'^[0-9]* (child|children)[\\.:]((\n [^\n]*)*)')
line_re = re.compile('^ (.*)\\.$')
output = self.call(['attr', object_path]).stdout
section_match = children_re.match(output)
assert section_match
children = []
for i in section_match.group(2).split('\n')[1:]:
child_match = line_re.match(i)
assert child_match
children.append(child_match.group(1))
return sorted(children)
def list_children(self, object_path):
"""
List the names of children of the
given object, using the complete_shell command.
"""
if not object_path.endswith('.') and object_path != '':
object_path += '.'
items = self.complete(['object_tree', object_path],
partial=True, position=1)
children = []
for i in items:
children.append(i.split('.')[-2])
return sorted(children)
def create_clients(self, num):
return [self.create_client()[0] for i in range(num)]
def wait_for_window_of(self, wmclass):
"""Wait for a rule hook of the form "here_is_" + wmclass """
# We don't need to read the second argument of the hook and don't need
# to check that is indeed equals "here_is_" + wmclass. But we need to
# check this once we create clients simultaneously.
line = self.hc_idle.stdout.readline().rstrip('\n').split('\t')
try:
self.hc_idle.wait(0)
except subprocess.TimeoutExpired:
pass
if self.hc_idle.returncode is not None:
self.hlwm_process.investigate_timeout(
'waiting for hook triggered by client \"{}\"'.format(wmclass))
return line[-1]
def shutdown(self):
for client_proc in self.client_procs:
client_proc.terminate()
client_proc.wait(2)
self.hc_idle.terminate()
self.hc_idle.wait(2)
def bool(self, python_bool_var):
"""convert a boolean variable into hlwm's string representation"""
return "true" if python_bool_var else "false"
@pytest.fixture
def hlwm(hlwm_process):
display = os.environ['DISPLAY']
# display = ':13'
hlwm_bridge = HlwmBridge(display, hlwm_process)
yield hlwm_bridge
# Make sure that hlwm survived:
hlwm_bridge.call('version')
hlwm_bridge.shutdown()
class HlwmProcess:
def __init__(self, tmpdir, env):
autostart = tmpdir / 'herbstluftwm' / 'autostart'
autostart.ensure()
autostart.write(textwrap.dedent("""
#!/usr/bin/env bash
echo "hlwm started"
""".lstrip('\n')))
autostart.chmod(0o755)
bin_path = os.path.join(BINDIR, 'herbstluftwm')
self.proc = subprocess.Popen(
[bin_path, '--verbose'], env=env,
bufsize=0, # essential for reading output with selectors!
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
sel = selectors.DefaultSelector()
sel.register(self.proc.stdout, selectors.EVENT_READ, data=sys.stdout)
sel.register(self.proc.stderr, selectors.EVENT_READ, data=sys.stderr)
self.output_selector = sel
# Wait for marker output from wrapper script:
self.read_and_echo_output(until_stdout='hlwm started')
def read_and_echo_output(self, until_stdout=None, until_stderr=None, until_eof=False):
expect_sth = ((until_stdout or until_stderr) is not None)
max_wait = 5
# Track which file objects have EOFed:
eof_fileobjs = set()
fileobjs = set(k.fileobj for k in self.output_selector.get_map().values())
stderr = ''
stdout = ''
def match_found():
if until_stdout and (until_stdout in stdout):
return True
if until_stderr and (until_stderr in stderr):
return True
return False
started = datetime.now()
while (datetime.now() - started).total_seconds() < max_wait:
select_timeout = 1
# If we're not polling for a matching string (anymore), there is no
# need for a dampening timeout:
if not expect_sth or match_found():
select_timeout = 0
selected = self.output_selector.select(timeout=select_timeout)
for key, events in selected:
# Read only single byte, otherwise we might block:
ch = key.fileobj.read(1).decode('ascii')
if ch == '':
eof_fileobjs.add(key.fileobj)
# Pass it through to the real stdout/stderr:
key.data.write(ch)
key.data.flush()
# Store in temporary buffer for string matching:
if key.fileobj == self.proc.stderr:
stderr += ch
if key.fileobj == self.proc.stdout:
stdout += ch
if until_eof:
# We are going to the very end, so carry on until all file
# objects have returned EOF:
if eof_fileobjs == fileobjs:
break
else:
continue
if selected != []:
# There is still data available, so keep reading (no matter
# what):
continue
# But stop reading if there is nothing to look for or we have
# already found it:
if not expect_sth or match_found():
break
duration = (datetime.now() - started).total_seconds()
if expect_sth and not match_found():
assert False, f'Expected string not encountered within {duration:.1f} seconds'
@contextmanager
def wait_stderr_match(self, match):
"""
Context manager for wrapping commands that are expected to result in
certain output on hlwm's stderr (e.g., input events).
"""
self.read_and_echo_output()
yield
self.read_and_echo_output(until_stderr=match)
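# Example usage (illustrative; the message and trigger are stand-ins):
#   with hlwm_process.wait_stderr_match('keypress'):
#       send_key_event()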
def investigate_timeout(self, reason):
"""if some kind of client request observes a timeout, investigate the
herbstluftwm server process. 'reason' is best phrased using present
participle"""
try:
self.proc.wait(0)
except subprocess.TimeoutExpired:
pass
if self.proc.returncode is None:
raise Exception(str(reason) + " took too long"
+ " but hlwm still running") from None
else:
raise Exception("{} made herbstluftwm quit with exit code {}"
.format(str(reason), self.proc.returncode)) from None
def shutdown(self):
self.proc.terminate()
# Make sure to read and echo all remaining output (esp. ASAN messages):
self.read_and_echo_output(until_eof=True)
if self.proc.returncode is None:
# only wait the process if it hasn't been cleaned up
# this also avoids the second exception if hlwm crashed
try:
assert self.proc.wait(2) == 0
except subprocess.TimeoutExpired:
self.proc.kill()
self.proc.wait(2)
raise Exception("herbstluftwm did not quit on sigterm"
+ " and had to be killed") from None
def kill_all_existing_windows(show_warnings=True):
xlsclients = subprocess.run(['xlsclients', '-l'],
stdout=subprocess.PIPE,
check=True)
clients = []
for l in xlsclients.stdout.decode().splitlines():
m = re.match(r'Window (0x[0-9a-fA-F]*):', l)
if m:
clients.append(m.group(1))
if clients and show_warnings:
warnings.warn(UserWarning("There are still some clients "
"from previous tests."))
for c in clients:
if show_warnings:
firstvalid, lastvalid, votekey, selectionkey
):
assert context.response["accounts"][0]["status"] == "Online"
assert context.response["accounts"][0]["address"] == address
assert context.response["accounts"][0]["participation"][
"vote-key-dilution"
] == int(keydilution)
assert context.response["accounts"][0]["participation"][
"vote-first-valid"
] == int(firstvalid)
assert context.response["accounts"][0]["participation"][
"vote-last-valid"
] == int(lastvalid)
assert (
context.response["accounts"][0]["participation"][
"vote-participation-key"
]
== votekey
)
assert (
context.response["accounts"][0]["participation"][
"selection-participation-key"
]
== selectionkey
)
@when("we make any SearchAccounts call")
def search_accounts_any(context):
context.response = context.icl.accounts(asset_id=2)
@then(
'the parsed SearchAccounts response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have address "{address}"'
)
def parse_accounts(context, roundNum, length, index, address):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["accounts"]) == int(length)
if int(length) > 0:
assert context.response["accounts"][int(index)]["address"] == address
@when(
'the parsed SearchAccounts response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have authorizing address "{authAddr:MaybeString}"'
)
def parse_accounts_auth(context, roundNum, length, index, authAddr):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["accounts"]) == int(length)
if int(length) > 0:
assert (
context.response["accounts"][int(index)]["auth-addr"] == authAddr
)
@when(
"I get the next page using {indexer} to search for transactions with {limit} and {maxround}"
)
def search_txns_next(context, indexer, limit, maxround):
context.response = context.icls[indexer].search_transactions(
limit=int(limit),
max_round=int(maxround),
next_page=context.response["next-token"],
)
@when(
'I use {indexer} to search for transactions with {limit}, "{noteprefix:MaybeString}", "{txtype:MaybeString}", "{sigtype:MaybeString}", "{txid:MaybeString}", {block}, {minround}, {maxround}, {assetid}, "{beforetime:MaybeString}", "{aftertime:MaybeString}", {currencygt}, {currencylt}, "{address:MaybeString}", "{addressrole:MaybeString}", "{excludecloseto:MaybeString}" and token "{token:MaybeString}"'
)
def icl_search_txns(
context,
indexer,
limit,
noteprefix,
txtype,
sigtype,
txid,
block,
minround,
maxround,
assetid,
beforetime,
aftertime,
currencygt,
currencylt,
address,
addressrole,
excludecloseto,
token,
):
context.response = context.icls[indexer].search_transactions(
asset_id=int(assetid),
limit=int(limit),
next_page=token,
note_prefix=base64.b64decode(noteprefix),
txn_type=txtype,
sig_type=sigtype,
txid=txid,
block=int(block),
min_round=int(minround),
max_round=int(maxround),
start_time=aftertime,
end_time=beforetime,
min_amount=int(currencygt),
max_amount=int(currencylt),
address=address,
address_role=addressrole,
exclude_close_to=excludecloseto == "true",
)
@when(
'I use {indexer} to search for transactions with {limit}, "{noteprefix:MaybeString}", "{txtype:MaybeString}", "{sigtype:MaybeString}", "{txid:MaybeString}", {block}, {minround}, {maxround}, {assetid}, "{beforetime:MaybeString}", "{aftertime:MaybeString}", {currencygt}, {currencylt}, "{address:MaybeString}", "{addressrole:MaybeString}", "{excludecloseto:MaybeString}", {application_id} and token "{token:MaybeString}"'
)
def icl_search_txns_with_app(
context,
indexer,
limit,
noteprefix,
txtype,
sigtype,
txid,
block,
minround,
maxround,
assetid,
beforetime,
aftertime,
currencygt,
currencylt,
address,
addressrole,
excludecloseto,
application_id,
token,
):
context.response = context.icls[indexer].search_transactions(
asset_id=int(assetid),
limit=int(limit),
next_page=token,
note_prefix=base64.b64decode(noteprefix),
txn_type=txtype,
sig_type=sigtype,
txid=txid,
block=int(block),
min_round=int(minround),
max_round=int(maxround),
start_time=aftertime,
end_time=beforetime,
min_amount=int(currencygt),
max_amount=int(currencylt),
address=address,
address_role=addressrole,
application_id=int(application_id),
exclude_close_to=excludecloseto == "true",
)
@then(
'there are {num} transactions in the response, the first is "{txid:MaybeString}".'
)
def check_transactions(context, num, txid):
assert len(context.response["transactions"]) == int(num)
if int(num) > 0:
assert context.response["transactions"][0]["id"] == txid
@then('Every transaction has tx-type "{txtype}"')
def check_transaction_types(context, txtype):
for txn in context.response["transactions"]:
assert txn["tx-type"] == txtype
@then('Every transaction has sig-type "{sigtype}"')
def check_sig_types(context, sigtype):
for txn in context.response["transactions"]:
if sigtype == "lsig":
assert list(txn["signature"].keys())[0] == "logicsig"
if sigtype == "msig":
assert list(txn["signature"].keys())[0] == "multisig"
if sigtype == "sig":
assert list(txn["signature"].keys())[0] == sigtype
@then("Every transaction has round >= {minround}")
def check_minround(context, minround):
for txn in context.response["transactions"]:
assert txn["confirmed-round"] >= int(minround)
@then("Every transaction has round <= {maxround}")
def check_maxround(context, maxround):
for txn in context.response["transactions"]:
assert txn["confirmed-round"] <= int(maxround)
@then("Every transaction has round {block}")
def check_round(context, block):
for txn in context.response["transactions"]:
assert txn["confirmed-round"] == int(block)
@then("Every transaction works with asset-id {assetid}")
def check_assetid(context, assetid):
for txn in context.response["transactions"]:
if "asset-config-transaction" in txn:
subtxn = txn["asset-config-transaction"]
else:
subtxn = txn["asset-transfer-transaction"]
assert subtxn["asset-id"] == int(assetid) or txn[
"created-asset-index"
] == int(assetid)
@then('Every transaction is older than "{before}"')
def check_before(context, before):
    t = datetime.fromisoformat(before.replace("Z", "+00:00"))
    for txn in context.response["transactions"]:
        assert txn["round-time"] <= datetime.timestamp(t)
@then('Every transaction is newer than "{after}"')
def check_after(context, after):
    t = datetime.fromisoformat(after.replace("Z", "+00:00"))
    for txn in context.response["transactions"]:
        assert txn["round-time"] >= datetime.timestamp(t)
@then("Every transaction moves between {currencygt} and {currencylt} currency")
def check_currency(context, currencygt, currencylt):
for txn in context.response["transactions"]:
amt = 0
if "asset-transfer-transaction" in txn:
amt = txn["asset-transfer-transaction"]["amount"]
else:
amt = txn["payment-transaction"]["amount"]
if int(currencygt) == 0:
if int(currencylt) > 0:
assert amt <= int(currencylt)
else:
if int(currencylt) > 0:
assert int(currencygt) <= amt <= int(currencylt)
else:
assert int(currencygt) <= amt
@when(
'we make a Search For Transactions call with account "{account:MaybeString}" NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} assetIndex {index} addressRole "{addressRole:MaybeString}" ExcluseCloseTo "{excludeCloseTo:MaybeString}" rekeyTo "{rekeyTo:MaybeString}"'
)
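# NOTE: "ExcluseCloseTo" (sic) in the step text above, and in the no-rekey variant
# below, deliberately matches the misspelling used in the shared cucumber feature
# files; "correcting" it here would stop the step from matching.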
def search_txns(
context,
account,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
index,
addressRole,
excludeCloseTo,
rekeyTo,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
if account == "none":
account = None
if addressRole == "none":
addressRole = None
if excludeCloseTo == "none":
excludeCloseTo = None
if rekeyTo == "none":
rekeyTo = None
context.response = context.icl.search_transactions(
asset_id=int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=account,
address_role=addressRole,
exclude_close_to=excludeCloseTo,
rekey_to=rekeyTo,
)
@when(
'we make a Search For Transactions call with account "{account:MaybeString}" NotePrefix "{notePrefixB64:MaybeString}" TxType "{txType:MaybeString}" SigType "{sigType:MaybeString}" txid "{txid:MaybeString}" round {block} minRound {minRound} maxRound {maxRound} limit {limit} beforeTime "{beforeTime:MaybeString}" afterTime "{afterTime:MaybeString}" currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} assetIndex {index} addressRole "{addressRole:MaybeString}" ExcluseCloseTo "{excludeCloseTo:MaybeString}"'
)
def search_txns_no_rekey(
context,
account,
notePrefixB64,
txType,
sigType,
txid,
block,
minRound,
maxRound,
limit,
beforeTime,
afterTime,
currencyGreaterThan,
currencyLessThan,
index,
addressRole,
excludeCloseTo,
):
if notePrefixB64 == "none":
notePrefixB64 = ""
if txType == "none":
txType = None
if sigType == "none":
sigType = None
if txid == "none":
txid = None
if beforeTime == "none":
beforeTime = None
if afterTime == "none":
afterTime = None
if account == "none":
account = None
if addressRole == "none":
addressRole = None
if excludeCloseTo == "none":
excludeCloseTo = None
context.response = context.icl.search_transactions(
asset_id=int(index),
limit=int(limit),
next_page=None,
note_prefix=base64.b64decode(notePrefixB64),
txn_type=txType,
sig_type=sigType,
txid=txid,
block=int(block),
min_round=int(minRound),
max_round=int(maxRound),
start_time=afterTime,
end_time=beforeTime,
min_amount=int(currencyGreaterThan),
max_amount=int(currencyLessThan),
address=account,
address_role=addressRole,
exclude_close_to=excludeCloseTo,
rekey_to=None,
)
@when("we make any SearchForTransactions call")
def search_txns_any(context):
context.response = context.icl.search_transactions(asset_id=2)
@then(
'the parsed SearchForTransactions response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have sender "{sender}"'
)
def parse_search_txns(context, roundNum, length, index, sender):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["transactions"]) == int(length)
if int(length) > 0:
assert context.response["transactions"][int(index)]["sender"] == sender
@when(
'the parsed SearchForTransactions response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have rekey-to "{rekeyTo:MaybeString}"'
)
def step_impl(context, roundNum, length, index, rekeyTo):
assert context.response["current-round"] == int(roundNum)
assert len(context.response["transactions"]) == int(length)
if int(length) > 0:
assert (
context.response["transactions"][int(index)]["rekey-to"] == rekeyTo
)
@when(
'I use {indexer} to search for assets with {limit}, {assetidin}, "{creator:MaybeString}", "{name:MaybeString}", "{unit:MaybeString}", and token "{token:MaybeString}"'
)
def icl_search_assets(
context, indexer, limit, assetidin, creator, name, unit, token
):
context.response = context.icls[indexer].search_assets(
limit=int(limit),
next_page=token,
creator=creator,
name=name,
unit=unit,
asset_id=int(assetidin),
)
@then("there are {num} assets in the response, the first is {assetidout}.")
def check_assets(context, num, assetidout):
assert len(context.response["assets"]) == int(num)
if int(num) > 0:
assert context.response["assets"][0]["index"] == int(assetidout)
@when(
'I use {indexer} to search for applications with {limit}, {application_id}, "{include_all:MaybeBool}" and token "{token:MaybeString}"'
)
def search_applications_include_all(
context, indexer, limit, application_id, include_all, token
):
context.response = context.icls[indexer].search_applications(
application_id=int(application_id),
limit=int(limit),
include_all=include_all,
next_page=token,
)
@when(
'I use {indexer} to search for applications with {limit}, {application_id}, and token "{token:MaybeString}"'
)
def search_applications(context, indexer, limit, application_id, token):
context.response = context.icls[indexer].search_applications(
application_id=int(application_id), limit=int(limit), next_page=token
)
@when(
'I use {indexer} to lookup application with {application_id} and "{include_all:MaybeBool}"'
)
def lookup_application_include_all(
context, indexer, application_id, include_all
):
try:
context.response = context.icls[indexer].applications(
application_id=int(application_id), include_all=include_all
)
except IndexerHTTPError as e:
context.response = json.loads(str(e))
@when("I use {indexer} to lookup application with {application_id}")
def lookup_application(context, indexer, application_id):
context.response = context.icls[indexer].applications(
application_id=int(application_id)
)
@then('the parsed response should equal "{jsonfile}".')
def step_impl(context, jsonfile):
loaded_response = None
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
with open(dir_path + "/test/features/resources/" + jsonfile, "rb") as f:
loaded_response = bytearray(f.read())
# sort context.response
def recursively_sort_on_key(dictionary):
returned_dict = dict()
for k, v in sorted(dictionary.items()):
if isinstance(v, dict):
returned_dict[k] = recursively_sort_on_key(v)
elif isinstance(v, list) and all(
isinstance(item, dict) for item in v
):
if all("key" in item.keys() for item in v):
from operator import itemgetter
returned_dict[k] = sorted(v, key=itemgetter("key"))
else:
sorted_list = list()
for item in v:
sorted_list.append(recursively_sort_on_key(item))
returned_dict[k] = sorted_list
else:
returned_dict[k] = v
return returned_dict
context.response = recursively_sort_on_key(context.response)
loaded_response = recursively_sort_on_key(json.loads(loaded_response))
if context.response != loaded_response:
print("EXPECTED: " + str(loaded_response))
print("ACTUAL: " + str(context.response))
assert context.response == loaded_response
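# The recursive sort above makes the comparison order-insensitive: dict keys are
# compared sorted, and lists of {"key": ...} objects are compared sorted by "key",
# so the canned JSON fixtures match regardless of serialization order.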
@when(
'we make a SearchForAssets call with limit {limit} creator "{creator:MaybeString}" name "{name:MaybeString}" unit "{unit:MaybeString}" index {index}'
)
def search_assets(context, limit, creator, name, unit, index):
    if creator == "none":
        creator = None
    if name == "none":
        name = None
    if unit == "none":
        unit = None
    # (the original was truncated here; the following mirrors the "none" handling
    # and search_assets call pattern used elsewhere in this file)
    context.response = context.icl.search_assets(
        limit=int(limit),
        next_page=None,
        creator=creator,
        name=name,
        unit=unit,
        asset_id=int(index),
    )
''' Common utilities for transient bubble calculation.
'''
from copy import deepcopy

from numpy import (
    append, arange, around, array, atleast_2d, concatenate, copy, delete,
    diag, hstack, isnan, ix_,
    ones, prod, shape, sum, unique, where, zeros, exp, log, repeat)
from numpy import int64 as my_int
from scipy.sparse import csc_matrix as sparse
from scipy.special import factorial
from scipy.stats import multinomial
from model.preprocessing import ModelInput, HouseholdPopulation, SEIRInput
from model.common import (
build_state_matrix, build_external_import_matrix_SEPIRQ)
from model.imports import NoImportModel
def build_mixed_compositions_pairwise(
composition_list, composition_distribution):
no_comps = composition_list.shape[0]
if composition_list.ndim == 1:
hh_dimension = 1
else:
hh_dimension = composition_list.shape[1]
mixed_comp_list = zeros((no_comps**2, 2*hh_dimension), dtype=my_int)
mixed_comp_dist = zeros((no_comps**2, 1))
pairings = [[], []]
for hh1 in range(no_comps):
for hh2 in range(no_comps):
index = hh1*no_comps + hh2
pairings[0].append(hh1)
pairings[1].append(hh2)
mixed_comp_list[index, :hh_dimension] = \
composition_list[hh1,]
mixed_comp_list[index,hh_dimension:] = \
composition_list[hh2,]
mixed_comp_dist[index] = \
composition_distribution[hh1] * composition_distribution[hh2]
return mixed_comp_list, mixed_comp_dist, hh_dimension, pairings
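# Minimal usage sketch (added for illustration; toy numbers, relies only on the
# function above):
if __name__ == '__main__':
    _comps = array([[2], [3]])   # two household compositions, of sizes 2 and 3
    _dist = array([0.6, 0.4])    # their probabilities
    _ml, _md, _dim, _pr = build_mixed_compositions_pairwise(_comps, _dist)
    # 2 compositions give 2*2 = 4 ordered pairs, and the product distribution
    # still sums to 1
    assert _ml.shape == (4, 2)
    assert abs(_md.sum() - 1.0) < 1e-12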
def build_mixed_compositions_threewise(composition_list,
composition_distribution,
max_size):
no_comps = composition_list.shape[0]
    if composition_list.ndim == 1:
hh_dimension = 1
else:
hh_dimension = composition_list.shape[1]
mixed_comp_list = []
    pairings = [[], [], []]
total_prob_1 = zeros((no_comps,))
    # total_prob_2[i, j] stores the summed probability of all possible third
    # elements when the first two are i, j
    total_prob_2 = zeros((no_comps, no_comps))
for hh1 in range(no_comps):
hh2_max = min(no_comps, int(0.5 * (max_size - (hh1+1))))
total_prob_1[hh1] = sum(composition_distribution[:hh2_max])
for hh2 in range(no_comps):
hh3_max = min(no_comps, max_size - (hh1+1) - (hh2+1))
total_prob_2[hh1, hh2] = sum(composition_distribution[:hh3_max])
            if (hh2 >= hh1) and (hh2 < hh2_max):
for hh3 in range(hh2, hh3_max):
pairings[0].append(hh1)
pairings[1].append(hh2)
pairings[2].append(hh3)
this_merged_comp = zeros((3*hh_dimension,))
this_merged_comp[:hh_dimension] = \
composition_list[hh1,]
this_merged_comp[hh_dimension:2*hh_dimension] = \
composition_list[hh2,]
this_merged_comp[2*hh_dimension:] = \
composition_list[hh3,]
mixed_comp_list.append(this_merged_comp)
# total_prob_2[hh1, hh2] += composition_distribution[hh3]
# total_prob_1[hh1] += composition_distribution[hh2]
def mixed_comp_term(p0,p1,p2):
hh2_max = min(no_comps, max_size - (p0 + 1) - 1)
hh3_max = min(no_comps, max_size - (p0 + 1) - (p1 + 1))
return composition_distribution[p0] * \
(composition_distribution[p1] /
sum(composition_distribution[:hh2_max])) * \
(composition_distribution[p2] /
sum(composition_distribution[:hh3_max]))
no_merged_comps = len(mixed_comp_list)
mixed_comp_list = array(mixed_comp_list, dtype=my_int)
pairings = array(pairings, dtype=my_int).T
mixed_comp_dist = zeros((no_merged_comps,))
for mc in range(no_merged_comps):
        p_unique = unique(pairings[mc, :])
        if len(p_unique) == 1:
            mixed_comp_dist[mc] = mixed_comp_term(
                p_unique[0], p_unique[0], p_unique[0])
        elif len(p_unique) == 2:
            if len(where(pairings[mc, :] == p_unique[0])[0]) == 2:
pair0 = p_unique[0]
pair1 = p_unique[1]
else:
pair0 = p_unique[1]
pair1 = p_unique[0]
mixed_comp_dist[mc] = mixed_comp_term(pair0, pair0, pair1) + \
mixed_comp_term(pair0, pair1, pair0) + \
mixed_comp_term(pair1, pair0, pair0)
else:
pair0 = p_unique[0]
pair1 = p_unique[1]
pair2 = p_unique[2]
mixed_comp_dist[mc] = mixed_comp_term(pair0,pair1,pair2) + \
mixed_comp_term(pair0, pair2, pair1) + \
mixed_comp_term(pair1, pair0, pair2) + \
mixed_comp_term(pair1, pair2, pair0) + \
mixed_comp_term(pair2, pair0, pair1) + \
mixed_comp_term(pair2, pair1, pair0)
# if len(unique(pairings[mc,:]))==2:
# mixed_comp_dist[mc] = 3 * mixed_comp_dist[mc]
# elif len(unique(pairings[mc,:]))==3:
# mixed_comp_dist[mc] = 6 * mixed_comp_dist[mc]
return mixed_comp_list, mixed_comp_dist, hh_dimension, pairings
def pairwise_merged_initial_condition(H_unmerged,
unmerged_population,
merged_population,
hh_dimension,
pairings,
no_compartments=5):
H0_len = sum(merged_population.system_sizes)
H0 = zeros((H0_len,))
reverse_prod = unmerged_population.reverse_prod
index_vector_list = unmerged_population.index_vector
which_composition = merged_population.which_composition
merged_states = merged_population.states
for i in range(H0_len):
comp_0 = pairings[0][which_composition[i]]
comp_1 = pairings[1][which_composition[i]]
state_0 = merged_states[i, :no_compartments]
state_1 = merged_states[i, no_compartments:]
index_vector_0 = index_vector_list[comp_0]
index_vector_1 = index_vector_list[comp_1]
index_0 = index_vector_0[
state_0.dot(reverse_prod[comp_0]) + state_0[-1], 0]
index_1 = index_vector_1[
state_1.dot(reverse_prod[comp_1]) + state_1[-1], 0]
H0[i] = H_unmerged[index_0] * H_unmerged[index_1]
return H0
def initialise_merged_system_threewise(
H0_unmerged,
unmerged_population,
merged_population,
state_match):
wc_um = unmerged_population.which_composition
wc_m = merged_population.which_composition
cd_um = unmerged_population.composition_distribution
cd_m = merged_population.composition_distribution
no_merged_states = len(wc_m)
H0_merged = zeros((no_merged_states,))
for state_no in range(no_merged_states):
this_H0_merged = \
cd_m[wc_m[state_no]] \
* prod(H0_unmerged[state_match[state_no, :]]) \
/ prod(cd_um[wc_um[state_match[state_no, :]]])
# for hh in range(3):
# this_H0_merged = this_H0_merged * \
# H0_unmerged[state_match[state_no, hh]] / \
# cd_um[wc_um[state_match[state_no, hh]]]
H0_merged[state_no] = this_H0_merged
return H0_merged
def pairwise_demerged_initial_condition(
H_merged,
unmerged_population,
merged_population,
hh_dimension,
pairings,
no_compartments=5):
H0_len = sum(unmerged_population.system_sizes)
H0 = zeros((H0_len,))
reverse_prod = unmerged_population.reverse_prod
index_vector_list = unmerged_population.index_vector
which_composition = merged_population.which_composition
merged_states = merged_population.states
for i in range(len(H_merged)):
comp_0 = pairings[0][which_composition[i]]
comp_1 = pairings[1][which_composition[i]]
state_0 = merged_states[i, :no_compartments]
state_1 = merged_states[i, no_compartments:]
index_vector_0 = index_vector_list[comp_0]
index_vector_1 = index_vector_list[comp_1]
index_0 = index_vector_0[
state_0.dot(reverse_prod[comp_0]) + state_0[-1], 0]
index_1 = index_vector_1[
state_1.dot(reverse_prod[comp_1]) + state_1[-1], 0]
H0[index_0] += 0.5*H_merged[i]
H0[index_1] += 0.5*H_merged[i]
return H0
def build_mixed_compositions(
composition_list,
composition_distribution,
no_hh=2,
max_size=12):
no_comps = composition_list.shape[0]
if composition_list.ndim == 1:
hh_dimension = 1
else:
hh_dimension = composition_list.shape[1]
no_mixed_comps = 0
mixed_comp_list = []
mixed_comp_dist = []
hhi = no_hh*[0]
pairings = []
for pairing_index in range(no_hh):
pairings.append([])
    # coeff stores the number of appearances each combination would make in a
    # "full" (ordered) merged list
    coeff = []
def comp_iterator(depth, no_hh):
if depth < no_hh:
for i in range(hhi[depth-1], no_comps):
hhi[depth] = i
comp_iterator(depth+1, no_hh)
else:
index = 0
for hh in range(no_hh):
index += hhi[hh] * no_comps**(no_hh - 1 - hh)
pairings[hh].append(hhi[hh])
this_mix_comp = zeros((no_hh*hh_dimension,))
hist = zeros((no_comps,))
for hh in range(no_hh):
                this_mix_comp[hh*hh_dimension:(hh+1)*hh_dimension] = \
                    composition_list[hhi[hh], ]  # trailing comma is a 1-tuple index, equivalent to composition_list[hhi[hh]]
hist[hhi[hh]] += 1
this_mix_prob = multinomial.pmf(
hist, n=no_hh, p=composition_distribution)
mixed_comp_list.append(this_mix_comp)
mixed_comp_dist.append(this_mix_prob)
coeff.append(factorial(no_hh)/prod(factorial(hist)))
comp_iterator(0, no_hh)
mixed_comp_list = array(mixed_comp_list, dtype=my_int)
mixed_comp_dist = array(mixed_comp_dist)
coeff = array(coeff)
pairings = array(pairings).T
print(
'Before checking for big households, sum(dist)=',
sum(mixed_comp_dist))
reverse_prod = hstack(([0], no_comps**arange(1, no_hh)))
no_mixed_comps = len(mixed_comp_dist)
rows = [
mixed_comp_list[k, :].dot(reverse_prod) + mixed_comp_list[k, 0]
for k in range(no_mixed_comps)]
mixed_comp_index_vector = sparse((
arange(no_mixed_comps),
(rows, [0]*no_mixed_comps)), dtype=my_int)
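    # mixed_comp_index_vector maps a merged composition, encoded as an integer via
    # reverse_prod, back to its row in mixed_comp_list through a one-column sparse
    # matrix, so membership tests and lookups stay cheap for large composition spaces.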
mixed_sizes = mixed_comp_list.sum(axis=1)
large_merges = where(mixed_sizes > max_size)[0]
ref_dist = deepcopy(mixed_comp_dist)
for merge_no in large_merges:
this_prob = mixed_comp_dist[merge_no]
this_comp = mixed_comp_list[merge_no, :]
current_size = mixed_sizes[merge_no]
while current_size > max_size:
this_comp[this_comp.argmax()] -= 1
current_size -= 1
new_comp_loc = mixed_comp_index_vector[
this_comp.dot(reverse_prod) + this_comp[0], 0]
mixed_comp_dist[new_comp_loc] += this_prob
print(
'After checking for big households, sum(dist)=',
sum(mixed_comp_dist))
# Stores level of inflation of probability caused by adding prob of
# compositions with size>max to ones with size<=max
comp_scaler = mixed_comp_dist / ref_dist
print(large_merges)
print('Before deletion mixed_comp_list.shape=', mixed_comp_list.shape)
mixed_comp_list = delete(mixed_comp_list, large_merges, axis=0)
print('After deletion mixed_comp_list.shape=', mixed_comp_list.shape)
print('Before deletion mixed_comp_dist.shape=', mixed_comp_dist.shape)
mixed_comp_dist = delete(mixed_comp_dist, large_merges, axis=0)
print('After deletion mixed_comp_dist.shape=', mixed_comp_dist.shape)
print('Before deletion coeff.shape=', coeff.shape)
coeff = delete(coeff, large_merges, axis=0)
print('After deletion coeff.shape=', coeff.shape)
print('Before deletion pairings.shape=', pairings.shape)
pairings = delete(pairings, large_merges, axis=0)
print('After deletion pairings.shape=', pairings.shape)
print('Before deletion comp_scaler.shape=', comp_scaler.shape)
comp_scaler = delete(comp_scaler, large_merges, axis=0)
print('After deletion comp_scaler.shape=', comp_scaler.shape)
return \
mixed_comp_list, \
mixed_comp_dist, \
hh_dimension, \
pairings, \
mixed_comp_index_vector, \
reverse_prod, \
coeff, \
comp_scaler
def match_merged_states_to_unmerged(
unmerged_population,
merged_population,
pairings,
no_hh,
no_compartments):
rp_um = unmerged_population.reverse_prod
iv_um = unmerged_population.index_vector
states_m = merged_population.states
wc_m = merged_population.which_composition
    # iv_shifter = hstack((array(0), cumsum(unmerged_population.system_sizes)))
    # (would shift the index vectors so they index into the full state list rather
    # than the individual matrix blocks)
state_match = zeros((len(wc_m), no_hh), dtype=my_int)
for state_no in range(len(wc_m)):
merged_comp = wc_m[state_no]
for hh in range(no_hh):
unmerged_comp = pairings[merged_comp, hh]
this_iv = iv_um[unmerged_comp]
this_state = states_m[
state_no, hh * no_compartments:(hh+1) * no_compartments]
index = this_iv[
this_state.dot(rp_um[unmerged_comp]) + this_state[-1], 0]
state_match[state_no, hh] = index
return state_match
def initialise_merged_system(H0_unmerged,
merged_population,
state_match,
coeff,
comp_scaler,
no_hh):
wc_m = merged_population.which_composition
no_merged_states = len(wc_m)
H0_merged = zeros((no_merged_states,))
for state_no in range(no_merged_states):
log_H0_merged = 0
for hh in range(no_hh):
log_H0_merged += log(H0_unmerged[state_match[state_no, hh]])
H0_merged[state_no] = coeff[wc_m[state_no]] * \
comp_scaler[wc_m[state_no]] * \
exp(log_H0_merged)
return H0_merged
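# Note: initialise_merged_system accumulates log-probabilities to avoid underflow
# when multiplying many small household probabilities; coeff restores the
# multinomial ordering count and comp_scaler undoes the probability inflation
# introduced when oversized merged compositions were folded into smaller ones.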
def my_multinomial(hist, n, p):
log_prob = sum(log(arange(1, n+1)))
for i in range(len(hist)):
log_prob += hist[i] * log(p[i]) - sum(log(arange(1, hist[i]+1)))
return exp(log_prob)
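# Illustrative check (added): my_multinomial evaluates the multinomial pmf in log
# space, avoiding overflow of factorial(n) for large n; on small inputs it agrees
# with scipy.stats.multinomial.pmf to floating-point precision.
if __name__ == '__main__':
    _hist = array([2, 1, 0])
    _p = array([0.5, 0.3, 0.2])
    assert abs(my_multinomial(_hist, 3, _p) - multinomial.pmf(_hist, n=3, p=_p)) < 1e-12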
def merged_initial_condition(
H_unmerged,
unmerged_population,
merged_population,
hh_dimension,
pairings,
no_hh=2,
no_compartments=5):
H0_len = sum(merged_population.system_sizes)
H0 = ones((H0_len,))
reverse_prod = unmerged_population.reverse_prod
index_vector_list = unmerged_population.index_vector
which_composition = merged_population.which_composition
merged_states = merged_population.states
for i in range(H0_len):
        hist = zeros((len(H_unmerged),))
for hh in range(no_hh):
comp = pairings[hh][which_composition[i]]
state = merged_states[
i,
hh * hh_dimension * no_compartments:
(hh+1) * hh_dimension * no_compartments]
index_vector = index_vector_list[comp]
index = index_vector[
state.dot(reverse_prod[comp]) + state[-1], 0]
hist[index] += 1
H0[i] = multinomial.pmf(hist, n=no_hh, p=H_unmerged)
return H0
def merged_initial_condition_alt(
H_unmerged,
unmerged_population,
merged_population,
hh_dimension,
mixed_comp_index_vector,
mixed_comp_reverse_prod,
pairings,
no_hh=2,
no_compartments=5):
no_unmerged_states = sum(unmerged_population.system_sizes)
H0_len = sum(merged_population.system_sizes)
H0 = zeros((H0_len,))
unmerged_reverse_prod = unmerged_population.reverse_prod
merged_reverse_prod = merged_population.reverse_prod
index_vector_list = merged_population.index_vector
which_composition = merged_population.which_composition
merged_states = merged_population.states
unmerged_states = unmerged_population.states
hhi = zeros((no_hh,), dtype=my_int)
unmerged_comps = zeros((no_hh,))
this_merged_state = zeros((no_hh * no_compartments))
    def state_iterator(depth, no_hh):
        if depth < no_hh:
            for i in range(hhi[depth-1], no_unmerged_states):
                hhi[depth] = i
                unmerged_comps[depth] = unmerged_population.composition_list[
                    unmerged_population.which_composition[i]]
                this_merged_state[
                    depth * no_compartments:(depth+1) * no_compartments] = \
                    unmerged_states[i, :]
                state_iterator(depth+1, no_hh)
        else:
            comp_code = unmerged_comps.dot(mixed_comp_reverse_prod) + unmerged_comps[0]
            if comp_code in mixed_comp_index_vector.indices:
                merged_comp = mixed_comp_index_vector[comp_code, 0]
index_vector = index_vector_list[merged_comp]
reverse_prod = merged_reverse_prod[merged_comp]
index = index_vector[
this_merged_state.dot(reverse_prod) + this_merged_state[-1], 0]
hist = zeros((no_unmerged_states,))
for hh in range(no_hh):
hist[hhi[hh]] += 1
                # (the source was truncated here; the following mirrors
                # merged_initial_condition above)
                H0[index] = multinomial.pmf(hist, n=no_hh, p=H_unmerged)
    state_iterator(0, no_hh)
    return H0
from stanza.models.pos.hunspeller.pos import Verb, Noun, Pronoun, Adjective, Numeral, Adverb, numeralise
# Decline numerals and convert other pos features into XPOS and UFeats formats
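# A minimal, self-contained sketch (added for illustration; not part of the module's
# API) of the '-lika' (11-19) case-suffix table that the branches below spell out by
# hand. Case codes: V nominative, K genitive, N dative, G accusative,
# Įn instrumental, Vt locative.
_LIKA_SUFFIXES = {
    'V': 'a',     # vienuolika
    'K': 'os',    # vienuolikos
    'N': 'ai',    # vienuolikai
    'G': 'ą',     # vienuoliką
    'Įn': 'a',    # vienuolika
    'Vt': 'oje',  # vienuolikoje
}

def _decline_lika(lemma, rqrd_infl):
    # Swap the final 'a' of e.g. 'vienuolika' for the required case suffix.
    return lemma[:-1] + _LIKA_SUFFIXES[rqrd_infl]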
def decline_num(num, rqrd_infl, rqrd_num=None, rqrd_gen=None):
"""Decline numerals
:param num: Numeral object
:param rqrd_infl: the inflection the numeral should be inflected for
:param rqrd_num: the number the numeral should be inflected for
:param rqrd_gen: the gender the numeral should be inflected for
:return: a Numeral object with the numeral in a desired form and its grammatical information
"""
lemma = num.lemma
word = None
number = num.number
gender = num.gender
num_type = num.num_type
if num_type == 'kiek' and number is None and gender is None:
# cardinal numbers / simple numerals from 11 to 19 are not inflected for gender
if rqrd_infl == 'V':
if lemma.endswith('lika'):
word = lemma
if rqrd_infl == 'K':
if lemma.endswith('lika'):
word = lemma[0:len(lemma) - 1] + 'os'
if rqrd_infl == 'N':
if lemma.endswith('lika'):
word = lemma[0:len(lemma) - 1] + 'ai'
if rqrd_infl == 'G':
if lemma.endswith('lika'):
word = lemma[0:len(lemma) - 1] + 'ą'
if rqrd_infl == 'Įn':
if lemma.endswith('lika'):
word = lemma[0:len(lemma) - 1] + 'a'
if rqrd_infl == 'Vt':
if lemma.endswith('lika'):
word = lemma[0:len(lemma) - 1] + 'oje'
elif number is None and gender is not None:
# cardinal numbers / simple numerals from 1 to 10 inflected for gender and cardinal plurals
if rqrd_gen == 'vyr':
if rqrd_infl == 'V':
if lemma == 'du':
word = lemma
elif lemma == 'trys':
word = lemma
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma
elif lemma.endswith('lika'):
word = lemma
elif lemma.endswith('eji'):
word = lemma
elif lemma.endswith('eri'):
word = lemma
if rqrd_infl == 'K':
if lemma == 'du':
word = 'dviejų'
elif lemma == 'trys':
word = 'trijų'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'ų'
elif lemma.endswith('lika'):
word = lemma[0:len(lemma)-1] + 'os'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'ų'
elif lemma.endswith('eri'):
word = lemma + 'ų'
if rqrd_infl == 'N':
if lemma == 'du':
word = 'dviem'
elif lemma == 'trys':
word = 'trims'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'ems'
elif lemma.endswith('lika'):
word = lemma[0:len(lemma)-1] + 'ai'
elif lemma.endswith('eji'):
word = lemma + 'ems'
elif lemma.endswith('eri'):
word = lemma + 'ems'
if rqrd_infl == 'G':
if lemma == 'du':
word = 'du'
elif lemma == 'trys':
word = 'tris'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 's'
elif lemma.endswith('lika'):
word = lemma[0:len(lemma)-1] + 'ą'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'us'
elif lemma.endswith('eri'):
word = lemma + 'us'
if rqrd_infl == 'Įn':
if lemma == 'du':
word = 'dviem'
elif lemma == 'trys':
word = 'trimis'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'ais'
elif lemma.endswith('lika'):
word = lemma[0:len(lemma)-1] + 'a'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'ais'
elif lemma.endswith('eri'):
word = lemma + 'ais'
if rqrd_infl == 'Vt':
if lemma == 'du':
word = 'dviejuose'
elif lemma == 'trys':
word = 'trijuose'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'uose'
elif lemma.endswith('lika'):
word = lemma[0:len(lemma)-1] + 'oje'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'uose'
elif lemma.endswith('eri'):
word = lemma + 'uose'
else: # feminine gender
if rqrd_infl == 'V':
if lemma == 'du':
word = 'dvi'
elif lemma == 'trys':
word = 'trys'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'os'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'os'
elif lemma.endswith('eri'):
word = lemma + 'os'
if rqrd_infl == 'K':
if lemma == 'du':
word = 'dviejų'
elif lemma == 'trys':
word = 'trijų'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'ų'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'ų'
elif lemma.endswith('eri'):
word = lemma + 'ų'
if rqrd_infl == 'N':
if lemma == 'du':
word = 'dviem'
elif lemma == 'trys':
word = 'trim'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'oms'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'oms'
elif lemma.endswith('eri'):
word = lemma + 'oms'
if rqrd_infl == 'G':
if lemma == 'du':
word = 'dvi'
elif lemma == 'trys':
word = 'tris'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'as'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'as'
elif lemma.endswith('eri'):
word = lemma + 'as'
if rqrd_infl == 'Įn':
if lemma == 'du':
word = 'dviem'
elif lemma == 'trys':
word = 'trimis'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'omis'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'omis'
elif lemma.endswith('eri'):
word = lemma + 'omis'
if rqrd_infl == 'Vt':
if lemma == 'du':
word = 'dviejose'
elif lemma == 'trys':
word = 'trijose'
elif lemma == 'keturi' or lemma == 'penki' or lemma == 'šeši' or lemma == 'septyni' \
or lemma == 'aštuoni' or lemma == 'devyni':
word = lemma + 'ose'
elif lemma.endswith('eji'):
word = lemma[:-1] + 'ose'
elif lemma.endswith('eri'):
word = lemma + 'ose'
elif rqrd_num == 'vns' or rqrd_num is None:
if rqrd_gen == 'vyr' or (rqrd_gen is None and num.gender != 'mot'):
if rqrd_infl == 'V':
if lemma == 'vienas':
word = lemma
elif lemma.endswith('dešimtis'):
return
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausias'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esnis'
else:
word = lemma
elif lemma == 'tūkstantis':
word = lemma
elif lemma.endswith('is'):
word = lemma
if rqrd_infl == 'K':
if lemma == 'vienas':
word = 'vieno'
elif lemma.endswith('dešimtis'):
return
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausio'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esnio'
else:
word = lemma[:-2] + 'o'
elif lemma == 'tūkstantis':
word = lemma[:-3] + 'čio'
elif lemma.endswith('is'):
word = lemma[:-2] + 'io'
if rqrd_infl == 'N':
if lemma == 'vienas':
word = 'vienam'
elif lemma.endswith('dešimtis'):
return
elif lemma.endswith('etas') or (num.num_type == 'kiek'
and (lemma == 'šimtas' or lemma == 'milijonas' or lemma == 'milijardas')) \
or lemma == 'ketvertas':
word = lemma[:-2] + 'ui'
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausiam'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esniam'
else:
word = lemma[:-2] + 'am'
elif lemma == 'tūkstantis':
word = lemma[:-3] + 'čiui'
elif lemma.endswith('is'):
word = lemma[:-2] + 'iam'
if rqrd_infl == 'G':
if lemma == 'vienas':
word = 'vieną'
elif lemma.endswith('dešimtis'):
return
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausią'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esnį'
else:
word = lemma[:-2] + 'ą'
elif lemma == 'tūkstantis':
word = lemma[:-3] + 'tį'
elif lemma.endswith('is'):
word = lemma[:-2] + 'į'
if rqrd_infl == 'Įn':
if lemma == 'vienas':
                    word = 'vienu'  # (reconstructed: the source was truncated here; 'vienu' is the instrumental singular of 'vienas')
#! /usr/bin/env python
'''
tensorflow==2.5.0+
python3 openvino2tensorflow.py \
--model_path openvino/448x448/FP32/Resnet34_3inputs_448x448_20200609.xml \
--output_saved_model \
--output_pb \
--output_weight_quant_tflite \
--output_float16_quant_tflite \
--output_no_quant_float32_tflite
python3 openvino2tensorflow.py \
--model_path debug/openvino/yolox_nano/320x320/FP32/yolox_nano_320x320.xml \
--output_saved_model \
--output_pb \
--output_no_quant_float32_tflite \
--weight_replacement_config debug/weight_replacement_config_yolox_nano.json
'''
import os
import sys
import argparse
import struct
import numpy as np
from pathlib import Path
import xml.etree.ElementTree as et
import logging
import warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
class Color:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
COLOR_DEFAULT = '\033[39m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
INVISIBLE = '\033[08m'
REVERCE = '\033[07m'
BG_BLACK = '\033[40m'
BG_RED = '\033[41m'
BG_GREEN = '\033[42m'
BG_YELLOW = '\033[43m'
BG_BLUE = '\033[44m'
BG_MAGENTA = '\033[45m'
BG_CYAN = '\033[46m'
BG_WHITE = '\033[47m'
BG_DEFAULT = '\033[49m'
RESET = '\033[0m'
def convert(model_path,
model_output_path,
output_saved_model,
output_h5,
output_weight_and_json,
output_pb,
output_no_quant_float32_tflite,
output_dynamic_range_quant_tflite,
output_weight_quant_tflite,
output_float16_quant_tflite,
output_integer_quant_tflite,
output_full_integer_quant_tflite,
output_integer_quant_type,
string_formulas_for_normalization,
calib_ds_type,
ds_name_for_tfds_for_calibration,
split_name_for_tfds_for_calibration,
download_dest_folder_path_for_the_calib_tfds,
tfds_download_flg,
npy_load_default_path,
load_dest_file_path_for_the_calib_npy,
output_tfjs,
output_tftrt_float32,
output_tftrt_float16,
tftrt_maximum_cached_engines,
output_coreml,
output_edgetpu,
edgetpu_compiler_timeout,
edgetpu_num_segments,
output_onnx,
onnx_opset,
use_onnx_optimization,
output_myriad,
vpu_number_of_shaves,
vpu_number_of_cmx_slices,
replace_swish_and_hardswish,
optimizing_hardswish_for_edgetpu,
replace_prelu_and_minmax,
restricted_resize_image_mode,
weight_replacement_config,
use_experimental_new_quantizer,
optimizing_barracuda,
layerids_of_the_terminating_output,
keep_input_tensor_in_nchw):
print(f'{Color.REVERCE}TensorFlow/Keras model building process starts{Color.RESET}', '=' * 38)
import subprocess
import tensorflow as tf
tf.get_logger().setLevel('INFO')
tf.autograph.set_verbosity(0)
tf.get_logger().setLevel(logging.ERROR)
import tensorflow_datasets as tfds
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, AveragePooling2D, Conv2DTranspose, PReLU, Lambda, LeakyReLU, Conv3D
from tensorflow.keras.initializers import Constant
from tensorflow.keras.activations import elu
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from tensorflow.python.framework.ops import EagerTensor
if output_coreml:
import coremltools as ct
import json
import pprint
import math
# for unpacking binary buffer
format_config = {
'FP32' : ['f', 4],
'FP16' : ['e', 2],
'I64' : ['q', 8],
'I32' : ['i', 4],
'I16' : ['h', 2],
'I8' : ['b', 1],
'U8' : ['B', 1],
'BOOL' : ['?', 1]
}
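    # (Added note) Each entry is [struct format char, size in bytes]; e.g. an FP32
    # value at byte offset ofs in the IR .bin buffer can be read with
    #   struct.unpack_from('<f', binWeight, ofs)[0]
    # and an FP16 value the same way with 'e' (2 bytes).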
# vino: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, bf16, boolean
# tf : uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64, bfloat16
# type conversion table
cast_type_ov_tf = {
'u8' : tf.uint8,
'u16' : tf.uint16,
'u32' : tf.uint32,
'u64' : tf.uint64,
'i8' : tf.int8,
'i16' : tf.int16,
'i32' : tf.int32,
'i64' : tf.int64,
'f16' : tf.float16,
'f32' : tf.float32,
'bf16': tf.bfloat16,
'boolean': tf.bool
}
# integer type table
int_type_tf = [
tf.uint8,
tf.uint16,
tf.uint32,
tf.uint64,
tf.int8,
tf.int16,
tf.int32,
tf.int64
]
# pad type conversion table
pad_type_ov_tf = {
'constant' : 'CONSTANT',
'reflect' : 'REFLECT',
'symmetric': 'SYMMETRIC',
'edge' : 'REFLECT'
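        # tf.pad has no native edge/replicate mode, so OpenVINO's 'edge' padding
        # falls back to 'REFLECT' as the closest available approximation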
}
# Read IR weight data
with open(model_path+'.bin', 'rb') as f:
binWeight = f.read()
# Parse IR XML file,
tree = et.parse(model_path+'.xml')
root = tree.getroot()
edges = root.find('edges')
layers = root.find('layers')
tf_layers_dict = {}
tf_edges = {}
tf_inputs = []
tf_outputs = []
layer_id_port_dict = {}
def get_num_of_outputs_per_layer_id(tf_edges):
output_count_by_layer_id_tmp = {}
for key in tf_edges.keys():
key_tmp = key.split(':')[0]
output_count_by_layer_id_tmp.setdefault(key_tmp, {'count': 0, 'layer_id:port': []})
output_count_by_layer_id_tmp[key_tmp]['count'] += 1
output_count_by_layer_id_tmp[key_tmp]['layer_id:port'].append(key)
return output_count_by_layer_id_tmp
    def get_before_layer_type(before_layer):
t = type(tf_layers_dict[before_layer])
if t == np.ndarray:
# Const
return 'const'
else:
try:
return tf_layers_dict[before_layer.split(':')[0]].op.type
except:
# TopK
return 'other'
def get_tf_edges_from(tf_edges, layer_id, edge_index=-1):
if edge_index == -1:
# Add, Concat
layer_list = []
for edge_index in range(len(tf_edges[layer_id])):
                before_layer_type = get_before_layer_type(tf_edges[layer_id][edge_index])
if before_layer_type == 'Split':
layer_list.append(tf_edges[layer_id][edge_index])
elif before_layer_type == 'other':
layer_list.append(tf_edges[layer_id][edge_index])
else:
layer_list.append(tf_edges[layer_id][edge_index].split(':')[0])
return layer_list
else:
# Other
if layer_id in tf_edges:
                before_layer_type = get_before_layer_type(tf_edges[layer_id][edge_index])
else:
for key in tf_edges.keys():
if layer_id in key:
                        before_layer_type = get_before_layer_type(tf_edges[key][edge_index])
layer_id = key
break
if before_layer_type == 'Split':
return tf_edges[layer_id][edge_index]
elif before_layer_type == 'other':
return tf_edges[layer_id][edge_index]
else:
return tf_edges[layer_id][edge_index].split(':')[0]
"""
format_version : Format version of weight_replacement_config.
layer_id : ID of the Const layer whose weight/constant parameter is to be swapped.
For example, specify "1123" for layer id="1123" for type="Const" in .xml.
<layer id="1123" name="Decoder/softmax/Reshape_1/Cast_123722_const657_const" type="Const" version="opset1">
<data element_type="i64" offset="7632604" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
replace_mode : "direct" or "npy"
"direct": Specify the values of the Numpy matrix directly in the "values" attribute.
Ignores the values recorded in the .bin file and replaces them with the values specified in "values".
{
"layer_id": "1123",
"replace_mode": "direct",
"values": [
1,
2,
513,
513
]
}
"npy": Load a Numpy binary file with the matrix output by np.save('xyz', a).
The "values" attribute specifies the path to the Numpy binary file.
{
"layer_id": "1125",
"replace_mode": "npy",
"values": "weights/xyz.npy"
}
values : Specify the value or the path to the Numpy binary file to replace the weight/constant value recorded in .bin.
The way to specify is as described in the description of 'replace_mode'.
"""
# Elements for each version of weights_replacement_config
# key = config version
# value = Allowed elements for each version
weights_replacement_config_version_elements = {
1 : ['layer_id', 'replace_mode', 'values'],
2 : ['layer_id', 'type', 'replace_mode', 'values']
}
# Combinations of possible values for type key and replace_mode in weights_replacement_config.
# key = Type name
# value = List of replace_mode
weights_replacement_config_types = {
'Const': ['direct', 'npy'],
'Transpose': ['insert_before', 'insert_after'],
'Reshape': ['insert_before', 'insert_after'],
'Cast': ['insert_before', 'insert_after'],
'Concat': ['change_axis'],
'SoftMax': ['change_axis'],
'ShuffleChannels': ['change_axis'],
'StridedSlice': ['change_attributes'],
'MaxPool': ['change_padding_mode'],
'PReLU': ['change_shared_axes'],
'ReverseSequence': ['change_batch_axis', 'change_seq_axis'],
'Squeeze': ['insert_before', 'insert_after'],
'Unsqueeze': ['insert_before', 'insert_after'],
}
def parse_json(jsonfile_path: str):
"""Parsing weights_replacement_config
Args:
----------
jsonfile_path : str
Path to the weights_replacement_config file
Returns:
----------
format_version : int
Format version number of weights_replacement_config
layers : dict
Result of parsing weights_replacement_config into dict format
"""
        with open(jsonfile_path) as jf:
            j = json.load(jf)
format_version = j['format_version']
layers = {}
        for v in j['layers']:
            # Elements check
            for k in v.keys():
                if k not in weights_replacement_config_version_elements[format_version]:
                    print(f'{Color.RED}ERROR:{Color.RESET} It contains a key that cannot be included in the config with format_version: {format_version}. layer_id: {v["layer_id"]}, key: "{k}"')
                    print(f'{Color.RED}ERROR:{Color.RESET} List of keys to allow in format_version: {format_version}. {weights_replacement_config_version_elements[format_version]}')
                    sys.exit(-1)
            for k in weights_replacement_config_version_elements[format_version]:
                if k not in v.keys():
                    print(f'{Color.RED}ERROR:{Color.RESET} Missing an element that must be included in the config for format_version: {format_version}. layer_id: {v["layer_id"]}, key: "{k}"')
                    print(f'{Color.RED}ERROR:{Color.RESET} List of elements that must be included in the config for format_version: {format_version}. {weights_replacement_config_version_elements[format_version]}')
                    sys.exit(-1)
            # weights_replacement_config_types check (only when format_version is 2 or higher)
            if format_version >= 2:
                # Type check
                if v['type'] not in weights_replacement_config_types:
                    print(f'{Color.RED}ERROR:{Color.RESET} It contains a type that cannot be included in the config. layer_id: {v["layer_id"]}, type: "{v["type"]}"')
                    print(f'{Color.RED}ERROR:{Color.RESET} List of types to allow. {list(weights_replacement_config_types.keys())}')
                    sys.exit(-1)
                # Replace-mode check
                if v['replace_mode'] not in weights_replacement_config_types[v['type']]:
                    print(f'{Color.RED}ERROR:{Color.RESET} It contains a replace_mode that cannot be combined with type "{v["type"]}". layer_id: {v["layer_id"]}, replace_mode: "{v["replace_mode"]}"')
                    print(f'{Color.RED}ERROR:{Color.RESET} List of replace_modes to allow for this type. {weights_replacement_config_types[v["type"]]}')
                    sys.exit(-1)
            layers[v['layer_id']] = v
print(f'{Color.GREEN}weight_replacement_config format_version:{Color.RESET} {format_version}')
print(f'{Color.GREEN}Replace the value of Const for each layer_id with the value below.{Color.RESET}')
pprint.pprint(layers)
return format_version, layers
format_version = None
wr_config = None
if weight_replacement_config:
format_version, wr_config = parse_json(weight_replacement_config)
def extrapolation_of_layers(setting_up_layers_to_be_extrapolated: dict, input):
"""Processing of input operations based on weights_replacement_config settings
Args:
----------
setting_up_layers_to_be_extrapolated : dict
wr_config[layer_id]
{
"layer_id": "659",
"type": "Transpose",
"replace_mode": "insert_before",
"values": [0,2,1]
}
input : INPUT operation
INPUT layer to be input to TF operations
Returns:
----------
Processed input operations
"""
tf_layer = None
layer_type = setting_up_layers_to_be_extrapolated['type']
param = setting_up_layers_to_be_extrapolated['values']
if layer_type == 'Transpose':
tf_layer = tf.transpose(
input,
perm=param
)
elif layer_type == 'Reshape':
tf_layer = tf.reshape(
input,
shape=param
)
elif layer_type == 'Cast':
tf_layer = tf.cast(
input,
dtype=cast_type_ov_tf[param]
)
elif layer_type == 'Squeeze':
tf_layer = tf.squeeze(
input,
axis=param
)
elif layer_type == 'Unsqueeze':
tf_layer = tf.expand_dims(
input,
axis=param
)
return tf_layer
print(f'{Color.REVERCE}Layer structure{Color.RESET}', '=' * 69)
def layer_structure_print(info: dict) -> None:
for key, value in info.items():
print(f'{Color.GREEN}{key}{Color.RESET}: {value}')
print('=' * 84)
# edges
added_key_list = []
concat_port_list = {}
for edge in edges:
to_layer = edge.attrib['to-layer']
to_layer_port = edge.attrib['to-port']
from_layer = edge.attrib['from-layer']
from_layer_port = edge.attrib['from-port']
for layer in layers:
if layer.attrib['id'] == to_layer:
output_layer_ports = layer.find('output')
if layer.attrib['type'] != 'Result' and len(output_layer_ports) >= 2:
for port in output_layer_ports:
tf_edges.setdefault('{}:{}'.format(to_layer, port.attrib['id']), []).append(from_layer)
added_key_list.append(to_layer)
else:
tf_edges.setdefault(to_layer, [])
if layer.attrib['type'] == 'Concat' or \
layer.attrib['type'] == 'Gather' or \
layer.attrib['type'] == 'GatherND' or \
layer.attrib['type'] == 'GatherElements' or \
layer.attrib['type'] == 'ScatterElementsUpdate' or \
layer.attrib['type'] == 'ScatterNDUpdate' or \
layer.attrib['type'] == 'Reshape' or \
                        layer.attrib['type'] ==
    def GetNAFltI(self, *args):
"""
GetNAFltI(TNEANet self, TStr attr, int const & NId) -> TNEANet::TAFltI
Parameters:
attr: TStr const &
NId: int const &
"""
return _snap.TNEANet_GetNAFltI(self, *args)
def AttrNameNI(self, *args):
"""
AttrNameNI(TNEANet self, TInt NId, TStrV Names)
Parameters:
NId: TInt const &
Names: TStrV &
AttrNameNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TStrV Names)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_AttrNameNI(self, *args)
def AttrValueNI(self, *args):
"""
AttrValueNI(TNEANet self, TInt NId, TStrV Values)
Parameters:
NId: TInt const &
Values: TStrV &
AttrValueNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TStrV Values)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Values: TStrV &
"""
return _snap.TNEANet_AttrValueNI(self, *args)
def IntAttrNameNI(self, *args):
"""
IntAttrNameNI(TNEANet self, TInt NId, TStrV Names)
Parameters:
NId: TInt const &
Names: TStrV &
IntAttrNameNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TStrV Names)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_IntAttrNameNI(self, *args)
def IntAttrValueNI(self, *args):
"""
IntAttrValueNI(TNEANet self, TInt NId, TIntV Values)
Parameters:
NId: TInt const &
Values: TIntV &
IntAttrValueNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TIntV Values)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Values: TIntV &
"""
return _snap.TNEANet_IntAttrValueNI(self, *args)
def StrAttrNameNI(self, *args):
"""
StrAttrNameNI(TNEANet self, TInt NId, TStrV Names)
Parameters:
NId: TInt const &
Names: TStrV &
StrAttrNameNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TStrV Names)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_StrAttrNameNI(self, *args)
def StrAttrValueNI(self, *args):
"""
StrAttrValueNI(TNEANet self, TInt NId, TStrV Values)
Parameters:
NId: TInt const &
Values: TStrV &
StrAttrValueNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TStrV Values)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Values: TStrV &
"""
return _snap.TNEANet_StrAttrValueNI(self, *args)
def FltAttrNameNI(self, *args):
"""
FltAttrNameNI(TNEANet self, TInt NId, TStrV Names)
Parameters:
NId: TInt const &
Names: TStrV &
FltAttrNameNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TStrV Names)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_FltAttrNameNI(self, *args)
def FltAttrValueNI(self, *args):
"""
FltAttrValueNI(TNEANet self, TInt NId, TFltV & Values)
Parameters:
NId: TInt const &
Values: TFltV &
FltAttrValueNI(TNEANet self, TInt NId, TStrIntPrH::TIter NodeHI, TFltV & Values)
Parameters:
NId: TInt const &
NodeHI: TStrIntPrH::TIter
Values: TFltV &
"""
return _snap.TNEANet_FltAttrValueNI(self, *args)
def AttrNameEI(self, *args):
"""
AttrNameEI(TNEANet self, TInt EId, TStrV Names)
Parameters:
EId: TInt const &
Names: TStrV &
AttrNameEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TStrV Names)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_AttrNameEI(self, *args)
def AttrValueEI(self, *args):
"""
AttrValueEI(TNEANet self, TInt EId, TStrV Values)
Parameters:
EId: TInt const &
Values: TStrV &
AttrValueEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TStrV Values)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Values: TStrV &
"""
return _snap.TNEANet_AttrValueEI(self, *args)
def IntAttrNameEI(self, *args):
"""
IntAttrNameEI(TNEANet self, TInt EId, TStrV Names)
Parameters:
EId: TInt const &
Names: TStrV &
IntAttrNameEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TStrV Names)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_IntAttrNameEI(self, *args)
def IntAttrValueEI(self, *args):
"""
IntAttrValueEI(TNEANet self, TInt EId, TIntV Values)
Parameters:
EId: TInt const &
Values: TIntV &
IntAttrValueEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TIntV Values)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Values: TIntV &
"""
return _snap.TNEANet_IntAttrValueEI(self, *args)
def StrAttrNameEI(self, *args):
"""
StrAttrNameEI(TNEANet self, TInt EId, TStrV Names)
Parameters:
EId: TInt const &
Names: TStrV &
StrAttrNameEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TStrV Names)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_StrAttrNameEI(self, *args)
def StrAttrValueEI(self, *args):
"""
StrAttrValueEI(TNEANet self, TInt EId, TStrV Values)
Parameters:
EId: TInt const &
Values: TStrV &
StrAttrValueEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TStrV Values)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Values: TStrV &
"""
return _snap.TNEANet_StrAttrValueEI(self, *args)
def FltAttrNameEI(self, *args):
"""
FltAttrNameEI(TNEANet self, TInt EId, TStrV Names)
Parameters:
EId: TInt const &
Names: TStrV &
FltAttrNameEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TStrV Names)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Names: TStrV &
"""
return _snap.TNEANet_FltAttrNameEI(self, *args)
def FltAttrValueEI(self, *args):
"""
FltAttrValueEI(TNEANet self, TInt EId, TFltV & Values)
Parameters:
EId: TInt const &
Values: TFltV &
FltAttrValueEI(TNEANet self, TInt EId, TStrIntPrH::TIter EdgeHI, TFltV & Values)
Parameters:
EId: TInt const &
EdgeHI: TStrIntPrH::TIter
Values: TFltV &
"""
return _snap.TNEANet_FltAttrValueEI(self, *args)
def GetEAIntI(self, *args):
"""
GetEAIntI(TNEANet self, TStr attr, int const & EId) -> TNEANet::TAIntI
Parameters:
attr: TStr const &
EId: int const &
"""
return _snap.TNEANet_GetEAIntI(self, *args)
def GetEAStrI(self, *args):
"""
GetEAStrI(TNEANet self, TStr attr, int const & EId) -> TNEANet::TAStrI
Parameters:
attr: TStr const &
EId: int const &
"""
return _snap.TNEANet_GetEAStrI(self, *args)
def GetEAFltI(self, *args):
"""
GetEAFltI(TNEANet self, TStr attr, int const & EId) -> TNEANet::TAFltI
Parameters:
attr: TStr const &
EId: int const &
"""
return _snap.TNEANet_GetEAFltI(self, *args)
def GetMxNId(self):
"""
GetMxNId(TNEANet self) -> int
Parameters:
self: TNEANet const *
"""
return _snap.TNEANet_GetMxNId(self)
def GetEdges(self):
"""
GetEdges(TNEANet self) -> int
Parameters:
self: TNEANet const *
"""
return _snap.TNEANet_GetEdges(self)
def AddEdge(self, *args):
"""
AddEdge(TNEANet self, int const & SrcNId, int const & DstNId, int EId=-1) -> int
Parameters:
SrcNId: int const &
DstNId: int const &
EId: int
AddEdge(TNEANet self, int const & SrcNId, int const & DstNId) -> int
Parameters:
SrcNId: int const &
DstNId: int const &
AddEdge(TNEANet self, TNEANet::TEdgeI const & EdgeI) -> int
Parameters:
EdgeI: TNEANet::TEdgeI const &
"""
return _snap.TNEANet_AddEdge(self, *args)
def DelEdge(self, *args):
"""
DelEdge(TNEANet self, int const & EId)
Parameters:
EId: int const &
DelEdge(TNEANet self, int const & SrcNId, int const & DstNId, bool const & IsDir=True)
Parameters:
SrcNId: int const &
DstNId: int const &
IsDir: bool const &
DelEdge(TNEANet self, int const & SrcNId, int const & DstNId)
Parameters:
SrcNId: int const &
DstNId: int const &
"""
return _snap.TNEANet_DelEdge(self, *args)
def IsEdge(self, *args):
"""
IsEdge(TNEANet self, int const & EId) -> bool
Parameters:
EId: int const &
IsEdge(TNEANet self, int const & SrcNId, int const & DstNId, bool const & IsDir=True) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
IsDir: bool const &
IsEdge(TNEANet self, int const & SrcNId, int const & DstNId) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
IsEdge(TNEANet self, int const & SrcNId, int const & DstNId, int & EId, bool const & IsDir=True) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
EId: int &
IsDir: bool const &
IsEdge(TNEANet self, int const & SrcNId, int const & DstNId, int & EId) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
EId: int &
"""
return _snap.TNEANet_IsEdge(self, *args)
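# Illustrative use of the edge API above through the snap.py bindings (a
# sketch, assuming the standard `import snap` layout of the SNAP Python
# distribution):
#
#   G = snap.TNEANet.New()
#   G.AddNode(1); G.AddNode(2)
#   EId = G.AddEdge(1, 2)      # returns the id of the new edge
#   assert G.IsEdge(EId)       # lookup by edge id
#   assert G.IsEdge(1, 2)      # lookup by endpoints (directed by default)
#   G.DelEdge(EId)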
def GetEId(self, *args):
"""
GetEId(TNEANet self, int const & SrcNId, int const & DstNId) -> int
Parameters:
SrcNId: int const &
DstNId: int const &
"""
return _snap.TNEANet_GetEId(self, *args)
def GetEI(self, *args):
"""
GetEI(TNEANet self, int const & SrcNId, int const & DstNId) -> TNEANet::TEdgeI
Parameters:
SrcNId: int const &
DstNId: int const &
"""
return _snap.TNEANet_GetEI(self, *args)
def GetRndNId(self, *args):
"""
GetRndNId(TNEANet self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndNId(TNEANet self) -> int
Parameters:
self: TNEANet *
"""
return _snap.TNEANet_GetRndNId(self, *args)
def GetRndNI(self, *args):
"""
GetRndNI(TNEANet self, TRnd Rnd=Rnd) -> TNEANet::TNodeI
Parameters:
Rnd: TRnd &
GetRndNI(TNEANet self) -> TNEANet::TNodeI
Parameters:
self: TNEANet *
"""
return _snap.TNEANet_GetRndNI(self, *args)
def GetRndEId(self, *args):
"""
GetRndEId(TNEANet self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndEId(TNEANet self) -> int
Parameters:
self: TNEANet *
"""
return _snap.TNEANet_GetRndEId(self, *args)
def GetRndEI(self, *args):
"""
GetRndEI(TNEANet self, TRnd Rnd=Rnd) -> TNEANet::TEdgeI
Parameters:
Rnd: TRnd &
GetRndEI(TNEANet self) -> TNEANet::TEdgeI
Parameters:
self: TNEANet *
"""
return _snap.TNEANet_GetRndEI(self, *args)
def GetNIdV(self, *args):
"""
GetNIdV(TNEANet self, TIntV NIdV)
Parameters:
NIdV: TIntV &
"""
return _snap.TNEANet_GetNIdV(self, *args)
def GetEIdV(self, *args):
"""
GetEIdV(TNEANet self, TIntV EIdV)
Parameters:
EIdV: TIntV &
"""
return _snap.TNEANet_GetEIdV(self, *args)
def Empty(self):
"""
Empty(TNEANet self) -> bool
Parameters:
self: TNEANet const *
"""
return _snap.TNEANet_Empty(self)
def Clr(self):
"""
Clr(TNEANet self)
Parameters:
self: TNEANet *
"""
return _snap.TNEANet_Clr(self)
def Reserve(self, *args):
"""
Reserve(TNEANet self, int const & Nodes, int const &
# Dropping helper columns
X = X.drop(['ae', 'aggreg_dict'], axis=1)
return X
# Defining the RMS Energy transformer
class RMSEnergy(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the root mean square (RMS) energy of audio signals,
applying a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: frame-overlap (hop) parameter of the signal [type: int]
:param signal_col: name of the column holding the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after the feature is extracted
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the RMS energy [type: pd.DataFrame]
Usage
-----
rms_extractor = RMSEnergy(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_rms = rms_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Extracting the feature for each signal (librosa >= 0.10 requires y as a keyword argument)
X['rms_engy'] = X[self.signal_col].apply(lambda x: librosa.feature.rms(y=x, frame_length=self.frame_size,
hop_length=self.hop_length)[0])
# Creating a dictionary with the aggregations
X['aggreg_dict'] = X['rms_engy'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['rms_engy_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping helper columns
X = X.drop(['rms_engy', 'aggreg_dict'], axis=1)
return X
# Defining the Zero Crossing Rate transformer
class ZeroCrossingRate(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the zero-crossing rate of audio signals,
applying a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: frame-overlap (hop) parameter of the signal [type: int]
:param signal_col: name of the column holding the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after the feature is extracted
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the zero-crossing rate [type: pd.DataFrame]
Usage
-----
zcr_extractor = ZeroCrossingRate(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_zcr = zcr_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Extracting the feature for each signal
X['zcr'] = X[self.signal_col].apply(lambda x: librosa.feature.zero_crossing_rate(x, frame_length=self.frame_size,
hop_length=self.hop_length)[0])
# Creating a dictionary with the aggregations
X['aggreg_dict'] = X['zcr'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['zcr_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping helper columns
X = X.drop(['zcr', 'aggreg_dict'], axis=1)
return X
# Defining the Band Energy Ratio (BER) transformer
class BandEnergyRatio(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the band energy ratio of audio signals,
applying a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: frame-overlap (hop) parameter of the signal [type: int]
:param split_freq: split frequency between the high and low bands [type: int]
:param sr: sampling rate of the audio signal [type: int]
:param signal_col: name of the column holding the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after the feature is extracted
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the band energy ratio [type: pd.DataFrame]
Usage
-----
ber_extractor = BandEnergyRatio(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
split_freq=SPLIT_FREQ, sr=SR,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_ber = ber_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, split_freq, sr, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.split_freq = split_freq
self.sr = sr
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Computing the spectrogram of each signal
X['spec'] = X[self.signal_col].apply(lambda x: librosa.stft(y=x, n_fft=self.frame_size,
hop_length=self.hop_length))
# Computing the BER
X['ber'] = X['spec'].apply(lambda x: calc_ber(spec=x, split_freq=self.split_freq, sr=self.sr))
# Creating a dictionary with the aggregations
X['aggreg_dict'] = X['ber'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['ber_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping helper columns
X = X.drop(['spec', 'ber', 'aggreg_dict'], axis=1)
return X
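# A minimal sketch of the helper assumed above (calc_ber is not defined in this
# file): for each STFT frame, the band energy ratio is the total power below
# the split frequency divided by the total power above it.
import numpy as np

def calc_ber(spec, split_freq, sr):
    # Map the split frequency onto an STFT frequency-bin index
    frequency_range = sr / 2
    split_bin = int(np.floor(split_freq / frequency_range * spec.shape[0]))
    # Power spectrogram with frames along the first axis
    power_spec = (np.abs(spec) ** 2).T
    # Low-band over high-band power, one value per frame
    return np.array([frame[:split_bin].sum() / frame[split_bin:].sum()
                     for frame in power_spec])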
# Defining the Spectral Centroid transformer
class SpectralCentroid(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the spectral centroid of audio signals,
applying a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: frame-overlap (hop) parameter of the signal [type: int]
:param sr: sampling rate of the audio signal [type: int]
:param signal_col: name of the column holding the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after the feature is extracted
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the spectral centroid [type: pd.DataFrame]
Usage
-----
sc_extractor = SpectralCentroid(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH, sr=SR,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_sc = sc_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, sr, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.sr = sr
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Computing the feature
X['sc'] = X[self.signal_col].apply(lambda x: librosa.feature.spectral_centroid(y=x, sr=self.sr,
n_fft=self.frame_size,
hop_length=self.hop_length)[0])
# Creating a dictionary with the aggregations
X['aggreg_dict'] = X['sc'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['sc_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping helper columns
X = X.drop(['sc', 'aggreg_dict'], axis=1)
return X
# Defining the Bandwidth transformer
class BandWidth(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the spectral bandwidth of audio signals,
applying a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: frame-overlap (hop) parameter of the signal [type: int]
:param sr: sampling rate of the audio signal [type: int]
:param signal_col: name of the column holding the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after the feature is extracted
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the spectral bandwidth [type: pd.DataFrame]
Usage
-----
bw_extractor = BandWidth(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH, sr=SR,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_bw = bw_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, sr, signal_col='signal',
feature_aggreg=['mean', 'median', 'std', 'var', 'max', 'min']):
self.frame_size = frame_size
self.hop_length = hop_length
self.sr = sr
self.signal_col = signal_col
self.feature_aggreg = feature_aggreg
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Computing the feature
X['bw'] = X[self.signal_col].apply(lambda x: librosa.feature.spectral_bandwidth(y=x, sr=self.sr,
n_fft=self.frame_size,
hop_length=self.hop_length)[0])
# Creating a dictionary with the aggregations
X['aggreg_dict'] = X['bw'].apply(lambda x: pd.DataFrame(x).agg(self.feature_aggreg))
# Extracting the aggregations and enriching the dataset
for agg in self.feature_aggreg:
X['bw_' + agg] = X['aggreg_dict'].apply(lambda x: x[0][agg])
# Dropping helper columns
X = X.drop(['bw', 'aggreg_dict'], axis=1)
return X
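# Sketch: chaining the extractors above into a single scikit-learn Pipeline.
# The constants below are illustrative assumptions (typical librosa defaults),
# not values taken from this project.
from sklearn.pipeline import Pipeline

FRAME_SIZE, HOP_LENGTH, SR, SPLIT_FREQ = 1024, 512, 22050, 2000

feature_pipeline = Pipeline([
    ('rms', RMSEnergy(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH)),
    ('zcr', ZeroCrossingRate(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH)),
    ('ber', BandEnergyRatio(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
                            split_freq=SPLIT_FREQ, sr=SR)),
    ('sc', SpectralCentroid(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH, sr=SR)),
    ('bw', BandWidth(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH, sr=SR)),
])
# X is assumed to be a DataFrame with a 'signal' column of 1-D numpy arrays:
# X_features = feature_pipeline.fit_transform(X)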
# Defining the transformer that aggregates the spectrogram into frequency groups
class GroupSpecAggreg(BaseEstimator, TransformerMixin):
"""
Class responsible for extracting the spectral power of the high- and low-frequency
bands of audio signals, applying a predefined set of statistical aggregations.
Parameters
----------
:param frame_size: number of samples per frame of the signal [type: int]
:param hop_length: frame-overlap (hop) parameter of the signal [type: int]
:param sr: sampling rate of the audio signal [type: int]
:param split_freq: split frequency between the high and low bands [type: int]
:param freq_cat_aggreg: aggregator applied when grouping the powers [type: string, default='sum']
:param signal_col: name of the column holding the signal in the dataset [type: string, default='signal']
:param feature_aggreg: list of statistical aggregators applied after the feature is extracted
*default=['mean', 'median', 'std', 'var', 'max', 'min']
Returns
-------
:return X: dataset containing the statistical aggregations of the grouped spectral power [type: pd.DataFrame]
Usage
-----
spec_extractor = GroupSpecAggreg(frame_size=FRAME_SIZE, hop_length=HOP_LENGTH,
sr=SR, split_freq=SPLIT_FREQ,
signal_col='signal', feature_aggreg=FEATURE_AGGREG)
X_spec = spec_extractor.fit_transform(X)
"""
def __init__(self, frame_size, hop_length, sr, split_freq, freq_cat_aggreg='sum',
signal_col='signal',
"""
SLURM batch system interface module.
"""
# TODO: Check whether any bugfixes to the bash SLURM back-end scripts have not yet been ported to this SLURM python module.
from __future__ import absolute_import
import os, sys, time, re
import arc
from .common.cancel import cancel
from .common.config import Config, configure, is_conf_setter
from .common.proc import execute_local, execute_remote
from .common.log import debug, verbose, info, warn, error, ArcError
from .common.lrmsinfo import LRMSInfo
from .common.scan import *
from .common.ssh import ssh_connect
from .common.submit import *
@is_conf_setter
def set_slurm(cfg):
"""
Set SLURM specific :py:data:`~lrms.common.Config` attributes.
:param cfg: parsed arc.conf
:type cfg: :py:class:`ConfigParser.ConfigParser`
"""
Config.slurm_bin_path = str(cfg.get('lrms', 'slurm_bin_path')).strip('"') if \
cfg.has_option('lrms', 'slurm_bin_path') else '/usr/bin'
Config.slurm_wakeupperiod = int(cfg.get('lrms', 'slurm_wakeupperiod').strip('"')) if \
cfg.has_option('lrms', 'slurm_wakeupperiod') else 30
#---------------------
# Submit methods
#---------------------
def Submit(config, jobdesc):
"""
Submits a job to the SLURM queue specified in arc.conf. This method executes the required
RunTimeEnvironment scripts and assembles the bash job script. The job script is
written to file and submitted with ``sbatch``.
:param str config: path to arc.conf
:param jobdesc: job description object
:type jobdesc: :py:class:`arc.JobDescription`
:return: local job ID if successfully submitted, else ``None``
:rtype: :py:obj:`str`
"""
configure(config, set_slurm)
validate_attributes(jobdesc)
if Config.remote_host:
ssh_connect(Config.remote_host, Config.remote_user, Config.private_key)
# Run RTE stage0
debug('----- starting slurmSubmitter.py -----', 'slurm.Submit')
RTE_stage0(jobdesc, 'SLURM', SBATCH_ACCOUNT = 'OtherAttributes.SBATCH_ACCOUNT')
set_grid_global_jobid(jobdesc)
# Create script file and write job script
jobscript = get_job_script(jobdesc)
script_file = write_script_file(jobscript)
debug('Created file %s' % script_file, 'slurm.Submit')
debug('SLURM jobname: %s' % jobdesc.Identification.JobName, 'slurm.Submit')
debug('SLURM job script built', 'slurm.Submit')
debug('----------------- BEGIN job script -----', 'slurm.Submit')
emptylines = 0
for line in jobscript.split('\n'):
if not line:
emptylines += 1
else:
debug(emptylines*'\n' + line.replace("%", "%%"), 'slurm.Submit')
emptylines = 0
if emptylines > 1:
debug((emptylines-1)*'\n', 'slurm.Submit')
debug('----------------- END job script -----', 'slurm.Submit')
if 'ONLY_WRITE_JOBSCRIPT' in os.environ and os.environ['ONLY_WRITE_JOBSCRIPT'] == 'yes':
return "-1"
#######################################
# Submit the job
######################################
execute = execute_local if not Config.remote_host else execute_remote
directory = jobdesc.OtherAttributes['joboption;directory']
debug('Session directory: %s' % directory, 'slurm.Submit')
SLURM_TRIES = 0
handle = None
while SLURM_TRIES < 10:
args = '%s/sbatch %s' % (Config.slurm_bin_path, script_file)
verbose('Executing \'%s\' on %s' %
(args, Config.remote_host if Config.remote_host else 'localhost'), 'slurm.Submit')
handle = execute(args)
if handle.returncode == 0:
break
if handle.returncode == 198 or wait_for_queue(handle):
debug('Waiting for queue to decrease', 'slurm.Submit')
time.sleep(60)
SLURM_TRIES += 1
continue
break # Other error than full queue
if handle.returncode == 0:
# TODO: Test what happens when the jobqueue is full or when the slurm
# ctld is not responding. SLURM 1.x and 2.2.x outputs the jobid into
# STDERR and STDOUT respectively. Concat them, and let sed sort it out.
# From the exit code we know that the job was submitted, so this
# is safe. <NAME> <<EMAIL>> 1.5.2011
localid = get_job_id(handle)
if localid:
debug('Job submitted successfully!', 'slurm.Submit')
debug('Local job id: ' + localid, 'slurm.Submit')
debug('----- exiting slurmSubmitter.py -----', 'slurm.Submit')
return localid
debug('job *NOT* submitted successfully!', 'slurm.Submit')
debug('got error code from sbatch: %d !' % handle.returncode, 'slurm.Submit')
debug('Output is:\n' + ''.join(handle.stdout), 'slurm.Submit')
debug('Error output is:\n' + ''.join(handle.stderr), 'slurm.Submit')
debug('----- exiting slurmSubmitter.py -----', 'slurm.Submit')
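# Illustrative call of Submit (a sketch; the arc.conf path, the xRSL file and
# the JobDescription parsing call are assumptions, not taken from this module):
#
#   jobdescs = arc.JobDescriptionList()
#   arc.JobDescription.Parse(open('job.xrsl').read(), jobdescs)
#   localid = Submit('/etc/arc.conf', jobdescs[0])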
def wait_for_queue(handle):
"""
Read from ``sbatch`` output whether the queue is full.
:param object handle: sbatch handle
:return: ``True`` if queue is full, else ``False``
:rtype: :py:obj:`bool`
"""
for f in (handle.stdout, handle.stderr):
for line in f:
if ("maximum number of jobs" in line or
# A rare SLURM error, but may cause chaos in the
# information/accounting system
"unable to accept job" in line):
return True
return False
def get_job_id(handle):
"""
Read local job ID from ``sbatch`` output.
:param object handle: sbatch handle
:return: local job ID if found, else ``None``
:rtype: :py:obj:`str`
"""
for f in (handle.stdout, handle.stderr):
for line in f:
match = re.search(r'Submitted batch job (\d+)', line)
if match:
return match.group(1)
error('Job ID not found in stdout or stderr', 'slurm.Submit')
def get_job_script(jobdesc):
"""
Assemble bash job script for a SLURM host.
:param jobdesc: job description object
:type jobdesc: :py:class:`arc.JobDescription`
:return: job script
:rtype: :py:obj:`str`
"""
set_req_mem(jobdesc)
# TODO: Maybe change way in which JobDescriptionParserSLURM is loaded.
jobscript = JobscriptAssemblerSLURM(jobdesc).assemble()
if not jobscript:
raise ArcError('Unable to assemble SLURM job option', 'slurm.Submit')
return jobscript
#---------------------
# Cancel methods
#---------------------
def Cancel(config, jobid):
"""
Cancel a job running at a SLURM host with ``scancel``.
:param str config: path to arc.conf
:param str jobid: local job ID
:return: ``True`` if successfully cancelled, else ``False``
:rtype: :py:obj:`bool`
"""
verify_job_id(jobid)
configure(config, set_slurm)
cmd = '%s/%s' % (Config.slurm_bin_path, 'scancel')
return cancel([cmd, jobid], jobid)
def verify_job_id(jobid):
"""
Verify that the job ID is an integer; raise :py:class:`~lrms.common.common.ArcError` otherwise.
:param str jobid: local job ID
"""
try:
int(jobid)
except:
raise ArcError('Job ID is not set, or it contains non-numeric characters (%s)' % jobid, 'slurm.Cancel')
#---------------------
# Scan methods
#---------------------
def Scan(config, ctr_dirs):
"""
Query the SLURM host for all jobs in /[controldir]/processing with ``squeue``.
If the job has stopped running, more detailed information is fetched with ``scontrol``,
and the diagnostics and comments files are updated. Finally ``gm-kick`` is executed
on all jobs with an exit code.
:param str config: path to arc.conf
:param ctr_dirs: list of paths to control directories
:type ctr_dirs: :py:obj:`list` [ :py:obj:`str` ... ]
"""
configure(config, set_slurm)
if Config.scanscriptlog:
scanlogfile = arc.common.LogFile(Config.scanscriptlog)
arc.common.Logger_getRootLogger().addDestination(scanlogfile)
arc.common.Logger_getRootLogger().setThreshold(Config.log_threshold)
jobs = get_jobs(ctr_dirs)
if not jobs: return
if Config.remote_host:
# NOTE: Assuming 256 B of TCP window needed for each job (squeue)
ssh_connect(Config.remote_host, Config.remote_user, Config.private_key, (2 << 7)*len(jobs))
execute = execute_local if not Config.remote_host else execute_remote
args = Config.slurm_bin_path + '/squeue -a -h -o %i:%T -t all -j ' + ','.join(jobs.keys())
if '__SLURM_TEST' in os.environ:
handle = execute(args, env=dict(os.environ))
else:
handle = execute(args)
if handle.returncode != 0:
debug('Got error code %i from squeue' % handle.returncode, 'slurm.Scan')
debug('Error output is:\n' + ''.join(handle.stderr), 'slurm.Scan')
# Slurm can report StartTime and EndTime in at least these two formats:
# 2010-02-15T15:30:29 (MDS)
# 02/15-15:25:15
# Python does not support duplicate named groups.
# Have to use separate regex if we want to use named groups.
date_MDS = re.compile(r'^(?P<YYYY>\d\d\d\d)-(?P<mm>\d\d)-(?P<dd>\d\d)T(?P<HH>\d\d):(?P<MM>\d\d):(?P<SS>\d\d)$')
date_2 = re.compile(r'^(?P<mm>\d\d)/(?P<dd>\d\d)-(?P<HH>\d\d):(?P<MM>\d\d):(?P<SS>\d\d)$')
for line in handle.stdout:
try:
localid, state = line.strip().split(':', 1)
except:
if line:
warn('Failed to parse squeue line: ' + line, 'slurm.Scan')
continue
job = jobs[localid]
job.state = state
if job.state in ['PENDING','RUNNING','SUSPENDED','COMPLETING']:
continue
if not job.state:
set_exit_code_from_diag(job)
job.message = MESSAGES.get(job.state, '')
args = Config.slurm_bin_path + '/scontrol -o show job %s' % localid
scontrol_handle = execute(args)
if scontrol_handle.returncode != 0:
debug('Got error code %i from scontrol' % scontrol_handle.returncode, 'slurm.Scan')
debug('Error output is:\n' + ''.join(scontrol_handle.stderr), 'slurm.Scan')
try:
scontrol_dict = dict(item.split('=', 1) for item in re.split(' (?=[^ =]+=)', scontrol_handle.stdout[0]))
job = jobs[scontrol_dict['JobId']]
except:
warn('Failed to parse scontrol line: ' + line, 'slurm.Scan')
continue
if 'ExitCode' in scontrol_dict:
ec1, ec2 = scontrol_dict['ExitCode'].split(':')
job.exitcode = int(ec2) + 256 if int(ec2) != 0 else int(ec1)
else:
job.exitcode = 0 if state == 'COMPLETED' else -1
if (state == 'NODE_FAIL' or state == 'CANCELLED') and ('ExitCode' not in scontrol_dict or job.exitcode == 0):
job.exitcode = 15
job.message = 'Job was cancelled by SLURM'
if 'StartTime' in scontrol_dict:
match = date_MDS.match(scontrol_dict['StartTime']) or date_2.match(scontrol_dict['StartTime'])
scontrol_dict['StartTime'] = get_MDS(match.groupdict())
job.LRMSStartTime = arc.common.Time(scontrol_dict['StartTime'])
if 'EndTime' in scontrol_dict:
match = date_MDS.match(scontrol_dict['EndTime']) or date_2.match(scontrol_dict['EndTime'])
scontrol_dict['EndTime'] = get_MDS(match.groupdict())
job.LRMSEndTime = arc.common.Time(scontrol_dict['EndTime'])
if 'StartTime' in scontrol_dict and 'EndTime' in scontrol_dict:
job.WallTime = job.LRMSEndTime - job.LRMSStartTime
if 'NumCPUs' in scontrol_dict:
job.Processors = scontrol_dict['NumCPUs']
with open(job.lrms_done_file, 'w') as f:
f.write('%d %s\n' % (job.exitcode, job.message))
write_comments(job)
update_diag(job)
kicklist = [job for job in jobs.values() if job.state not in ['PENDING','RUNNING','SUSPENDED','COMPLETING']]
kicklist.extend([job for job in jobs.values() if job.state == 'CANCELLED']) # kick twice
gm_kick(kicklist)
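# Sketch of the timestamp helper assumed above (the real get_MDS arrives via
# `from .common.scan import *`); given the named groups of either regex it
# renders the MDS form YYYY-mm-ddTHH:MM:SS shown in the comment above. The
# current-year fallback for the second squeue date format, which carries no
# year group, is an assumption.
def _get_MDS_sketch(groups):
    import time
    year = groups.get('YYYY') or time.strftime('%Y')  # date_2 lacks a year group
    return '%s-%s-%sT%s:%s:%s' % (year, groups['mm'], groups['dd'],
                                  groups['HH'], groups['MM'], groups['SS'])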
def get_lrms_options_schema():
return LRMSInfo.get_lrms_options_schema(slurm_bin_path = '*')
def get_lrms_info(options):
if sys.version_info[0] >= 3:
# Perl::Inline::Python passes text input as bytes objects in Python 3
# Convert them to str objects since this is what ARC is using
def convert(input):
if isinstance(input, dict):
return dict((convert(key), convert(value)) for key, value in input.items())
elif isinstance(input, list):
return [convert(element) for element in input]
elif isinstance(input, bytes):
return input.decode()
else:
return input
options = convert(options)
si = SLURMInfo(options)
si.read_config()
si.read_partitions()
si.read_jobs()
si.read_nodes()
si.read_cpuinfo()
si.cluster_info()
for qkey, qval in options['queues'].items():
if
noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def api_v1_object_device_ip_port_port_get(self, ip, port, **kwargs): # noqa: E501
"""api_v1_object_device_ip_port_port_get # noqa: E501
Returns a row from the device_port table # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_object_device_ip_port_port_get(ip, port, async_req=True)
>>> result = thread.get()
:param ip: Canonical IP of the Device. Use Search methods to find this. (required)
:type ip: str
:param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
:type port: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Port
"""
kwargs['_return_http_data_only'] = True
return self.api_v1_object_device_ip_port_port_get_with_http_info(ip, port, **kwargs) # noqa: E501
def api_v1_object_device_ip_port_port_get_with_http_info(self, ip, port, **kwargs): # noqa: E501
"""api_v1_object_device_ip_port_port_get # noqa: E501
Returns a row from the device_port table # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_object_device_ip_port_port_get_with_http_info(ip, port, async_req=True)
>>> result = thread.get()
:param ip: Canonical IP of the Device. Use Search methods to find this. (required)
:type ip: str
:param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
:type port: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Port, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'ip',
'port'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_object_device_ip_port_port_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'ip' is set
if self.api_client.client_side_validation and ('ip' not in local_var_params or # noqa: E501
local_var_params['ip'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `ip` when calling `api_v1_object_device_ip_port_port_get`") # noqa: E501
# verify the required parameter 'port' is set
if self.api_client.client_side_validation and ('port' not in local_var_params or # noqa: E501
local_var_params['port'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `port` when calling `api_v1_object_device_ip_port_port_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'ip' in local_var_params:
path_params['ip'] = local_var_params['ip'] # noqa: E501
if 'port' in local_var_params:
path_params['port'] = local_var_params['port'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
response_types_map = {
200: "Port",
}
return self.api_client.call_api(
'/api/v1/object/device/{ip}/port/{port}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
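# Illustrative usage of the two methods above (a sketch; the API class name and
# argument values are assumptions based on typical OpenAPI-generated clients):
#
#   api = <GeneratedApiClass>(api_client)
#   # synchronous call: returns the deserialized Port model
#   port_row = api.api_v1_object_device_ip_port_port_get('10.1.2.3', 'GigabitEthernet1/0/1')
#   # asynchronous call: returns a thread whose .get() yields the same result
#   thread = api.api_v1_object_device_ip_port_port_get('10.1.2.3', 'GigabitEthernet1/0/1', async_req=True)
#   port_row = thread.get()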
def api_v1_object_device_ip_port_port_last_node_get(self, ip, port, **kwargs): # noqa: E501
"""api_v1_object_device_ip_port_port_last_node_get # noqa: E501
Returns the related last_node table entry for a given port # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_object_device_ip_port_port_last_node_get(ip, port, async_req=True)
>>> result = thread.get()
:param ip: Canonical IP of the Device. Use Search methods to find this. (required)
:type ip: str
:param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
:type port: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.api_v1_object_device_ip_port_port_last_node_get_with_http_info(ip, port, **kwargs) # noqa: E501
def api_v1_object_device_ip_port_port_last_node_get_with_http_info(self, ip, port, **kwargs): # noqa: E501
"""api_v1_object_device_ip_port_port_last_node_get # noqa: E501
Returns the related last_node table entry for a given port # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_object_device_ip_port_port_last_node_get_with_http_info(ip, port, async_req=True)
>>> result = thread.get()
:param ip: Canonical IP of the Device. Use Search methods to find this. (required)
:type ip: str
:param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
:type port: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'ip',
'port'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method api_v1_object_device_ip_port_port_last_node_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'ip' is set
if self.api_client.client_side_validation and ('ip' not in local_var_params or # noqa: E501
local_var_params['ip'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `ip` when calling `api_v1_object_device_ip_port_port_last_node_get`") # noqa: E501
# verify the required parameter 'port' is set
if self.api_client.client_side_validation and ('port' not in local_var_params or # noqa: E501
local_var_params['port'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `port` when calling `api_v1_object_device_ip_port_port_last_node_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'ip' in local_var_params:
path_params['ip'] = local_var_params['ip'] # noqa: E501
if 'port' in local_var_params:
path_params['port'] = local_var_params['port'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
response_types_map = {}
return self.api_client.call_api(
'/api/v1/object/device/{ip}/port/{port}/last_node', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def api_v1_object_device_ip_port_port_logs_get(self, ip, port, **kwargs): # noqa: E501
"""api_v1_object_device_ip_port_port_logs_get # noqa: E501
Returns logs rows for a given port # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_v1_object_device_ip_port_port_logs_get(ip, port, async_req=True)
>>> result = thread.get()
:param ip: Canonical IP of the Device. Use Search methods to find this. (required)
:type ip: str
:param port: Name of the port. Use the \".../device/{ip}/ports\" method to find these. (required)
:type port: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if
self.transaction_visibility = transaction_visibility
self.trust_proxy = trust_proxy
self.trust_unknown_certs = trust_unknown_certs
self.versions = versions
@classmethod
def from_dict(cls, _dict: Dict) -> 'GetPublicSettingsResponse':
"""Initialize a GetPublicSettingsResponse object from a json dictionary."""
args = {}
if 'ACTIVITY_TRACKER_PATH' in _dict:
args['activity_tracker_path'] = _dict.get('ACTIVITY_TRACKER_PATH')
if 'ATHENA_ID' in _dict:
args['athena_id'] = _dict.get('ATHENA_ID')
if 'AUTH_SCHEME' in _dict:
args['auth_scheme'] = _dict.get('AUTH_SCHEME')
if 'CALLBACK_URI' in _dict:
args['callback_uri'] = _dict.get('CALLBACK_URI')
if 'CLUSTER_DATA' in _dict:
args['cluster_data'] = GetPublicSettingsResponseCLUSTERDATA.from_dict(_dict.get('CLUSTER_DATA'))
if 'CONFIGTXLATOR_URL' in _dict:
args['configtxlator_url'] = _dict.get('CONFIGTXLATOR_URL')
if 'CRN' in _dict:
args['crn'] = GetPublicSettingsResponseCRN.from_dict(_dict.get('CRN'))
if 'CRN_STRING' in _dict:
args['crn_string'] = _dict.get('CRN_STRING')
if 'CSP_HEADER_VALUES' in _dict:
args['csp_header_values'] = _dict.get('CSP_HEADER_VALUES')
if 'DB_SYSTEM' in _dict:
args['db_system'] = _dict.get('DB_SYSTEM')
if 'DEPLOYER_URL' in _dict:
args['deployer_url'] = _dict.get('DEPLOYER_URL')
if 'DOMAIN' in _dict:
args['domain'] = _dict.get('DOMAIN')
if 'ENVIRONMENT' in _dict:
args['environment'] = _dict.get('ENVIRONMENT')
if 'FABRIC_CAPABILITIES' in _dict:
args['fabric_capabilities'] = GetPublicSettingsResponseFABRICCAPABILITIES.from_dict(_dict.get('FABRIC_CAPABILITIES'))
if 'FEATURE_FLAGS' in _dict:
args['feature_flags'] = _dict.get('FEATURE_FLAGS')
if 'FILE_LOGGING' in _dict:
args['file_logging'] = GetPublicSettingsResponseFILELOGGING.from_dict(_dict.get('FILE_LOGGING'))
if 'HOST_URL' in _dict:
args['host_url'] = _dict.get('HOST_URL')
if 'IAM_CACHE_ENABLED' in _dict:
args['iam_cache_enabled'] = _dict.get('IAM_CACHE_ENABLED')
if 'IAM_URL' in _dict:
args['iam_url'] = _dict.get('IAM_URL')
if 'IBM_ID_CALLBACK_URL' in _dict:
args['ibm_id_callback_url'] = _dict.get('IBM_ID_CALLBACK_URL')
if 'IGNORE_CONFIG_FILE' in _dict:
args['ignore_config_file'] = _dict.get('IGNORE_CONFIG_FILE')
if 'INACTIVITY_TIMEOUTS' in _dict:
args['inactivity_timeouts'] = GetPublicSettingsResponseINACTIVITYTIMEOUTS.from_dict(_dict.get('INACTIVITY_TIMEOUTS'))
if 'INFRASTRUCTURE' in _dict:
args['infrastructure'] = _dict.get('INFRASTRUCTURE')
if 'LANDING_URL' in _dict:
args['landing_url'] = _dict.get('LANDING_URL')
if 'LOGIN_URI' in _dict:
args['login_uri'] = _dict.get('LOGIN_URI')
if 'LOGOUT_URI' in _dict:
args['logout_uri'] = _dict.get('LOGOUT_URI')
if 'MAX_REQ_PER_MIN' in _dict:
args['max_req_per_min'] = _dict.get('MAX_REQ_PER_MIN')
if 'MAX_REQ_PER_MIN_AK' in _dict:
args['max_req_per_min_ak'] = _dict.get('MAX_REQ_PER_MIN_AK')
if 'MEMORY_CACHE_ENABLED' in _dict:
args['memory_cache_enabled'] = _dict.get('MEMORY_CACHE_ENABLED')
if 'PORT' in _dict:
args['port'] = _dict.get('PORT')
if 'PROXY_CACHE_ENABLED' in _dict:
args['proxy_cache_enabled'] = _dict.get('PROXY_CACHE_ENABLED')
if 'PROXY_TLS_FABRIC_REQS' in _dict:
args['proxy_tls_fabric_reqs'] = _dict.get('PROXY_TLS_FABRIC_REQS')
if 'PROXY_TLS_HTTP_URL' in _dict:
args['proxy_tls_http_url'] = _dict.get('PROXY_TLS_HTTP_URL')
if 'PROXY_TLS_WS_URL' in _dict:
args['proxy_tls_ws_url'] = _dict.get('PROXY_TLS_WS_URL')
if 'REGION' in _dict:
args['region'] = _dict.get('REGION')
if 'SESSION_CACHE_ENABLED' in _dict:
args['session_cache_enabled'] = _dict.get('SESSION_CACHE_ENABLED')
if 'TIMEOUTS' in _dict:
args['timeouts'] = _dict.get('TIMEOUTS')
if 'TIMESTAMPS' in _dict:
args['timestamps'] = SettingsTimestampData.from_dict(_dict.get('TIMESTAMPS'))
if 'TRANSACTION_VISIBILITY' in _dict:
args['transaction_visibility'] = _dict.get('TRANSACTION_VISIBILITY')
if 'TRUST_PROXY' in _dict:
args['trust_proxy'] = _dict.get('TRUST_PROXY')
if 'TRUST_UNKNOWN_CERTS' in _dict:
args['trust_unknown_certs'] = _dict.get('TRUST_UNKNOWN_CERTS')
if 'VERSIONS' in _dict:
args['versions'] = GetPublicSettingsResponseVERSIONS.from_dict(_dict.get('VERSIONS'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a GetPublicSettingsResponse object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'activity_tracker_path') and self.activity_tracker_path is not None:
_dict['ACTIVITY_TRACKER_PATH'] = self.activity_tracker_path
if hasattr(self, 'athena_id') and self.athena_id is not None:
_dict['ATHENA_ID'] = self.athena_id
if hasattr(self, 'auth_scheme') and self.auth_scheme is not None:
_dict['AUTH_SCHEME'] = self.auth_scheme
if hasattr(self, 'callback_uri') and self.callback_uri is not None:
_dict['CALLBACK_URI'] = self.callback_uri
if hasattr(self, 'cluster_data') and self.cluster_data is not None:
_dict['CLUSTER_DATA'] = self.cluster_data.to_dict()
if hasattr(self, 'configtxlator_url') and self.configtxlator_url is not None:
_dict['CONFIGTXLATOR_URL'] = self.configtxlator_url
if hasattr(self, 'crn') and self.crn is not None:
_dict['CRN'] = self.crn.to_dict()
if hasattr(self, 'crn_string') and self.crn_string is not None:
_dict['CRN_STRING'] = self.crn_string
if hasattr(self, 'csp_header_values') and self.csp_header_values is not None:
_dict['CSP_HEADER_VALUES'] = self.csp_header_values
if hasattr(self, 'db_system') and self.db_system is not None:
_dict['DB_SYSTEM'] = self.db_system
if hasattr(self, 'deployer_url') and self.deployer_url is not None:
_dict['DEPLOYER_URL'] = self.deployer_url
if hasattr(self, 'domain') and self.domain is not None:
_dict['DOMAIN'] = self.domain
if hasattr(self, 'environment') and self.environment is not None:
_dict['ENVIRONMENT'] = self.environment
if hasattr(self, 'fabric_capabilities') and self.fabric_capabilities is not None:
_dict['FABRIC_CAPABILITIES'] = self.fabric_capabilities.to_dict()
if hasattr(self, 'feature_flags') and self.feature_flags is not None:
_dict['FEATURE_FLAGS'] = self.feature_flags
if hasattr(self, 'file_logging') and self.file_logging is not None:
_dict['FILE_LOGGING'] = self.file_logging.to_dict()
if hasattr(self, 'host_url') and self.host_url is not None:
_dict['HOST_URL'] = self.host_url
if hasattr(self, 'iam_cache_enabled') and self.iam_cache_enabled is not None:
_dict['IAM_CACHE_ENABLED'] = self.iam_cache_enabled
if hasattr(self, 'iam_url') and self.iam_url is not None:
_dict['IAM_URL'] = self.iam_url
if hasattr(self, 'ibm_id_callback_url') and self.ibm_id_callback_url is not None:
_dict['IBM_ID_CALLBACK_URL'] = self.ibm_id_callback_url
if hasattr(self, 'ignore_config_file') and self.ignore_config_file is not None:
_dict['IGNORE_CONFIG_FILE'] = self.ignore_config_file
if hasattr(self, 'inactivity_timeouts') and self.inactivity_timeouts is not None:
_dict['INACTIVITY_TIMEOUTS'] = self.inactivity_timeouts.to_dict()
if hasattr(self, 'infrastructure') and self.infrastructure is not None:
_dict['INFRASTRUCTURE'] = self.infrastructure
if hasattr(self, 'landing_url') and self.landing_url is not None:
_dict['LANDING_URL'] = self.landing_url
if hasattr(self, 'login_uri') and self.login_uri is not None:
_dict['LOGIN_URI'] = self.login_uri
if hasattr(self, 'logout_uri') and self.logout_uri is not None:
_dict['LOGOUT_URI'] = self.logout_uri
if hasattr(self, 'max_req_per_min') and self.max_req_per_min is not None:
_dict['MAX_REQ_PER_MIN'] = self.max_req_per_min
if hasattr(self, 'max_req_per_min_ak') and self.max_req_per_min_ak is not None:
_dict['MAX_REQ_PER_MIN_AK'] = self.max_req_per_min_ak
if hasattr(self, 'memory_cache_enabled') and self.memory_cache_enabled is not None:
_dict['MEMORY_CACHE_ENABLED'] = self.memory_cache_enabled
if hasattr(self, 'port') and self.port is not None:
_dict['PORT'] = self.port
if hasattr(self, 'proxy_cache_enabled') and self.proxy_cache_enabled is not None:
_dict['PROXY_CACHE_ENABLED'] = self.proxy_cache_enabled
if hasattr(self, 'proxy_tls_fabric_reqs') and self.proxy_tls_fabric_reqs is not None:
_dict['PROXY_TLS_FABRIC_REQS'] = self.proxy_tls_fabric_reqs
if hasattr(self, 'proxy_tls_http_url') and self.proxy_tls_http_url is not None:
_dict['PROXY_TLS_HTTP_URL'] = self.proxy_tls_http_url
if hasattr(self, 'proxy_tls_ws_url') and self.proxy_tls_ws_url is not None:
_dict['PROXY_TLS_WS_URL'] = self.proxy_tls_ws_url
if hasattr(self, 'region') and self.region is not None:
_dict['REGION'] = self.region
if hasattr(self, 'session_cache_enabled') and self.session_cache_enabled is not None:
_dict['SESSION_CACHE_ENABLED'] = self.session_cache_enabled
if hasattr(self, 'timeouts') and self.timeouts is not None:
_dict['TIMEOUTS'] = self.timeouts
if hasattr(self, 'timestamps') and self.timestamps is not None:
_dict['TIMESTAMPS'] = self.timestamps.to_dict()
if hasattr(self, 'transaction_visibility') and self.transaction_visibility is not None:
_dict['TRANSACTION_VISIBILITY'] = self.transaction_visibility
if hasattr(self, 'trust_proxy') and self.trust_proxy is not None:
_dict['TRUST_PROXY'] = self.trust_proxy
if hasattr(self, 'trust_unknown_certs') and self.trust_unknown_certs is not None:
_dict['TRUST_UNKNOWN_CERTS'] = self.trust_unknown_certs
if hasattr(self, 'versions') and self.versions is not None:
_dict['VERSIONS'] = self.versions.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this GetPublicSettingsResponse object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'GetPublicSettingsResponse') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'GetPublicSettingsResponse') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class GetPublicSettingsResponseCLUSTERDATA():
"""
GetPublicSettingsResponseCLUSTERDATA.
:attr str type: (optional) Indicates whether this is a paid or free IBP console.
"""
def __init__(self,
*,
type: str = None) -> None:
"""
Initialize a GetPublicSettingsResponseCLUSTERDATA object.
:param str type: (optional) Indicates whether this is a paid or free IBP
console.
"""
self.type = type
@classmethod
def from_dict(cls, _dict: Dict) -> 'GetPublicSettingsResponseCLUSTERDATA':
"""Initialize a GetPublicSettingsResponseCLUSTERDATA object from a json dictionary."""
args = {}
if 'type' in _dict:
args['type'] = _dict.get('type')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a GetPublicSettingsResponseCLUSTERDATA object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this GetPublicSettingsResponseCLUSTERDATA object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'GetPublicSettingsResponseCLUSTERDATA') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'GetPublicSettingsResponseCLUSTERDATA') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
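# Round-trip sketch for the from_dict/to_dict pattern shared by all models in
# this module (illustrative; 'paid' is an assumed value for the type field):
#
#   cluster = GetPublicSettingsResponseCLUSTERDATA.from_dict({'type': 'paid'})
#   assert cluster.to_dict() == {'type': 'paid'}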
class GetPublicSettingsResponseCRN():
"""
metadata about the IBM Cloud service instance. [Only populated if using IBM Cloud].
:attr str account_id: (optional)
:attr str c_name: (optional)
:attr str c_type: (optional)
:attr str instance_id: (optional)
:attr str location: (optional)
:attr str resource_id: (optional)
:attr str resource_type: (optional)
:attr str service_name: (optional)
:attr str version: (optional)
"""
def __init__(self,
*,
account_id: str = None,
c_name: str = None,
c_type: str = None,
instance_id: str = None,
location: str = None,
resource_id: str = None,
resource_type: str = None,
service_name: str = None,
version: str = None) -> None:
"""
Initialize a GetPublicSettingsResponseCRN object.
:param str account_id: (optional)
:param str c_name: (optional)
:param str c_type: (optional)
:param str instance_id: (optional)
:param str location: (optional)
:param str resource_id: (optional)
:param str resource_type: (optional)
:param str service_name: (optional)
:param str version: (optional)
"""
self.account_id = account_id
self.c_name = c_name
self.c_type = c_type
self.instance_id = instance_id
self.location = location
self.resource_id = resource_id
self.resource_type = resource_type
self.service_name = service_name
self.version = version
@classmethod
def from_dict(cls, _dict: Dict) -> 'GetPublicSettingsResponseCRN':
"""Initialize a GetPublicSettingsResponseCRN object from a json dictionary."""
args = {}
if 'account_id' in _dict:
args['account_id'] = _dict.get('account_id')
if 'c_name' in _dict:
args['c_name'] = _dict.get('c_name')
if 'c_type'
#!/usr/bin/env python
import collections
# import itertools
import numpy as np
# from sklearn import linear_model as linear # for VAR
# from .utils import sliding_window as window
# from .utils.distance import kmeans, dists_sq
# from .utils import distance as dist
# from python import compress
# ================================================================ shifts lut
SHIFT_PAIRS_16 = [
(7, 1), # ~0 - .5 = ~-.5
(3, 1), # .125 - .5 = -.375
(2, 1), # .25 - .5 = -.25
# (4, 2), # .0625 - .25 = -.1875
(3, 2), # .125 - .25 = -.125
(4, 3), # .0625 - .125 = -.0625
(0, 0), # 1 - 1 = 0
(3, 4), # .125 - .0625 = .0625
(2, 3), # .25 - .125 = .125
(2, 4), # .25 - .0625 = .1875
(1, 2), # .5 - .25 = .25
(1, 3), # .5 - .125 = .375
(0, 1), # 1 - .5 = .5
(0, 2), # 1 - .25 = .75
(0, 3), # 1 - .125 = .875
(0, 4), # 1 - .0625 = .9375
(0, 7), # 1 - ~0 = ~1
]
# should be equivalent to `all_shifts(max_shift=5, omit_duplicates=True)`
# EDIT: wait, no, not true because we have shifts of 7 at the ends
SHIFT_PAIRS_26 = [
(7, 1), # ~0 - .5 = ~-.5
(5, 1), # .03125- .5 = -.46875 # added
(4, 1), # .0625 - .5 = -.4375 # added, max 4
(3, 1), # .125 - .5 = -.375
(2, 1), # .25 - .5 = -.25
(5, 2), # .03125- .25 = -.21875
(4, 2), # .0625 - .25 = -.1875 # added, max 4
(3, 2), # .125 - .25 = -.125
(5, 3), # .03125- .125 = -.09375 # added
(4, 3), # .0625 - .125 = -.0625
(5, 4), # .03125- .0625 = -.03125 # added
(0, 0), # 1 - 1 = 0
(4, 5), # .0625 - .03125= .03125
(3, 4), # .125 - .0625 = .0625
(3, 5), # .125 - .03125= .09375 # added
(2, 3), # .25 - .125 = .125
(2, 4), # .25 - .0625 = .1875
(2, 5), # .25 - .03125= .21875 # added
(1, 2), # .5 - .25 = .25
(1, 3), # .5 - .125 = .375
(1, 4), # .5 - .0625 = .4375 # added, max 4
(1, 5), # .5 - .03125= .46875 # added
(0, 1), # 1 - .5 = .5
(0, 2), # 1 - .25 = .75
(0, 3), # 1 - .125 = .875
(0, 4), # 1 - .0625 = .9375
(0, 5), # 1 - .03125= .96875 # added
(0, 7), # 1 - ~0 = ~1
]
def all_shifts(max_shift=-1, omit_duplicates=True):
vals = {}
nbits = 8
x = 1 << nbits # reference val; 256 for nbits
if max_shift < 0:
max_shift = nbits - 1
if omit_duplicates:
vals[(0, 0)] = 0
for a in range(max_shift + 1):
for b in range(max_shift + 1):
if omit_duplicates and a == b:
continue
vals[(a, b)] = (x >> a) - (x >> b)
keys, coeffs = list(zip(*list(vals.items())))
keys = np.array(keys)
coeffs = np.array(coeffs)
order = np.argsort(coeffs)
# print "shift results:"
# print keys[order]
# print coeffs[order]
return keys[order], coeffs[order]
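# Worked example of the encoding above: the pair (1, 3) stands for the weight
# (256 >> 1) - (256 >> 3) = 128 - 32 = 96, i.e. 96/256 = 0.375, which matches
# the (1, 3) entry of SHIFT_PAIRS_16.
assert (256 >> 1) - (256 >> 3) == 96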
# okay, looks like (according to test immediately below) these values are
# identical to what's in our existing LUT; this makes sense given that impls
# are basically identical
def _i16_for_shifts(pos_shift, neg_shift, nbits=8):
start_val = 1 << nbits # 256 for nbits = 8
return (start_val >> pos_shift) - (start_val >> neg_shift)
# TODO actual unit test
def _test_shift_coeffs(nbits=8):
shifts, shift_coeffs = all_shifts()
for (pos_shift, neg_shift), coeff in zip(shifts, shift_coeffs):
assert _i16_for_shifts(pos_shift, neg_shift) == coeff
for val in range(-128, 128):
two_shifts_val = (val >> pos_shift) - (val >> neg_shift)
# ya, this fails; multiply and rshift != using shifts directly
# assert (val * coeff) >> nbits == two_shifts_val
# this way works; requires two multiplies though...
pos_coef = 1 << (nbits - pos_shift)
neg_coef = 1 << (nbits - neg_shift)
pos = (val * pos_coef) >> nbits
neg = (val * neg_coef) >> nbits
assert pos - neg == two_shifts_val
# this way also fails
# pos = val * pos_coef
# neg = val * neg_coef
# assert (pos - neg) >> nbits == two_shifts_val
# def coeff_lut():
# """create lookup table `T` such that `T[coeff]` yields the two indices
# whose associated coefficients are immediately above and below `coeff`"""
# shifts, shift_coeffs = all_shifts()
SHIFTS, SHIFT_COEFFS = all_shifts()
# ================================================================ funcs
def binary_search(array, val):
M = len(array)
first = 0
middle = int(M / 2)
last = M - 1
while (first <= last):
middle_val = array[middle]
if middle_val < val:
first = middle + 1
elif middle_val == val:
return middle
else: # middle_val > val
last = middle - 1
middle = int((first + last) / 2)
return middle
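# Nearest-index behaviour of binary_search above (illustrative): an exact hit
# returns its index, while a miss returns the last probed index, which the
# regressor below treats as the closest shift coefficient.
_demo_coeffs = [-128, -64, 0, 64, 128]
assert binary_search(_demo_coeffs, 64) == 3   # exact match
assert binary_search(_demo_coeffs, 65) == 3   # settles next to the closest value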
class OnlineRegressor(object):
def __init__(self, block_sz=8, verbose=0, method='linreg',
shifts=SHIFTS, shift_coeffs=SHIFT_COEFFS, numbits=8, ntaps=1):
# self.prev0 = 0
# self.prev1 = 0
# self.mod = 1 << nbits
# self.shift0 = 0
# self.shift1 = 1
self.block_sz = block_sz
self.verbose = verbose
self.method = method
self.shifts = shifts
self.shift_coeffs = shift_coeffs
self.numbits = numbits
self.ntaps = ntaps
self.last_val = 0
self.last_delta = 0
self.coef = 256
self.counter = 0
# self.counter = 256 << (1 + self.numbits - 8) # TODO indirect to learning rate, not just 1 # noqa
# self.counter = 8 << 1 # equivalent to adding 8 to round to nearest?
# self.counter = self.coef
self.t = 0
self.grad_counter = 0
self.offset = 0
self.offset_counter = 0
shift_by = (1 + self.numbits - 8)
self.coeffs = np.zeros(self.ntaps, dtype=np.int32) + 256
self.counters = np.zeros(self.ntaps, dtype=np.int32) + (256 << shift_by)
# self.approx_256_over_x = 1
self.Sxy = 0
self.Sxx = 0
self.errs = []
# print "using shifts, coeffs:"
# print shifts
# print shift_coeffs
# for logging
# self.best_idx_offset_counts = np.zeros(3, dtype=np.int64)
self.best_idx_counts = np.zeros(len(self.shifts), dtype=np.int64)
# counts_len = len(self.shifts) if method == 'linreg' else 512
# self.best_idx_counts = np.zeros(counts_len, dtype=np.int64)
self.best_coef_counts = collections.Counter()
self.best_offset_counts = collections.Counter()
def feed_group(self, group):
pass # TODO determine optimal filter here
# errhat = a*x0 - b*x0 - a*x1 + b*x1
# = a(x0 - x1) + b(x1 - x0)
# = c(x0 - x1), where c = (a - b)
#
# we should compute c, and find shifts (which correspond to a, b) that
# approximate it well; also note that errhat is prediction of the delta
#
# this is just linear regression between (x0 - x1) and new val, with
# some extra logic at the end to get shifts based on regression coeff
# deltas; these are our target variable
deltas = np.zeros(group.size, dtype=group.dtype)
deltas[1:] = group[1:] - group[:-1]
deltas[0] = group[0] - self.last_val
self.last_val = group[-1]
# deltas from previous time step; these are our indep variable
diffs = np.zeros(group.size, dtype=group.dtype)
diffs[1:] = deltas[:-1]
diffs[0] = self.last_delta
self.last_delta = deltas[-1]
x = diffs
y = deltas
# linear regression
if self.method == 'linreg':
Sxy = np.sum(x * y)
Sxx = np.sum(x * x)
# print "x, y dtypes: ", x.dtype, y.dtype
# print "Sxx, Sxy dtypes: ", Sxx.dtype, Sxy.dtype
            # integer-divide (and guard against Sxx == 0 when all diffs are zero)
            # to mirror the fixed-point arithmetic we'll need to do in C
            coeff = (Sxy << 8) // max(Sxx, 1)
idx = binary_search(self.shift_coeffs, coeff)
def compute_errs(x, y, shifts):
predictions = (x >> shifts[0]) - (x >> shifts[1])
return y - predictions
# These are commented out because, empirically, they're
# *never* chosen
#
# best_idx_offset = 0
#
# def compute_total_cost(errs, block_sz=self.block_sz):
# raw_costs = compress.nbits_cost(errs)
# block_costs_rows = raw_costs.reshape(-1, block_sz)
# block_costs = np.max(block_costs_rows, axis=1)
# return np.sum(block_costs)
#
# cost = compute_total_cost(errs)
# if idx > 0:
# errs2 = compute_errs(x, y, SHIFTS[idx - 1])
# cost2 = compute_total_cost(errs)
            # if ...
into json.
subtask_status = {subtask_id: (SubtaskStatus.create(subtask_id)).to_dict() for subtask_id in subtask_id_list}
subtask_dict = {
'total': num_subtasks,
'succeeded': 0,
'failed': 0,
'status': subtask_status
}
entry.subtasks = json.dumps(subtask_dict)
# and save the entry immediately, before any subtasks actually start work:
entry.save_now()
return task_progress
# lint-amnesty, # pylint: disable=bad-option-value
def queue_subtasks_for_query(
entry,
action_name,
create_subtask_fcn,
item_querysets,
item_fields,
items_per_task,
total_num_items,
):
"""
Generates and queues subtasks to each execute a chunk of "items" generated by a queryset.
Arguments:
`entry` : the InstructorTask object for which subtasks are being queued.
`action_name` : a past-tense verb that can be used for constructing readable status messages.
`create_subtask_fcn` : a function of two arguments that constructs the desired kind of subtask object.
Arguments are the list of items to be processed by this subtask, and a SubtaskStatus
object reflecting initial status (and containing the subtask's id).
`item_querysets` : a list of query sets that define the "items" that should be passed to subtasks.
`item_fields` : the fields that should be included in the dict that is returned.
These are in addition to the 'pk' field.
`items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask.
      `total_num_items` : total number of items that will be spread across all subtasks.
Returns: the task progress as stored in the InstructorTask object.
"""
task_id = entry.task_id
# Calculate the number of tasks that will be created, and create a list of ids for each task.
total_num_subtasks = _get_number_of_subtasks(total_num_items, items_per_task)
subtask_id_list = [str(uuid4()) for _ in range(total_num_subtasks)]
# Update the InstructorTask with information about the subtasks we've defined.
TASK_LOG.info(
"Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.",
task_id,
entry.id,
total_num_subtasks,
total_num_items,
)
# Make sure this is committed to database before handing off subtasks to celery.
with outer_atomic():
progress = initialize_subtask_info(entry, action_name, total_num_items, subtask_id_list)
# Construct a generator that will return the recipients to use for each subtask.
# Pass in the desired fields to fetch for each recipient.
item_list_generator = _generate_items_for_subtask(
item_querysets,
item_fields,
total_num_items,
items_per_task,
total_num_subtasks,
entry.course_id,
)
# Now create the subtasks, and start them running.
TASK_LOG.info(
"Task %s: creating %s subtasks to process %s items.",
task_id,
total_num_subtasks,
total_num_items,
)
num_subtasks = 0
for item_list in item_list_generator:
subtask_id = subtask_id_list[num_subtasks]
num_subtasks += 1
subtask_status = SubtaskStatus.create(subtask_id)
new_subtask = create_subtask_fcn(item_list, subtask_status)
TASK_LOG.info(
"Queueing BulkEmail Task: %s Subtask: %s at timestamp: %s",
task_id, subtask_id, datetime.now()
)
new_subtask.apply_async()
# Subtasks have been queued so no exceptions should be raised after this point.
# Return the task progress as stored in the InstructorTask object.
return progress
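# `_get_number_of_subtasks` is defined elsewhere in this module; a minimal
# sketch of what it presumably computes (ceiling division of the items into
# chunks of at most `items_per_task`), shown here only for reference:
def _get_number_of_subtasks_sketch(total_num_items, items_per_task):
    return (total_num_items + items_per_task - 1) // items_per_task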
def _acquire_subtask_lock(task_id):
"""
Mark the specified task_id as being in progress.
This is used to make sure that the same task is not worked on by more than one worker
at the same time. This can occur when tasks are requeued by Celery in response to
loss of connection to the task broker. Most of the time, such duplicate tasks are
run sequentially, but they can overlap in processing as well.
Returns true if the task_id was not already locked; false if it was.
"""
# cache.add fails if the key already exists
key = f"subtask-{task_id}"
succeeded = cache.add(key, 'true', SUBTASK_LOCK_EXPIRE)
if not succeeded:
TASK_LOG.warning("task_id '%s': already locked. Contains value '%s'", task_id, cache.get(key))
return succeeded
def _release_subtask_lock(task_id):
"""
Unmark the specified task_id as being no longer in progress.
This is most important to permit a task to be retried.
"""
# According to Celery task cookbook, "Memcache delete is very slow, but we have
# to use it to take advantage of using add() for atomic locking."
key = f"subtask-{task_id}"
cache.delete(key)
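# The two helpers above are meant to be used as a pair around a subtask's
# critical section; a minimal usage sketch (the worker body is hypothetical):
def _locked_subtask_sketch(task_id):
    if not _acquire_subtask_lock(task_id):
        raise DuplicateTaskException(f"subtask {task_id} is already being worked on")
    try:
        pass  # ... perform the subtask's work here ...
    finally:
        _release_subtask_lock(task_id)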
def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
"""
Confirms that the current subtask is known to the InstructorTask and hasn't already been completed.
Problems can occur when the parent task has been run twice, and results in duplicate
    subtasks being created for the same InstructorTask entry. This can happen when Celery
loses its connection to its broker, and any current tasks get requeued.
If a parent task gets requeued, then the same InstructorTask may have a different set of
subtasks defined (to do the same thing), so the subtasks from the first queuing would not
    be known to the InstructorTask, so we raise an exception in this case.
If a subtask gets requeued, then the first time the subtask runs it should run fine to completion.
However, we want to prevent it from running again, so we check here to see what the existing
    subtask's status is. If it is complete, we raise an exception. We also take a lock on the
    task, so that we can detect whether another worker has started the work but has not yet
    completed it; in that case the other worker is allowed to finish, and this check raises
    an exception.
Raises a DuplicateTaskException exception if it's not a task that should be run.
If this succeeds, it requires that update_subtask_status() is called to release the lock on the
task.
"""
# Confirm that the InstructorTask actually defines subtasks.
entry = InstructorTask.objects.get(pk=entry_id)
if len(entry.subtasks) == 0:
format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask knows about this particular subtask.
subtask_dict = json.loads(entry.subtasks)
subtask_status_info = subtask_dict['status']
if current_task_id not in subtask_status_info:
format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask doesn't think that this subtask has already been
# performed successfully.
subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
subtask_state = subtask_status.state
if subtask_state in READY_STATES:
format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask doesn't think that this subtask is already being
# retried by another task.
if subtask_state == RETRY:
# Check to see if the input number of retries is less than the recorded number.
# If so, then this is an earlier version of the task, and a duplicate.
new_retry_count = new_subtask_status.get_retry_count()
current_retry_count = subtask_status.get_retry_count()
if new_retry_count < current_retry_count:
format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}" # lint-amnesty, pylint: disable=line-too-long
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
# Now we are ready to start working on this. Try to lock it.
# If it fails, then it means that another worker is already in the
# middle of working on this.
if not _acquire_subtask_lock(current_task_id):
format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
msg = format_str.format(current_task_id, entry)
TASK_LOG.warning(msg)
raise DuplicateTaskException(msg)
def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0):
"""
Update the status of the subtask in the parent InstructorTask object tracking its progress.
Because select_for_update is used to lock the InstructorTask object while it is being updated,
multiple subtasks updating at the same time may time out while waiting for the lock.
The actual update operation is surrounded by a try/except/else that permits the update to be
retried if the transaction times out.
The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when
the attempting of retries has concluded.
"""
try:
_update_subtask_status(entry_id, current_task_id, new_subtask_status)
except DatabaseError:
# If we fail, try again recursively.
retry_count += 1
if retry_count < MAX_DATABASE_LOCK_RETRIES:
TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s: retry %d",
current_task_id, entry_id, new_subtask_status, retry_count)
update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
else:
TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s", # lint-amnesty, pylint: disable=line-too-long
retry_count, current_task_id, entry_id, new_subtask_status)
raise
finally:
        # Only release the lock on the subtask once we're done with all retry attempts.
        _release_subtask_lock(current_task_id)
if brokerObj.delete_queue(queue):
if not silent:
print("Deleted queue: {}".format(queue))
deleted_queues += 1
if not silent and deleted_queues>1:
print('Deleted {} queues'.format(deleted_queues))
def publish(args=None):
"""
Handle the command-line sub-command publish
Usage:
ddmq publish [options] <root> <queue> \"<message>\"
Args:
args: a pre-made args object, in the case of json being parsed from the command-line
Returns:
None
"""
parser = argparse.ArgumentParser(
description='Publish message to a queue.',
usage='''ddmq publish [options] <root> <queue> "<message>"'''
)
# add available options for this sub-command
parser.add_argument('root', help="the message queue's root folder", type=str)
parser.add_argument('queue', help="name of queue to publish to", type=str)
parser.add_argument('message', help="message text within quotes", type=str)
parser.add_argument('-f', action='store_true', help="create the root folder and queue if needed")
    parser.add_argument('-p', '--priority', nargs='?', help="define priority of the message (lower number = higher priority)", type=int)
parser.add_argument('-t', '--timeout', nargs='?', help="define timeout of the message in seconds", type=int)
parser.add_argument('-r', '--requeue', action='store_true', help="set to requeue message on fail or timeout. Default priority for requeued messages is 0 (top priority), unless changed by --requeue_prio or config files")
parser.add_argument('-l', '--requeue_limit', nargs='?', help="define the number of times the message is allowed to be requeued before being permanently deleted after expiry", type=int)
parser.add_argument('--requeue_prio', help="set custom priority to message when it is requeued. Implies -r even if not explicitly set", type=int)
parser.add_argument('-C', '--skip_cleaning', action='store_true', help="set to publish the message to the queue without doing cleaning of the queue first")
parser.add_argument('-v', action='store_true', help="verbose mode")
parser.add_argument('-d', action='store_true', help="debug mode")
parser.add_argument('-s', action='store_true', help="silent mode")
# now that we're inside a subcommand, ignore the first two arguments
args = parser.parse_args(sys.argv[2:])
# create a broker object
brokerObj = create_broker(root=args.root, create=args.f, verbose=args.v, debug=args.d)
# make sure the queue exists
if not brokerObj.check_dir(os.path.join(brokerObj.root, args.queue)):
# create it if asked to
if args.f:
try:
# skip names with weird characters in them
if not bool(re.match('^[a-zA-Z0-9_-]+$', args.queue)):
sys.exit("Error: invalid queue name ({})".format(args.queue))
brokerObj.create_queue(args.queue)
except OSError:
sys.exit("Unable to write to the specified queue directory ({}).".format(os.path.join(args.root, args.queue)))
if not args.s:
print("Created new queue: {}".format(args.queue))
else:
sys.exit("The specified queue ({}) does not exist. Please run the same command with the (-f) force flag to create and initiate directories as needed.".format(os.path.join(brokerObj.root, args.queue)))
if args.skip_cleaning:
if not args.s:
print("Skipping queue cleaning.")
# check if custom requeue prio is set
requeue = args.requeue
    if args.requeue_prio is not None:  # 0 is a valid priority, so test for None explicitly
requeue = True
# call the publish function with the given arguments
try:
msg = brokerObj.publish(queue=args.queue, msg_text=args.message, priority=args.priority, skip_cleaning=args.skip_cleaning, requeue=requeue, requeue_prio=args.requeue_prio, timeout=args.timeout, requeue_limit=args.requeue_limit)
except IOError:
sys.exit("Unable to write to the specified queue directory ({}).".format(os.path.join(args.root, args.queue)))
if not args.s:
print("Successfully published message:{0}{0}{1}".format(os.linesep, msg))
def consume(args=None):
"""
Handle the command-line sub-command consume
Usage:
ddmq consume [-hfnCvd] [--format <plain|json|yaml>] <root> <queue>
Args:
args: a pre-made args object, in the case of json being parsed from the command-line
Returns:
None
"""
parser = argparse.ArgumentParser(
description='Consume message(s) from a queue.',
usage='''ddmq consume [-hfnCvd] [--format <plain|json|yaml>] <root> <queue>'''
)
# add available options for this sub-command
parser.add_argument('root', help="the message queue's root folder")
parser.add_argument('queue', help="name of queue to consume from")
parser.add_argument('-f', action='store_true', help="create the root folder and queue if needed")
parser.add_argument('-n', nargs='?', help="the number of messages that will be consumed", type=int)
parser.add_argument('--format', nargs='?', help="specify output format (plain, json, yaml)", default='json', type=str)
parser.add_argument('-C', '--skip-cleaning', action='store_true', help="set to consume the message from the queue without doing cleaning of the queue first")
parser.add_argument('-v', action='store_true', help="verbose mode")
parser.add_argument('-d', action='store_true', help="debug mode")
# now that we're inside a subcommand, ignore the first two arguments
args = parser.parse_args(sys.argv[2:])
if args.format:
if args.format not in ['plain', 'json', 'yaml']:
            raise ValueError("Unknown format, {}. Valid formats are plain, json and yaml.".format(args.format))
# create a broker object
brokerObj = create_broker(root=args.root, create=args.f, verbose=args.v, debug=args.d)
# consume the messages
try:
messages = brokerObj.consume(queue=args.queue, n=args.n, skip_cleaning=args.skip_cleaning)
except IOError:
sys.exit("Unable to read/write to the specified queue directory ({}).".format(os.path.join(args.root, args.queue)))
if not messages:
print("No more messages in {}".format(args.queue))
return
# print the messages in requested format
for msg in messages:
if args.format == 'json':
print(json.dumps(msg.__dict__))
elif args.format == 'plain':
print(str(msg))
elif args.format == 'yaml':
print(yaml.dump(msg.__dict__).rstrip())
else:
# should not happen
print(json.dumps(msg))
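# A typical round trip with these subcommands (paths are illustrative; the
# message file name is the one printed by consume):
#   ddmq publish -f /tmp/ddmq demo "job 1"
#   ddmq consume --format json /tmp/ddmq demo
#   ddmq ack /tmp/ddmq demo <message-file-name>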
def ack(args=None):
"""
Handle the command-line sub-command ack
Usage:
ddmq ack [-hCrvd] <root> <queue> <message file1>[,<message file2>,..,<message fileN>]
Args:
args: a pre-made args object, in the case of json being parsed from the command-line
Returns:
None
"""
parser = argparse.ArgumentParser(
description='Positively acknowledge message(s) from a queue.',
usage='''ddmq ack [-hCrvd] <root> <queue> <message file1>[,<message file2>,..,<message fileN>]'''
)
# add available options for this sub-command
parser.add_argument('root', help="the message queue's root folder")
parser.add_argument('queue', help="name of the queue the messages are in")
    parser.add_argument('msg_files', help="comma-separated file names of the messages to acknowledge")
    # note: store_false, so args.skip_cleaning defaults to True and -C sets it to False;
    # the value is passed as `clean` to brokerObj.ack() below
    parser.add_argument('-C', '--skip-cleaning', action='store_false', help="set to ack the message without doing cleaning of the queue first")
parser.add_argument('-r', '--requeue',action='store_true', help="force requeue of the messages")
parser.add_argument('-v', action='store_true', help="verbose mode")
parser.add_argument('-d', action='store_true', help="debug mode")
# now that we're inside a subcommand, ignore the first two arguments
args = parser.parse_args(sys.argv[2:])
# create a broker object
brokerObj = create_broker(root=args.root, verbose=args.v, debug=args.d)
# make the files to a list
msg_files = args.msg_files.split(',')
# send the messages to acknowledgement
acked = brokerObj.ack(args.queue, msg_files, requeue=args.requeue, clean=args.skip_cleaning)
for msg_file in acked:
print('acked {}'.format(os.path.join(brokerObj.root, args.queue, 'work', msg_file)))
# print failed acked
for msg_file in msg_files:
if msg_file not in acked:
print('failed ack {}'.format(os.path.join(brokerObj.root, args.queue, 'work', msg_file)))
def nack(args=None):
"""
Handle the command-line sub-command nack
Usage:
ddmq nack [-hCrvd] <root> <queue> <message file1>[,<message file2>,..,<message fileN>]
Args:
args: a pre-made args object, in the case of json being parsed from the command-line
Returns:
None
"""
parser = argparse.ArgumentParser(
description='Negatively acknowledge message(s) from a queue.',
usage='''ddmq nack [-hCrvd] <root> <queue> <message file1>[,<message file2>,..,<message fileN>]'''
)
# add available options for this sub-command
parser.add_argument('root', help="the message queue's root folder")
parser.add_argument('queue', help="name of the queue the messages are in")
    parser.add_argument('msg_files', help="comma-separated file names of the messages to negatively acknowledge")
    # note: store_false, so args.skip_cleaning defaults to True and -C sets it to False;
    # the value is passed as `clean` to brokerObj.nack() below
    parser.add_argument('-C', '--skip-cleaning', action='store_false', help="set to nack the message without doing cleaning of the queue first")
parser.add_argument('-r', '--requeue',action='store_true', help="force requeue of the messages")
parser.add_argument('-v', action='store_true', help="verbose mode")
parser.add_argument('-d', action='store_true', help="debug mode")
# now that we're inside a subcommand, ignore the first two arguments
args = parser.parse_args(sys.argv[2:])
# create a broker object
brokerObj = create_broker(root=args.root, verbose=args.v, debug=args.d)
# make the files to a list
msg_files = args.msg_files.split(',')
# send the messages to acknowledgement
nacked = brokerObj.nack(args.queue, msg_files, requeue=args.requeue, clean=args.skip_cleaning)
for msg_file in nacked:
print('nacked {}'.format(os.path.join(brokerObj.root, args.queue, 'work', msg_file)))
# print failed acked
for msg_file in msg_files:
if msg_file not in nacked:
print('failed nack {}'.format(os.path.join(brokerObj.root, args.queue, 'work', msg_file)))
def del_msg(args=None):
"""
Handle the command-line sub-command del_msg
Usage:
ddmq del_msg [-hCvds] <root> <queue> <message file1>[,<message file2>,..,<message fileN>]
Args:
args: a pre-made args object, in the case of json being parsed from the command-line
Returns:
None
"""
parser = argparse.ArgumentParser(
description='Delete message(s) from a queue.',
usage='''ddmq del_msg [-hCvds] <root> <queue> <message file1>[,<message file2>,..,<message fileN>]'''
)
# add available options for this sub-command
parser.add_argument('root', help="the message queue's root folder")
parser.add_argument('queue', help="name of the queue the messages are in")
    parser.add_argument('msg_files', help="comma-separated file names of the messages to delete")
    parser.add_argument('-C', '--skip-cleaning', action='store_false', help="set to delete the message without doing cleaning of the queue first")
parser.add_argument('-v', action='store_true', help="verbose mode")
parser.add_argument('-d', action='store_true', help="debug mode")
parser.add_argument('-s', action='store_true', help="silent mode")
# now that we're inside a subcommand, ignore the first two arguments
args = parser.parse_args(sys.argv[2:])
# create a broker object
brokerObj = create_broker(root=args.root, verbose=args.v, debug=args.d)
# readability
queue = args.queue
silent = args.s
# send the messages to deletion
deleted_msgs = 0
for msg_file in args.msg_files.split(','):
# get the file name
msg_filename = os.path.basename(msg_file)
# is the filename of a consumed message?
        match = re.search(r'^\d+\.\d+\.\d+\.ddmq[a-zA-Z0-9]+$', msg_filename)
if match:
msg_filename = os.path.join(args.root, queue, 'work', msg_filename)
# is the filename of a not yet consumed message?
        match = re.search(r'^\d+\.\d+\.ddmq[a-zA-Z0-9]+$', msg_filename)
if match:
msg_filename = os.path.join(args.root, queue, msg_filename)
# make sure the file exists
if not os.path.isfile(msg_filename):
if not silent:
print("Skipping {}, file does not exists".format(msg_filename))
continue
if brokerObj.delete_message(msg_filename):
if not silent:
print("Deleted {}".format(msg_filename))
deleted_msgs += 1
if not silent and deleted_msgs>1:
print('Deleted {} messages'.format(deleted_msgs))
def purge(args=None):
"""
    Handle the command-line sub-command purge
    """
%(self.minFFWidth))
print("")
def ClassifyInsts(self):
self.pinNonFFInstDict = {}
self.pinFFInstDict = {}
self.numSkipInsts = 0
self.numFFInsts = 0
        allFixedStdInsts = [l for l in self.fixedInsts if not self.IsMacro(l.height)]
self.allStdInsts = self.movableStdInsts + allFixedStdInsts
        # retrieve minFFWidth
self.DetermineFFCondition(self.targetFFRatio)
for curInst in self.allStdInsts:
# skip for weird cells in academic benchmark
            if not curInst.IsFeasible():
self.numSkipInsts += 1
continue
# skip for inst that are not available in masterUseList
if curInst.IsFF(self.minFFWidth):
                if curInst.numInputs not in self.masterFFMap:
self.numSkipInsts += 1
curInst.SetIsFeasible(False)
continue
else:
                if curInst.numInputs not in self.masterInstMap:
self.numSkipInsts += 1
curInst.SetIsFeasible(False)
continue
            # only insts with one output pin and at least one input pin survive to this point.
hashKey = curInst.GetHashName()
# update FF map
if curInst.IsFF(self.minFFWidth):
self.numFFInsts += 1
if hashKey in self.pinFFInstDict:
self.pinFFInstDict[hashKey].append(curInst)
else:
self.pinFFInstDict[hashKey] = [curInst]
# update nonFF map
else:
if hashKey in self.pinNonFFInstDict:
self.pinNonFFInstDict[hashKey].append(curInst)
else:
self.pinNonFFInstDict[hashKey] = [curInst]
print("NumSkippedInsts: %d (%.2f percent)"
% ( self.numSkipInsts,
1.0 * self.numSkipInsts / len(self.allStdInsts) * 100 ))
print("NumAssignedFFs: %d (%.2f percent)"
% ( self.numFFInsts,
1.0 * self.numFFInsts / (len(self.allStdInsts) - self.numSkipInsts) * 100) )
# convert 2d dict into 2d list to sort
tmpKeys = list(self.pinNonFFInstDict.keys())
tmpVals = list(self.pinNonFFInstDict.values())
self.pinNonFFInstList = [(key,val) for key,val in zip(tmpKeys,tmpVals)]
tmpKeys = list(self.pinFFInstDict.keys())
tmpVals = list(self.pinFFInstDict.values())
self.pinFFInstList = [(key,val) for key,val in zip(tmpKeys,tmpVals)]
# sort
self.pinNonFFInstList.sort()
self.pinFFInstList.sort()
print("NonFF Cell info from Bookshelf. Key = 10000 * numInputs + width)")
for key, val in self.pinNonFFInstList:
print("key", key, "numInst:", len(val))
print("FF Cell info from Bookshelf. Key = 10000 * numInputs + width)")
for key, val in self.pinFFInstList:
print("key", key, "numInst:", len(val))
# mode = 0: NonFFInst
# mode = 1: FFInst
def GetMasterMapList(self, key, numInsts, mode):
numInputs = key // 10000
width = key % 10000
if mode == 0:
masters = self.masterInstMap[numInputs][:]
else:
masters = self.masterFFMap[numInputs][:]
#print("retrieved masters")
#for l in masters:
# print(l[0], l[1].getName())
# first - diff on cellWidth
# second - odb master
masters = [ [abs(width-l[0]),l[1]] for l in masters ]
# get min on cellWidthDiff
minX = min([ l[0] for l in masters ])
# extract all masters that has minX
masters = [ l[1] for l in masters if l[0] == minX ]
#print("Mode: %s " % ("FF" if mode == 0 else "INST"),
# "key:", key,
# "min(widthDiff):", minX,
# "bsWidth:", width,
# "masters:", ",".join([l.getName() for l in masters]))
# quotient
numInstsPerMacro = numInsts // len(masters)
retArr = [l for l in masters for j in range(0,numInstsPerMacro)]
# remainder problem.
while len(retArr) < numInsts:
retArr.append(masters[0])
# shuffle the master mapping
random.seed(0)
random.shuffle(retArr)
return retArr
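    # Toy walk-through of the mapping above (numbers are made up): key 30042
    # decodes to numInputs=3, width=42. If two masters tie on |42 - width|,
    # numInsts=5 yields quotient 2 copies of each, one remainder copy of
    # masters[0], and the list is shuffled with a fixed seed for determinism.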
def GetNonFFMasterMapList(self, key, numInsts):
return self.GetMasterMapList(key, numInsts, mode = 0)
def GetFFMasterMapList(self, key, numInsts):
return self.GetMasterMapList(key, numInsts, mode = 1)
# init chip and block obj
def InitOdb(self,
macroInstPinLayer,
macroInstObsLayer,
primaryLayer):
# initialize dbChip
chip = self.odb.getChip()
        if chip is not None:
print("[WARNING] Chip obj is already initialized")
else:
chip = self.odbpy.dbChip_create(self.odb)
# initialize dbBlock
block = chip.getBlock()
        if block is not None:
print("[WARNING] Block obj is already initialized")
else:
block = self.odbpy.dbBlock_create(chip, self.designName)
self.odbBlock = block
# default dbu is from LEF
odbTech = self.odb.getTech()
self.dbu = originalLEFDbu = odbTech.getDbUnitsPerMicron()
odbTech.setDbUnitsPerMicron(originalLEFDbu)
self.odbBlock.setDefUnits(self.dbu)
self.manufacturingGrid = int(odbTech.getManufacturingGrid())
print("[INFO] ManuFacturingGrid:", self.manufacturingGrid)
# retrieve layer info
# 1. fixed macro insts pin layer
# default: metal 3, 4
        if macroInstPinLayer is None:
odbMacroInstPinLayer = [odbTech.findRoutingLayer(l) for l in range(3,5)]
horLayer = [l for l in odbMacroInstPinLayer if l.getDirection() == "HORIZONTAL"]
verLayer = [l for l in odbMacroInstPinLayer if l.getDirection() == "VERTICAL"]
if len(horLayer) != 1 or len(verLayer) != 1:
print("[ERROR] Cannot find proper layers for macro inst pin layers.\n"
+ " Please put two routing layers for macro inst pin layers")
exit(1)
self.macroInstPinHorLayer = horLayer[0]
self.macroInstPinVerLayer = verLayer[0]
self.macroInstPinLowerLayer = odbMacroInstPinLayer[0]
print("[WARNING] Fixed macro insts' pin layer is assigned as H: "
+ self.macroInstPinHorLayer.getName()
+ " V: " + self.macroInstPinVerLayer.getName())
elif len(macroInstPinLayer) != 2:
print("[ERROR] Received " + str(len(macroInstPinLayer)) + " layers.\n"
+ " Please put two routing layers for macro inst pin layers" )
exit(1)
else:
odbMacroInstPinLayer = [odbTech.findLayer(l) for l in macroInstPinLayer]
horLayer = [l for l in odbMacroInstPinLayer if l.getDirection() == "HORIZONTAL"]
verLayer = [l for l in odbMacroInstPinLayer if l.getDirection() == "VERTICAL"]
if len(horLayer) != 1 or len(verLayer) != 1:
print("[ERROR] Cannot find proper layers for macro inst pin layers.\n"
+ " Please put two routing layers for macro inst pin layers")
exit(1)
self.macroInstPinHorLayer = horLayer[0]
self.macroInstPinVerLayer = verLayer[0]
self.macroInstPinLowerLayer = odbMacroInstPinLayer[0]
print("[INFO] Fixed macro insts' pin layer is assigned as H: "
+ self.macroInstPinHorLayer.getName()
+ " V: " + self.macroInstPinVerLayer.getName())
# 2. fixed macro insts obs layer -- m1-m4 v1-v3
        if macroInstObsLayer is None:
            # findLayer() expects a layer name; for integer routing levels the
            # findRoutingLayer() lookup (used elsewhere in this function) applies
            self.macroInstObsLayer = [odbTech.findRoutingLayer(l) for l in range(1,6)]
            print("[WARNING] Fixed macro insts' obs layer is assigned as \n" +
                  "\n".join([m.getName() for m in self.macroInstObsLayer]) + "\n")
else:
self.macroInstObsLayer = [odbTech.findLayer(l) for l in macroInstObsLayer]
# 3. primary pins layer (input/output PINS in def)
# default: metal 4
        if primaryLayer is None:
self.primaryLayer = odbTech.findRoutingLayer(4)
print("[WARNING] primary pin layer is assigned as " +
self.primaryLayer.getName())
else:
self.primaryLayer = odbTech.findLayer(primaryLayer)
# Create OVERLAP layer for rectilinear macro definition
# this is only needed when *.shape is given
if len(self.bsShapeList) != 0:
            if odbTech.findLayer("OVERLAP") is None:
self.odbpy.dbTechLayer_create(odbTech, "OVERLAP", "OVERLAP")
self.macroInstObsLayer.append(odbTech.findLayer("OVERLAP"))
def FillOdb(self):
self.FillOdbRows()
self.FillOdbMacroLef()
self.FillOdbStdInsts()
self.FillOdbFixedMacroInsts()
self.FillOdbMovableMacroInsts()
self.FillOdbNets()
self.FillOdbClockNet()
    # note that handling fragmented ROWs is not acceptable
    # due to different site heights on different tech.
    #
    # Retrieve lx, ly, ux, uy of coreArea and create the rows
def FillOdbRows(self):
lx = 1e30
ly = 1e30
ux = -1e30
uy = -1e30
for curRow in self.bsRowList:
rowLy = int(curRow[1][-1])
rowUy = rowLy + 1
rowLx = int(curRow[-1][2])
rowUx = rowLx + int(curRow[-1][-1])
lx = min(lx, rowLx)
ly = min(ly, rowLy)
ux = max(ux, rowUx)
uy = max(uy, rowUy)
print("BookshelfCore:", lx, ly, ux, uy)
coreDbu = [lx, ly, ux, uy]
coreDbu = [self.GetGridCoordi(int(l * self.bsDbuRatio)) for l in coreDbu]
coreDbuStr = [str(l) for l in coreDbu]
print("DEFCore:", " ".join(coreDbuStr))
dbuLx = coreDbu[0]
dbuLy = coreDbu[1]
dbuUx = coreDbu[2]
dbuUy = coreDbu[3]
numSites = int(round(float(dbuUx - dbuLx)/self.odbSiteWidth))
for idx, curLy in enumerate(range(dbuLy,
dbuUy,
self.odbSiteHeight)):
self.odbpy.dbRow_create(self.odbBlock,
"ROW_%d" % (idx),
self.odbSite,
dbuLx, curLy,
"R0" if idx % 2 == 0 else "MX",
"HORIZONTAL",
numSites,
self.odbSiteWidth)
# update dbuUy
if dbuUy < curLy + self.odbSiteHeight:
dbuUy = curLy + self.odbSiteHeight
print("Total %d Rows are created in OpenDB" % (len(self.odbBlock.getRows())))
# note that die to core spacing is equal to dbuLx and dbuLy
dieUx = dbuUx + dbuLx
dieUy = dbuUy + dbuLy
# due to macro placement snapping, we need to increase the die area.
# first, calculate macro placement offset.
pitchY = self.macroInstPinHorLayer.getPitchY()
offsetY = self.macroInstPinHorLayer.getOffsetY()
self.macroInstHorLayerOffset = offsetY
#= (pitchY + offsetY - (dbuLy % pitchY)) % pitchY
pitchX = self.macroInstPinVerLayer.getPitchX()
offsetX = self.macroInstPinVerLayer.getOffsetX()
self.macroInstVerLayerOffset = offsetX
# = (pitchX + offsetX - (dbuLx % pitchX)) % pitchX
print("Vertical(X) Layer:", self.macroInstPinVerLayer.getName())
print("c2d", dbuLx, "pitch", self.macroInstPinVerLayer.getPitchX())
print("offset", self.macroInstPinVerLayer.getOffsetX())
print("Macro Place Offset - x: %d, y: %d "
% (self.macroInstVerLayerOffset,
self.macroInstHorLayerOffset))
dieUx = dieUx + self.macroInstVerLayerOffset
dieUy = dieUy + self.macroInstHorLayerOffset
rect = self.odbpy.Rect(0, 0, dieUx, dieUy)
self.odbBlock.setDieArea(rect)
print("DEFDie: %d %d %d %d" % (0, 0, dieUx, dieUy))
print("")
def GetGridCoordi(self, coordi):
return self.GetSnapCoordi(self.manufacturingGrid, coordi)
def GetSiteCoordi(self, coordi):
return self.GetSnapCoordi(self.odbSiteHeight, coordi)
def GetSnapCoordi(self, gridUnit, coordi):
return (coordi // gridUnit) * gridUnit
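    # e.g. GetSnapCoordi(5, 13) == 10: coordinates snap down to the nearest
    # multiple of gridUnit (the manufacturing grid or the site height above).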
# takes list of [x, y] and generate snapped pin locations
def SnapFixedMacroPinLocations(self, pins, odbMaster):
width = odbMaster.getWidth()
height = odbMaster.getHeight()
pitchX = self.macroInstPinVerLayer.getPitch() * 2
pitchY = self.macroInstPinHorLayer.getPitch() * 2
cntX = width // pitchX
cntY = height // pitchY
        # make 2D ...
# -*- coding: utf-8 -*-
#! python3
'''
Barrier Island Geomorphology Extraction along transects (BI-geomorph-extraction module)
Author: <NAME>
email: <EMAIL>;
These functions require arcpy.
Designed to be imported by either prepper.ipynb or extractor.py.
'''
import time
import os
import collections
import pandas as pd
import numpy as np
from operator import add
import sys
import arcpy
import core.functions as fun
"""
# General use functions
"""
def SetInputFCname(inFCname: 'name of input feature class',
varname: 'name of variable' = '',
system_ext: 'raise SystemExit if file not found?' = True) -> str:
"""Look for input feature class name in workspace and
prompt for different name if not found."""
if len(varname) < 1:
varname = inFCname
if arcpy.Exists(inFCname):
inFCname = inFCname
else:
FCname = input("{} file with path (e.g. {} or '0' for none): ".format(varname, inFCname))
if FCname == '0':
FCname = False
elif not arcpy.Exists(FCname):
FCname = input("'{}' doesn't exist in the workspace. Try again. \n{} file: ".format(FCname, varname, inFCname))
if FCname == '0':
FCname = False
if FCname:
inFCname = os.path.basename(FCname)
else:
print('No data selected for {}.'.format(os.path.basename(inFCname)))
inFCname = False
if system_ext:
raise SystemExit
return(inFCname)
def unique_values(table, field: str):
"""return sorted unique values in field"""
data = arcpy.da.TableToNumPyArray(table, [field])
    return(np.unique(data[field]))
def CheckValues(inFeatureClass: str, fieldName: str, expectedRange) -> tuple:
"""Check for anomalous values in FC"""
lowrows = list()
highrows = list()
expectedRange.sort() # make sure pair is [low,high]
with arcpy.da.UpdateCursor(inFeatureClass,[fieldName,'trans_sort']) as cursor:
for row in cursor:
if row[0]< expectedRange[0]:
row[0] = None
lowrows.append(row[1])
elif row[0]>expectedRange[1]:
row[0] = None
highrows.append(row[1])
else:
pass
cursor.updateRow(row)
return(lowrows, highrows)
def fieldsAbsent(in_fc: str, fieldnames: list):
"""Find missing fields in feature class. Return False if none missing."""
try:
fieldList = arcpy.ListFields(os.path.join(arcpy.env.workspace,in_fc))
except:
fieldList = arcpy.ListFields(in_fc)
fnamelist = [f.name.lower() for f in fieldList]
mfields = []
for fn in fieldnames:
if not fn.lower() in fnamelist:
mfields.append(fn)
if not len(mfields):
print("All expected fields present in file '{}'.".format(os.path.basename(in_fc)))
return(False)
else:
print("Fields '{}' not present in file '{}'.".format(
mfields, os.path.basename(in_fc)))
return(mfields)
def fieldExists(in_fc: str, fieldname: str) -> bool:
""" Check whether field exists in feature class."""
try:
fieldList = arcpy.ListFields(os.path.join(arcpy.env.workspace, in_fc))
except:
fieldList = arcpy.ListFields(in_fc)
for f in fieldList:
if f.name.lower() == fieldname.lower():
return(True)
return(False)
def CopyAndWipeFC(in_fc: str, out_fc: str, preserveflds: list = []) -> str:
"""Make copy of feature class with all field values Null.
Preserves values in necessary fields and those listed in preserveflds."""
out_fc = os.path.join(arcpy.env.scratchGDB, out_fc)
arcpy.CopyFeatures_management(in_fc, out_fc)
# list all fields that are not required in the FC (e.g. OID@) and not in preserveflds
fldsToWipe = [f.name for f in arcpy.ListFields(out_fc)
if not f.required and not f.name in preserveflds]
# Set value to None
with arcpy.da.UpdateCursor(out_fc, fldsToWipe) as cursor:
for row in cursor:
cursor.updateRow([None] * len(row))
return(out_fc)
def AddNewFields(fc: str, fieldlist: list, fieldtype: str ="DOUBLE", verbose: bool = True) -> str:
"""Add fields to FC if they do not already exist.
New fields must all be the same type."""
# print('Adding fields to {} as type {} if they do not already exist.'.format(out_fc, fieldtype))
def AddNewField(fc, newfname, fieldtype, verbose):
# Add single new field
if not fieldExists(fc, newfname):
arcpy.AddField_management(fc, newfname, fieldtype)
if verbose:
print('Added {} field to {}'.format(newfname, os.path.basename(fc)))
return(fc)
# Execute for multiple fields
    if isinstance(fieldlist, str):
        AddNewField(fc, fieldlist, fieldtype, verbose)
    elif isinstance(fieldlist, (list, tuple)):
        for newfname in fieldlist:
            AddNewField(fc, newfname, fieldtype, verbose)
else:
print("fieldlist accepts string, list, or tuple of field names. {} type not accepted.".format(type(fieldlist)))
return(fc)
def DeleteExtraFields(inTable: str, keepfields=[]) -> str:
"""Delete all fields from inTable that are not required."""
# list all fields that are not required in the FC (e.g. OID@)
fldsToDelete = [x.name for x in arcpy.ListFields(inTable) if not x.required]
if keepfields:
# remove keepfields from fldsToDelete
        for f in keepfields:
            if f in fldsToDelete:
                fldsToDelete.remove(f)
if len(fldsToDelete):
arcpy.DeleteField_management(inTable, fldsToDelete)
return(inTable)
def DeleteTempFiles(wildcard: str ='*_temp') -> list:
"""Delete files matching wildcard of type FC, Dataset, or Table from workspace."""
templist = []
try:
templist = templist + arcpy.ListFeatureClasses(wildcard)
except:
pass
try:
templist = templist + arcpy.ListDatasets(wildcard)
except:
pass
try:
templist = templist + arcpy.ListTables(wildcard)
except:
pass
for tempfile in templist:
arcpy.Delete_management(tempfile)
return(templist)
def RemoveLayerFromMXD(lyrname: str) -> bool:
"""Remove layer matching string or wildcard from MXD."""
try:
mxd = arcpy.mapping.MapDocument('CURRENT')
for df in arcpy.mapping.ListDataFrames(mxd):
for lyr in arcpy.mapping.ListLayers(mxd, lyrname, df):
arcpy.mapping.RemoveLayer(df, lyr)
        return(True)
except:
print("Layer '{}' could not be removed from map document.".format(lyrname))
return(False)
def ReplaceFields(fc: str, newoldfields: dict, fieldtype: str ='DOUBLE') -> str:
"""Use tokens to save geometry properties as attributes
E.g. newoldfields={'LENGTH':'SHAPE@LENGTH'}"""
spatial_ref = arcpy.Describe(fc).spatialReference
for (new, old) in newoldfields.items():
        if not fieldExists(fc, new): # Add field if it doesn't already exist
            arcpy.AddField_management(fc, new, fieldtype)
with arcpy.da.UpdateCursor(fc, [new, old], spatial_reference=spatial_ref) as cursor:
for row in cursor:
cursor.updateRow([row[1], row[1]])
if fieldExists(fc, old):
try:
arcpy.DeleteField_management(fc,old)
except:
print(arcpy.GetMessage(2))
pass
return(fc)
def DuplicateField(fc: str, fld: str, newname: str, ftype: str ='') -> str:
"""Copy field values into field with new name"""
# 1. get field type
if len(ftype) < 1:
        flds = arcpy.ListFields(fc, fld)
        ftype = flds[0].type
# 2. add new field
arcpy.AddField_management(fc, newname, ftype)
# 3. copy values
with arcpy.da.UpdateCursor(fc, [fld, newname]) as cursor:
for row in cursor:
cursor.updateRow([row[0], row[0]])
return(fc)
def ReplaceValueInFC(fc: str, oldvalue=-99999, newvalue=None, fields="*") -> str:
"""Replace oldvalue with newvalue in fields in fc"""
# First check field types
with arcpy.da.UpdateCursor(fc, fields) as cursor:
fieldindex = range(len(cursor.fields))
for row in cursor:
for i in fieldindex:
if row[i] == oldvalue:
row[i] = newvalue
cursor.updateRow(row)
return(fc)
def CopyFCandReplaceValues(fc, oldvalue=-99999, newvalue=None,
fields="*", out_fc='', out_dir='', verbose=True):
"""Replace oldvalue with newvalue in fields in fc"""
# First check field types
if len(out_fc) > 0:
if len(out_dir) < 1:
out_dir = arcpy.env.workspace
arcpy.FeatureClassToFeatureClass_conversion(fc, out_dir, out_fc)
fc = out_fc
fc = ReplaceValueInFC(fc, oldvalue, newvalue, fields)
if verbose:
print("OUTPUT: {}".format(os.path.basename(fc)))
return(fc)
def ReProject(fc: str, newfc: str, proj_code: int =26918, verbose: bool=True) -> str:
"""If spatial reference does not match desired, project in correct SR."""
if not arcpy.Describe(fc).spatialReference.factoryCode == proj_code: # NAD83 UTM18N
arcpy.Project_management(fc, newfc, arcpy.SpatialReference(proj_code))
if verbose:
print("The projection of {} was changed. The new file is {}.".format(os.path.basename(fc), os.path.basename(newfc)))
else:
newfc = fc
return(newfc)
def DeleteFeaturesByValue(fc: str, fields=[], deletevalue=-99999) -> str:
"""Delete features matching deletevalue."""
# If the fields argument is blank, defaults to use all fields.
if len(fields) < 1:
fs = arcpy.ListFields(fc)
for f in fs:
fields.append(f.name)
# Delete each row where any of the fields listed match the delete value.
with arcpy.da.UpdateCursor(fc, fields) as cursor:
for row in cursor:
for i in range(len(fields)):
if row[i] == deletevalue:
cursor.deleteRow()
return(fc)
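# Example use of the value-replacement helpers above (paths and field names
# are hypothetical):
#   fc = CopyFCandReplaceValues('transects', -99999, None, out_fc='transects_clean')
#   DeleteFeaturesByValue('transects_clean', ['DistDH', 'DistDL'], deletevalue=-99999)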
"""
Pre-processing
"""
def MorphologyCSV_to_FCsByFeature(csvpath: str, state: int, proj_code, csv_fill = 999, fc_fill = -99999, csv_epsg=4326):
"""
Given a CSV of morphology feature positions from Doran and others
(https://doi.org/10.5066/F7GF0S0Z), convert the points in a given
state to three feature classes (SL, DT, DC).
"""
# initialize
datapre = os.path.splitext(os.path.basename(csvpath))[0]
crs = arcpy.SpatialReference(csv_epsg)
# import CSV as DF
df = pd.read_csv(csvpath)
# remove nulls/fills
df = df[df != csv_fill]
# Subset by state
df = df[df.state == state]
# Subset by feature type
feat_code = 'DT'
subdf = df[df.feature_type == feat_code]
fcname = os.path.join(arcpy.env.scratchGDB, 'm{}_s{}_{}'.format(datapre, state, feat_code))
fc = DFtoFC(subdf, fcname+'_wgs', spatial_ref=crs, xy=['lon', 'lat'], keep_fields='all', fill=fc_fill)
dt_fc = ReProject(fc, fcname, proj_code)
# arcpy.SelectLayerByLocation_management(fcname, "INTERSECT", barrierBoundary)
feat_code = 'DC'
subdf = df[df.feature_type == feat_code]
fcname = os.path.join(arcpy.env.scratchGDB, 'm{}_s{}_{}'.format(datapre, state, feat_code))
fc = DFtoFC(subdf, fcname+'_wgs', spatial_ref=crs, xy=['lon', 'lat'], keep_fields='all', fill=fc_fill)
dc_fc = ReProject(fc, fcname, proj_code)
feat_code = 'SL'
subdf = df[df.feature_type == feat_code]
fcname = os.path.join(arcpy.env.scratchGDB, 'm{}_s{}_{}'.format(datapre, state, feat_code))
fc = DFtoFC(subdf, fcname+'_wgs', spatial_ref=crs, xy=['lon', 'lat'], keep_fields='all', fill=fc_fill)
sl_fc = ReProject(fc, fcname, proj_code)
return(dt_fc, dc_fc, sl_fc)
def ProcessDEM(elevGrid: str, utmSR) -> str:
"""
    Check that the raster is 1x1 m resolution in the correct projection; project it if not.
"""
# Get raster properties: spatial ref and cell size
sr = arcpy.Describe(elevGrid).spatialReference
cs = arcpy.GetRasterProperties_management(elevGrid, "CELLSIZEX")
    # If the DEM is already 1x1 m in the correct projection, return the DEM filename
    if sr == utmSR and cs.getOutput(0) == '1':
print('Raster already projected at correct resolution and SR.')
return(elevGrid)
# If cell size is not 1x1m in NAD83 UTM Zone__, Project it to such.
if sr != utmSR or cs.getOutput(0) != '1':
elevGrid2 = elevGrid+'_projected'
arcpy.ProjectRaster_management(elevGrid, elevGrid2, utmSR, cell_size="1")
else:
        elevGrid2 = elevGrid
    return(elevGrid2)
# TranskribusDU/gcn/DU_gcn_task.py
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import pickle
import os.path
import random
import gcn.gcn_models as gcn_models
from gcn.gcn_datasets import GCNDataset
import time
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train', '', "FilePath Train pickle file")
tf.app.flags.DEFINE_string('test', '', "FilePath for the pickle")
tf.app.flags.DEFINE_integer('fold', 1, "Fold index to run")
tf.app.flags.DEFINE_string('out_dir', 'out_res', "outdirectory for saving the results")
tf.app.flags.DEFINE_integer('configid', 0, 'gridid')
tf.app.flags.DEFINE_bool('snake',False, 'whether to work on the snake dataset')
tf.app.flags.DEFINE_bool('das_train',False, ' Training the Model for the DAS paper')
tf.app.flags.DEFINE_bool('das_predict',False, 'Prediction Experiment for the DAS paper')
tf.app.flags.DEFINE_bool('das_predict_workflow',False, 'Prediction Experiment for the DAS paper')
# Details of the training configuration.
tf.app.flags.DEFINE_float('learning_rate', 0.1, """How large a learning rate to use when training, default 0.1 .""")
tf.app.flags.DEFINE_integer('nb_iter', 3000, """How many training steps to run before ending, default 3000.""")
tf.app.flags.DEFINE_integer('nb_layer', 1, """How many layers """)
tf.app.flags.DEFINE_integer('eval_iter', 256, """How often to evaluate the training results.""")
tf.app.flags.DEFINE_string('path_report', 'default', """Path for saving the results """)
tf.app.flags.DEFINE_string('grid_configs', '3_4_5', """Configs to be run on all the folds """)
tf.app.flags.DEFINE_integer('qsub_taskid', -1, 'qsub_taskid')
#For Snake python DU_gcn_task.py --snake=True --configid=22
import errno
import os
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
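# On Python >= 3.2 the same effect is os.makedirs(path, exist_ok=True); the
# errno check above keeps Python 2 compatibility.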
def _make_grid_qsub(grid_qsub=0):
if grid_qsub==0:
tid=0
C={}
for fold_id in [1,2,3,4]:
#for config in [4,5]:
#for config in [27,28,29]:
for config in [31]:
#for config in [3, 4]:
#for config in [5]:
C[tid]=(fold_id,config)
tid+=1
return C
else:
raise NotImplementedError
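# Example: with grid_qsub == 0 the qsub task id enumerates (fold, config)
# pairs; as written above, C == {0: (1, 31), 1: (2, 31), 2: (3, 31), 3: (4, 31)}.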
def get_config(config_id=0):
config = {}
if config_id == 0:
config['nb_iter'] = 1000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 1
elif config_id==-1:
#Debug Configuration with few iterations
config['nb_iter'] = 10
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 2
config['node_indim'] = -1
config['nconv_edge'] = 10
elif config_id==1:
#config['nb_iter'] = 2000
config['nb_iter'] = 1000
config['lr'] = 0.001
config['stack_instead_add'] = False
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 1
config['fast_convolve'] = True
#config['train_Wn0']=False
elif config_id==2:
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 10
config['fast_convolve'] = True
elif config_id==3:
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 1
config['node_indim'] = -1
config['nconv_edge'] = 50
config['fast_convolve'] = True
elif config_id==4:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 2
config['node_indim'] = -1
config['nconv_edge'] = 7
config['fast_convolve'] = True
elif config_id==5:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['fast_convolve']=True
#config['train_Wn0']=False
#Projection
elif config_id == 6:
# config['nb_iter'] = 2000
config['nb_iter'] = 1500
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.1
config['num_layers'] = 2
config['node_indim'] = 20 # INDIM =2 not working here
config['nconv_edge'] = 10
#Config for snakes ..
elif config_id == 7:
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.1
config['num_layers'] = 1
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 121
#config['activation']=tf.tanh
elif config_id == 8:
config['nb_iter'] = 500
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =10
#config['snake']=True
#config['activation'] = tf.tanh
        # Features on the snake dataset: there is no way to consider just one neighbor;
        # in the table dataset this is possible because of the feature types, which are grouped
###########################################
elif config_id == 9:
config['nb_iter'] = 2000
config['lr'] = 0.0005
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 5
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =4
config['snake'] = True
# Testing Regularization Effect ...
# Back to Config 5 but with regularization
elif config_id==10:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
elif config_id==11:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.01
config['stack_instead_add'] = True
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
elif config_id==12:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.001
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
#Config Deep
elif config_id == 13:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 5
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 5
#Test Residual Connection
elif config_id == 14:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['residual_connection']=True
elif config_id == 15:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 2
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 50
config['shared_We']=True
elif config_id == 16:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.1
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['opti']=tf.train.AdagradOptimizer(config['lr'])
elif config_id == 17:
# config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] = 10
config['opti']=tf.train.RMSPropOptimizer(config['lr'])
#Dropout Mode Test
elif config_id==18:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.2 #means we keep with a proba of 0.8
config['dropout_mode'] = 2
#Dropout Edges..
elif config_id==19:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.2 #means we keep with a proba of 0.8
config['dropout_mode'] = 4
elif config_id==20:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.005
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
elif config_id==21:
#config['nb_iter'] = 2000
config['nb_iter'] = 2000
config['lr'] = 0.0005
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 3
config['node_indim'] = -1 #INDIM =2 not working here
config['nconv_edge'] = 10
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
elif config_id == 22:
config['nb_iter'] = 200
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
#config['num_layers'] = 2 #Mean Node Accuracy 0.92
#config['num_layers'] = 5 #Mean Node Accuracy 0.9381
config['num_layers'] = 9 # --> 9523 converges quickly
config['node_indim'] = -1 # INDIM =2 not working here #Should add bias to convolutions, no ?
config['nconv_edge'] =4 #Already by default
config['snake']=True
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
elif config_id == 23:
config['nb_iter'] = 200
config['lr'] = 0.0001
config['stack_instead_add'] = True
config['mu'] = 0.0
#config['num_layers'] = 2 #Mean Node Accuracy 0.92
#config['num_layers'] = 3 #Mean Node Accuracy 0.9381
config['num_layers'] = 6 # --> 9523 converges quickly
config['node_indim'] = -1 # INDIM =2 not working here #Should add bias to convolutions, no ?
config['nconv_edge'] =4 #Already by default
config['snake']=True
config['dropout_rate'] = 0.1
config['dropout_mode'] = 2
elif config_id == 24:
config['nb_iter'] = 800
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 9
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =5
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
#config['shared_We'] = True
elif config_id == 25:
config['nb_iter'] = 500
config['lr'] = 0.001
config['stack_instead_add'] = True
config['mu'] = 0.0
config['num_layers'] = 20
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =2
config['dropout_rate'] = 0.0
config['dropout_mode'] = 0
#config['shared_We'] = True
elif config_id == 26: #Config for the Snake with the same feature rep as CRF ie the fixed_node one
config['nb_iter'] = 500
config['lr'] = 0.001
config['stack_instead_add'] = False #Default True
config['mu'] = 0.0
config['num_layers'] = 7
config['node_indim'] = -1 # INDIM =2 not working here
config['nconv_edge'] =10