hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
e64a8568b52a1832a0a886a8d46529eefd7e6004
23
py
Python
MPUAPD_Rip.py
amrosado/BRFSS_Master
1bb3f0f6b26668cf1014976697a8aba29ffa7760
[ "MIT" ]
null
null
null
MPUAPD_Rip.py
amrosado/BRFSS_Master
1bb3f0f6b26668cf1014976697a8aba29ffa7760
[ "MIT" ]
null
null
null
MPUAPD_Rip.py
amrosado/BRFSS_Master
1bb3f0f6b26668cf1014976697a8aba29ffa7760
[ "MIT" ]
null
null
null
__author__ = 'arosado'
11.5
22
0.73913
2
23
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.130435
23
1
23
23
0.65
0
0
0
0
0
0.304348
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e65db3158c371825167d3dce2be535cf55981fc2
274
py
Python
pyplan/pyplan/modelmanager/serializers/NodePropertySerializer.py
jorgedouglas71/pyplan-ide
5ad0e4a2592b5f2716ff680018f717c65de140f5
[ "MIT" ]
17
2019-12-04T19:22:19.000Z
2021-07-28T11:17:05.000Z
pyplan/pyplan/modelmanager/serializers/NodePropertySerializer.py
jorgedouglas71/pyplan-ide
5ad0e4a2592b5f2716ff680018f717c65de140f5
[ "MIT" ]
9
2019-12-13T15:34:43.000Z
2022-02-10T11:43:00.000Z
pyplan/pyplan/modelmanager/serializers/NodePropertySerializer.py
jorgedouglas71/pyplan-ide
5ad0e4a2592b5f2716ff680018f717c65de140f5
[ "MIT" ]
5
2019-12-04T15:57:06.000Z
2021-08-20T19:59:26.000Z
from rest_framework import serializers class NodePropertySerializer(serializers.Serializer): name = serializers.CharField(required=True, allow_blank=False, max_length=200) value = serializers.CharField(required=False, allow_blank=True, allow_null=True, default="")
45.666667
96
0.810219
32
274
6.78125
0.65625
0.184332
0.258065
0
0
0
0
0
0
0
0
0.012097
0.094891
274
5
97
54.8
0.862903
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
e661f72f9dbb36bbf7ba5105acab698ce68c4bce
139
py
Python
homeworks/aleksey_gukov/lesson09/level02.py
tgrx/Z22
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
[ "Apache-2.0" ]
null
null
null
homeworks/aleksey_gukov/lesson09/level02.py
tgrx/Z22
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
[ "Apache-2.0" ]
8
2019-11-15T18:15:56.000Z
2020-02-03T18:05:05.000Z
homeworks/aleksey_gukov/lesson09/level02.py
tgrx/Z22
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
[ "Apache-2.0" ]
null
null
null
def rotate_left(list_f, step): for _ in range(step): list_f.append(list_f.pop(0)) list_s = list_f[:] return list_s
23.166667
36
0.611511
24
139
3.208333
0.583333
0.25974
0
0
0
0
0
0
0
0
0
0.009804
0.266187
139
5
37
27.8
0.745098
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.4
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e665c89fef0429073d891d3130be6ad51c0bca4b
7,460
py
Python
scripts/ndvilog.py
omad/dratools
9c7edac30606f7d8105feb4db62ef1e567d20d3e
[ "Apache-2.0" ]
null
null
null
scripts/ndvilog.py
omad/dratools
9c7edac30606f7d8105feb4db62ef1e567d20d3e
[ "Apache-2.0" ]
null
null
null
scripts/ndvilog.py
omad/dratools
9c7edac30606f7d8105feb4db62ef1e567d20d3e
[ "Apache-2.0" ]
null
null
null
times = [{'writing_data': 6.627535581588745, 'ndvi_pc10': 27.36231303215027, 'loading_data': 109.62806057929993, 'ndvi_pc90': 21.421257734298706, 'ndvi_pc50': 27.33169937133789}, {'writing_data': 6.443411588668823, 'ndvi_pc10': 39.16243243217468, 'loading_data': 123.19368815422058, 'ndvi_pc90': 38.72961163520813, 'ndvi_pc50': 36.39958381652832}, {'writing_data': 6.733800649642944, 'ndvi_pc10': 32.248281955718994, 'loading_data': 56.15437984466553, 'ndvi_pc90': 31.813125610351562, 'ndvi_pc50': 35.00871157646179}, {'writing_data': 6.484926462173462, 'ndvi_pc10': 30.538794994354248, 'loading_data': 72.27108573913574, 'ndvi_pc90': 31.11927819252014, 'ndvi_pc50': 30.832648038864136}, {'writing_data': 6.359814405441284, 'ndvi_pc10': 46.55086040496826, 'loading_data': 59.67847752571106, 'ndvi_pc90': 39.096333265304565, 'ndvi_pc50': 42.16282844543457}, {'writing_data': 6.564153432846069, 'ndvi_pc10': 45.175180196762085, 'loading_data': 87.63148283958435, 'ndvi_pc90': 49.379384994506836, 'ndvi_pc50': 45.34032440185547}, {'writing_data': 6.604489326477051, 'ndvi_pc10': 32.93318557739258, 'loading_data': 78.88174366950989, 'ndvi_pc90': 31.08031392097473, 'ndvi_pc50': 29.89056134223938}, {'writing_data': 6.4211084842681885, 'ndvi_pc10': 41.337995767593384, 'loading_data': 82.0915629863739, 'ndvi_pc90': 39.3358314037323, 'ndvi_pc50': 39.409401655197144}, {'writing_data': 6.546594858169556, 'ndvi_pc10': 35.091548681259155, 'loading_data': 47.397238969802856, 'ndvi_pc90': 34.05122685432434, 'ndvi_pc50': 33.63374209403992}, {'writing_data': 6.11034631729126, 'ndvi_pc10': 43.16675114631653, 'loading_data': 63.00188064575195, 'ndvi_pc90': 40.11013221740723, 'ndvi_pc50': 40.178325176239014}, {'writing_data': 6.267251491546631, 'ndvi_pc10': 36.879666328430176, 'loading_data': 59.68582105636597, 'ndvi_pc90': 35.762709617614746, 'ndvi_pc50': 36.186012506484985}, {'writing_data': 6.086328744888306, 'ndvi_pc10': 34.747477531433105, 'loading_data': 51.13255739212036, 'ndvi_pc90': 
33.50469946861267, 'ndvi_pc50': 33.493101358413696}, {'writing_data': 6.232825517654419, 'ndvi_pc10': 37.161699056625366, 'loading_data': 72.47414040565491, 'ndvi_pc90': 36.648998975753784, 'ndvi_pc50': 38.17262291908264}, {'writing_data': 6.4867870807647705, 'ndvi_pc10': 22.58499264717102, 'loading_data': 50.658077239990234, 'ndvi_pc90': 23.183380842208862, 'ndvi_pc50': 23.05723762512207}, {'writing_data': 6.315629243850708, 'ndvi_pc10': 24.482516288757324, 'loading_data': 52.54269289970398, 'ndvi_pc90': 23.933518171310425, 'ndvi_pc50': 23.647018909454346}, {'writing_data': 6.466713190078735, 'ndvi_pc10': 33.10160160064697, 'loading_data': 39.69579243659973, 'ndvi_pc90': 35.594271183013916, 'ndvi_pc50': 33.5105459690094}, {'writing_data': 6.146531581878662, 'ndvi_pc10': 41.39342451095581, 'loading_data': 82.68737983703613, 'ndvi_pc90': 40.2240526676178, 'ndvi_pc50': 42.00848603248596}, {'writing_data': 6.4085304737091064, 'ndvi_pc10': 24.061297178268433, 'loading_data': 65.79187154769897, 'ndvi_pc90': 24.270387649536133, 'ndvi_pc50': 23.630946397781372}, {'writing_data': 5.905577898025513, 'ndvi_pc10': 37.23882865905762, 'loading_data': 55.112420082092285, 'ndvi_pc90': 36.291778326034546, 'ndvi_pc50': 36.75375556945801}, {'writing_data': 6.305130481719971, 'ndvi_pc10': 35.108429193496704, 'loading_data': 56.73119306564331, 'ndvi_pc90': 35.175426959991455, 'ndvi_pc50': 35.82203245162964}, {'writing_data': 5.938111066818237, 'ndvi_pc10': 36.650582790374756, 'loading_data': 55.64810013771057, 'ndvi_pc90': 36.924052238464355, 'ndvi_pc50': 36.689247369766235}, {'writing_data': 5.939232587814331, 'ndvi_pc10': 34.53633236885071, 'loading_data': 64.5890371799469, 'ndvi_pc90': 32.73802304267883, 'ndvi_pc50': 35.01725482940674}, {'writing_data': 6.481805801391602, 'ndvi_pc10': 15.486124753952026, 'loading_data': 44.980570793151855, 'ndvi_pc90': 16.636183738708496, 'ndvi_pc50': 19.395575284957886}, {'writing_data': 6.364058494567871, 'ndvi_pc10': 34.192909955978394, 
'loading_data': 41.18637776374817, 'ndvi_pc90': 33.94004559516907, 'ndvi_pc50': 32.72668814659119}, {'writing_data': 6.19329047203064, 'ndvi_pc10': 21.083044290542603, 'loading_data': 54.03144335746765, 'ndvi_pc90': 18.876606702804565, 'ndvi_pc50': 20.58382821083069}, {'writing_data': 6.390938997268677, 'ndvi_pc10': 17.885555505752563, 'loading_data': 64.85111451148987, 'ndvi_pc90': 17.912266969680786, 'ndvi_pc50': 16.847716808319092}, {'writing_data': 6.0089945793151855, 'ndvi_pc10': 37.12860655784607, 'loading_data': 80.22634196281433, 'ndvi_pc90': 36.44176435470581, 'ndvi_pc50': 37.66867637634277}, {'writing_data': 5.877416372299194, 'ndvi_pc10': 37.55026960372925, 'loading_data': 67.21783351898193, 'ndvi_pc90': 34.27371907234192, 'ndvi_pc50': 33.92675566673279}, {'writing_data': 6.228186845779419, 'ndvi_pc10': 31.5383403301239, 'loading_data': 61.13664984703064, 'ndvi_pc90': 30.51774287223816, 'ndvi_pc50': 30.585197687149048}, {'writing_data': 6.655982971191406, 'ndvi_pc10': 10.241628408432007, 'loading_data': 41.03610324859619, 'ndvi_pc90': 8.946972131729126, 'ndvi_pc50': 10.193110942840576}, {'writing_data': 6.053949356079102, 'ndvi_pc10': 34.719430446624756, 'loading_data': 69.46687150001526, 'ndvi_pc90': 31.912947416305542, 'ndvi_pc50': 34.160250186920166}, {'writing_data': 5.863107919692993, 'ndvi_pc10': 35.627281665802, 'loading_data': 58.374780893325806, 'ndvi_pc90': 35.16462683677673, 'ndvi_pc50': 35.169331073760986}, {'writing_data': 6.245217323303223, 'ndvi_pc10': 26.947230339050293, 'loading_data': 37.8329381942749, 'ndvi_pc90': 28.4262912273407, 'ndvi_pc50': 26.491452932357788}, {'writing_data': 6.1906843185424805, 'ndvi_pc10': 10.243769407272339, 'loading_data': 47.66607213020325, 'ndvi_pc90': 10.901005268096924, 'ndvi_pc50': 9.574926376342773}, {'writing_data': 6.27140212059021, 'ndvi_pc10': 9.982360601425171, 'loading_data': 52.55823802947998, 'ndvi_pc90': 9.319277286529541, 'ndvi_pc50': 9.227921962738037}, {'writing_data': 6.332434177398682, 
'ndvi_pc10': 4.836212396621704, 'loading_data': 32.75589728355408, 'ndvi_pc90': 5.307963848114014, 'ndvi_pc50': 4.731549501419067}, {'writing_data': 6.2034995555877686, 'ndvi_pc10': 36.06976509094238, 'loading_data': 81.16117906570435, 'ndvi_pc90': 35.752469539642334, 'ndvi_pc50': 37.97353792190552}, {'writing_data': 6.317094087600708, 'ndvi_pc10': 6.397157907485962, 'loading_data': 32.58416557312012, 'ndvi_pc90': 7.095927000045776, 'ndvi_pc50': 6.454242944717407}, {'writing_data': 6.197620153427124, 'ndvi_pc10': 11.273391008377075, 'loading_data': 38.74477291107178, 'ndvi_pc90': 10.42611837387085, 'ndvi_pc50': 11.03548288345337}, {'writing_data': 6.481883764266968, 'ndvi_pc10': 3.8671746253967285, 'loading_data': 26.47277569770813, 'ndvi_pc90': 3.9668197631835938, 'ndvi_pc50': 3.716965675354004}, {'writing_data': 6.128786563873291, 'ndvi_pc10': 16.900771617889404, 'loading_data': 45.50841689109802, 'ndvi_pc90': 19.169956922531128, 'ndvi_pc50': 16.847289562225342}, {'writing_data': 6.797720909118652, 'ndvi_pc10': 4.179133892059326, 'loading_data': 24.04671049118042, 'ndvi_pc90': 4.420083284378052, 'ndvi_pc50': 4.485124349594116}, {'writing_data': 6.50871467590332, 'ndvi_pc10': 5.876180410385132, 'loading_data': 35.49396777153015, 'ndvi_pc90': 5.70380973815918, 'ndvi_pc50': 5.584086894989014}, {'writing_data': 6.459113836288452, 'ndvi_pc10': 4.990781784057617, 'loading_data': 42.639532804489136, 'ndvi_pc90': 4.918926239013672, 'ndvi_pc50': 4.503567218780518}]
165.777778
178
0.781233
881
7,460
6.365494
0.331442
0.086305
0.083452
0
0
0
0
0
0
0
0
0.547592
0.059249
7,460
44
179
169.545455
0.251496
0
0
0
0
0
0.300804
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e66654b8447a56b134aceb85a995affba3faf8dc
168
py
Python
shop/products/urls.py
jsulopz/django-tutorial
a37361a9b715bd0d183d0d7bd69ef59aacd230d3
[ "MIT" ]
null
null
null
shop/products/urls.py
jsulopz/django-tutorial
a37361a9b715bd0d183d0d7bd69ef59aacd230d3
[ "MIT" ]
null
null
null
shop/products/urls.py
jsulopz/django-tutorial
a37361a9b715bd0d183d0d7bd69ef59aacd230d3
[ "MIT" ]
null
null
null
from django.urls import path from . import views urlpatterns = [ path('', views.view_for_products, name='index'), path('pepa', views.new_view, name='pepa'), ]
21
52
0.678571
23
168
4.826087
0.608696
0
0
0
0
0
0
0
0
0
0
0
0.160714
168
8
53
21
0.787234
0
0
0
0
0
0.076923
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
050da5298ea848311016c309c2041ee71e6847b8
614
py
Python
users/signals.py
maks-nurgazy/student-attendance-menegement
6389b02ee8309aacbadfeac135146ee7091a85bb
[ "MIT" ]
null
null
null
users/signals.py
maks-nurgazy/student-attendance-menegement
6389b02ee8309aacbadfeac135146ee7091a85bb
[ "MIT" ]
null
null
null
users/signals.py
maks-nurgazy/student-attendance-menegement
6389b02ee8309aacbadfeac135146ee7091a85bb
[ "MIT" ]
null
null
null
from django.core.signals import request_finished from django.db.models.signals import post_save from django.dispatch import receiver from users.models import User # @receiver(request_finished, sender=User) # def create_user_profile(sender, instance, created, **kwargs): # print(kwargs) # if created: # print(kwargs) # @receiver(post_delete, sender=User) # def delete_user_profile(sender, instance, **kwargs): # roles = instance.roles.all() # if Role.STUDENT in roles: # instance.student_profile.save() # elif Role.TEACHER in roles: # instance.teacher_profile.save()
29.238095
63
0.71987
78
614
5.538462
0.410256
0.069444
0.060185
0.115741
0
0
0
0
0
0
0
0
0.172638
614
20
64
30.7
0.850394
0.687296
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
053f51cb4db6fe7eb0deb7288a51515320878c20
87
py
Python
utils/clear_terminal.py
LeandroLFE/capmon
9d1200301628ea4fec0e8ed09d5e9b67a426d923
[ "MIT" ]
null
null
null
utils/clear_terminal.py
LeandroLFE/capmon
9d1200301628ea4fec0e8ed09d5e9b67a426d923
[ "MIT" ]
null
null
null
utils/clear_terminal.py
LeandroLFE/capmon
9d1200301628ea4fec0e8ed09d5e9b67a426d923
[ "MIT" ]
null
null
null
from os import system, name clear_terminal = system('cls' if name == 'nt' else 'clear')
43.5
59
0.712644
14
87
4.357143
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.149425
87
2
59
43.5
0.824324
0
0
0
0
0
0.113636
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
0566abdb65db9fbc62bb3dc2a69e7540ff6a2ad1
339
py
Python
pystratis/api/federation/requestmodels/atheightrequest.py
TjadenFroyda/pyStratis
9cc7620d7506637f8a2b84003d931eceb36ac5f2
[ "MIT" ]
8
2021-06-30T20:44:22.000Z
2021-12-07T14:42:22.000Z
pystratis/api/federation/requestmodels/atheightrequest.py
TjadenFroyda/pyStratis
9cc7620d7506637f8a2b84003d931eceb36ac5f2
[ "MIT" ]
2
2021-07-01T11:50:18.000Z
2022-01-25T18:39:49.000Z
pystratis/api/federation/requestmodels/atheightrequest.py
TjadenFroyda/pyStratis
9cc7620d7506637f8a2b84003d931eceb36ac5f2
[ "MIT" ]
4
2021-07-01T04:36:42.000Z
2021-09-17T10:54:19.000Z
from pydantic import Field from pystratis.api import Model # noinspection PyUnresolvedReferences class AtHeightRequest(Model): """A request model for the federation/mineratheight and federationatheight endpoints. Args: block_height (int): The height to query. """ block_height: int = Field(alias='blockHeight')
24.214286
89
0.743363
38
339
6.578947
0.736842
0.088
0.112
0
0
0
0
0
0
0
0
0
0.182891
339
13
90
26.076923
0.902527
0.504425
0
0
0
0
0.074324
0
0
0
0
0
0
1
0
true
0
0.5
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
05827522d49be07384007825cbee0282c1975d19
384
py
Python
nxbt/__init__.py
zfghterb721/nxbt
f27f9aec539199aad92a0edd2bf875649b31ea5b
[ "MIT" ]
257
2020-09-16T05:29:05.000Z
2022-03-31T07:38:01.000Z
nxbt/__init__.py
zfghterb721/nxbt
f27f9aec539199aad92a0edd2bf875649b31ea5b
[ "MIT" ]
48
2020-10-18T00:52:13.000Z
2022-03-27T02:02:21.000Z
nxbt/__init__.py
zfghterb721/nxbt
f27f9aec539199aad92a0edd2bf875649b31ea5b
[ "MIT" ]
37
2020-09-16T05:50:05.000Z
2022-03-31T21:39:55.000Z
from .controller import ControllerServer from .controller import ControllerProtocol from .controller import SwitchReportParser from .controller import SwitchResponses from .controller import Controller from .bluez import * from .nxbt import Nxbt from .nxbt import Buttons from .nxbt import Sticks from .nxbt import JOYCON_L from .nxbt import JOYCON_R from .nxbt import PRO_CONTROLLER
29.538462
42
0.841146
50
384
6.4
0.3
0.15
0.2625
0.125
0
0
0
0
0
0
0
0
0.125
384
12
43
32
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
556747fbf93023c2764ce2504210ac942a3a6f29
1,027
py
Python
src/asphalt/serialization/serializers/pickle.py
Asphalt-framework/asphalt-serialization
07fbf1e2295e6e27c9b396f2b88943166759cf32
[ "Apache-2.0" ]
null
null
null
src/asphalt/serialization/serializers/pickle.py
Asphalt-framework/asphalt-serialization
07fbf1e2295e6e27c9b396f2b88943166759cf32
[ "Apache-2.0" ]
null
null
null
src/asphalt/serialization/serializers/pickle.py
Asphalt-framework/asphalt-serialization
07fbf1e2295e6e27c9b396f2b88943166759cf32
[ "Apache-2.0" ]
null
null
null
from __future__ import annotations import pickle from typing import Any from ..api import Serializer class PickleSerializer(Serializer): """ Serializes objects using the standard library :mod:`pickle` module. .. warning:: This serializer is insecure because it allows execution of arbitrary code when deserializing. Avoid using this if at all possible. :param protocol: pickle protocol level to use (defaults to the highest possible) """ __slots__ = "protocol" def __init__(self, protocol: int = pickle.HIGHEST_PROTOCOL): assert ( 0 <= protocol <= pickle.HIGHEST_PROTOCOL ), f'"protocol" must be between 0 and {pickle.HIGHEST_PROTOCOL}' self.protocol: int = protocol def serialize(self, obj: Any) -> bytes: return pickle.dumps(obj, protocol=self.protocol) def deserialize(self, payload: bytes) -> Any: return pickle.loads(payload) @property def mimetype(self) -> str: return "application/python-pickle"
27.756757
85
0.685492
121
1,027
5.694215
0.570248
0.047896
0.091437
0
0
0
0
0
0
0
0
0.002528
0.229796
1,027
36
86
28.527778
0.868521
0.290166
0
0
0
0
0.129815
0.071327
0
0
0
0
0.055556
1
0.222222
false
0
0.222222
0.166667
0.722222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
558731a52addd34709dde7921bc67fe971c73ba9
99
py
Python
mywebsite/customercare/apps.py
Zadigo/ecommerce_template
a4572c3faeaeb9cd399351c0fd1f19a4ef94de27
[ "MIT" ]
16
2020-07-01T03:42:40.000Z
2022-02-21T21:02:27.000Z
mywebsite/customercare/apps.py
Zadigo/ecommerce_template
a4572c3faeaeb9cd399351c0fd1f19a4ef94de27
[ "MIT" ]
14
2020-11-19T18:55:28.000Z
2022-02-01T22:08:23.000Z
mywebsite/customercare/apps.py
Zadigo/ecommerce_template
a4572c3faeaeb9cd399351c0fd1f19a4ef94de27
[ "MIT" ]
7
2020-06-30T23:55:36.000Z
2021-11-12T00:06:40.000Z
from django.apps import AppConfig class CustomercareConfig(AppConfig): name = 'customercare'
16.5
36
0.777778
10
99
7.7
0.9
0
0
0
0
0
0
0
0
0
0
0
0.151515
99
5
37
19.8
0.916667
0
0
0
0
0
0.121212
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
55873900a38007b737e2e82333a9960afdfbad3b
434
py
Python
cgi-bin/nph-sys-reboot.py
kuuen/drive-recorder
9fac7e629460d5358af417f759573bbbff83519d
[ "MIT" ]
null
null
null
cgi-bin/nph-sys-reboot.py
kuuen/drive-recorder
9fac7e629460d5358af417f759573bbbff83519d
[ "MIT" ]
null
null
null
cgi-bin/nph-sys-reboot.py
kuuen/drive-recorder
9fac7e629460d5358af417f759573bbbff83519d
[ "MIT" ]
null
null
null
#!/usr/bin/python3 # -*- coding: utf-8 -*- import os import subprocess import multrunchk import time # 多重起動チェック if multrunchk.chekMultipleRun('nph-sys-reboot.py', '') == False: print('HTTP/1.1 302 Found') print('Location: ../index.html') print('') quit() # 再起動コマンド発行。事前にvisudoに登録しておく必要がある subprocess.Popen(['sudo', '/sbin/reboot']) # 処理結果を返すほうが親切か? print('HTTP/1.1 302 Found') print('Location: ../index.html') print('')
17.36
64
0.679724
54
434
5.462963
0.62963
0.061017
0.067797
0.074576
0.311864
0.311864
0.311864
0.311864
0.311864
0.311864
0
0.031579
0.124424
434
24
65
18.083333
0.744737
0.218894
0
0.461538
0
0
0.345345
0
0
0
0
0
0
1
0
true
0
0.307692
0
0.307692
0.461538
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
3
55ad8013f89b7b4ed6a37bb911cfa22884a597fb
44,840
py
Python
pycolo/layers.py
sieben/pycolo
a272db31e33a384e050c0b68513b96ead76d10ae
[ "0BSD" ]
null
null
null
pycolo/layers.py
sieben/pycolo
a272db31e33a384e050c0b68513b96ead76d10ae
[ "0BSD" ]
1
2021-01-05T13:32:43.000Z
2021-01-05T13:32:43.000Z
pycolo/layers.py
remyleone/pycolo
a272db31e33a384e050c0b68513b96ead76d10ae
[ "0BSD" ]
1
2015-05-13T13:59:52.000Z
2015-05-13T13:59:52.000Z
# coding=utf-8 import logging import random from threading import Thread, Timer from pycolo import DEFAULT_OVERALL_TIMEOUT from pycolo.message import Message class Layer: """ An abstract Layer class that enforced a uniform interface for building a layered communications stack. """ receivers = list() numMessagesSent = 0 numMessagesReceived = 0 # def __init__(self, name): # self.name = name # def __str__(self): # return self.name def sendMessage(self, msg): """ :param msg: """ if msg: self.doSendMessage(msg) self.numMessagesSent += 1 def receiveMessage(self, msg): """ :param msg: """ if msg: self.numMessagesReceived += 1 self.doReceiveMessage(msg) def doSendMessage(self, msg): """ :param msg: """ pass def doReceiveMessage(self, msg): """ :param msg: """ pass def deliverMessage(self, msg): """ pass message to registered receivers :param msg: """ raise NotImplementedError # if self.receivers: # for receiver in self.receivers: # receiver.receiveMessage(msg) def registerReceiver(self, receiver): """ check for valid receiver :param receiver: """ raise NotImplementedError # if receiver and receiver != self: # # lazy creation of receiver list # if not self.receivers: # self.receivers = list() # # add receiver to list # self.receivers.append(receiver) def unregisterReceiver(self, receiver): """ remove receiver from list :param receiver: """ raise NotImplementedError # if self.receivers: # self.receivers.remove(receiver) class AdverseLayer(Layer): """ This class describes the functionality of a layer that drops messages with a given probability in order to test retransmissions between MessageLayer and UDPLayer etc. """ def __init__(self, txPacketLossProbability=0.0, rxPacketLossProbability=0.0): self.txPacketLossProbability = txPacketLossProbability self.rxPacketLossProbability = rxPacketLossProbability def doSendMessage(self, msg): """ Send a message if the randomly picked number tell it so. 
:param msg: """ raise NotImplementedError # if random.SystemRandom() >= self.txPacketLossProbability: # self.sendMessageOverLowerLayer(msg) # else: # logging.info("[%s] Outgoing message dropped: %s" % str(self), msg.key()) def doReceiveMessage(self, msg): """ Receive a message if the randomly picked number tell it so. :param msg: """ raise NotImplementedError # if random.SystemRandom() >= self.rxPacketLossProbability: # self.deliverMessage(msg) # else: # logging.info("[%s] Incoming message dropped: %s", str(self), msg.key()) class MatchingLayer(Layer): """ This class matches the request/response pairs using the token option. It must be below the TransferLayer, which requires set buddies for each message ({@link Response#getRequest()} and {@link Request#getResponse()}). """ #pairs = dict() class RequestResponsePair: """ Entity class to keep state of transfers """ # key = str() # request = request() def __init__(self): raise NotImplementedError # super(MatchingLayer, self).__init__() def doSendMessage(self, msg): raise NotImplementedError # if isinstance(msg, (request,)): # self.addOpenRequest(msg) # self.sendMessageOverLowerLayer(msg) def doReceiveMessage(self, msg): raise NotImplementedError # if isinstance(msg, (Response,)): # # check for missing token # if not self.pair and len(self.length): # logging.info("Remote endpoint failed to echo token: {:s}".format(msg.key())) # # TODO try to recover from peerAddress # # let timeout handle the problem # return # if self.pair: # # attach request and response to each other # self.response.setRequest(self.pair.request) # self.pair.request.setResponse(self.response) # logging.info("Matched open request: {:s}".format(self.response.sequenceKey())) # # TODO: ObservingManager.getInstance().isObserving(msg.exchangeKey()); # if msg.getFirstOption(OptionNumberRegistry.OBSERVE) is None: # self.removeOpenRequest(self.response.sequenceKey()) # else: # logging.info("Dropping unexpected response: 
{:s}".format(self.response.sequenceKey())) # return # self.deliverMessage(msg) def addOpenRequest(self, request): raise NotImplementedError # # create new Transaction # exchange = self.RequestResponsePair() # exchange.key = request.sequenceKey() # exchange.request = request # logging.info("Storing open request: {:s}".format(exchange.key)) # # associate token with Transaction # self.pairs.put(exchange.key, exchange) # return exchange def getOpenRequest(self, key): raise NotImplementedError # return self.pairs.get(key) def removeOpenRequest(self, key): raise NotImplementedError # exchange = self.pairs.remove(key) # logging.info("Cleared open request: {:s}".format(exchange.key)) def getStats(self): raise NotImplementedError # stats = dict() # stats["Open requests"] = self.pairs # stats["Messages sent"] = self.numMessagesSent # stats["Messages received"] = self.numMessagesReceived # return str(stats) class UpperLayer(Layer): """ generated source for class UpperLayer """ def sendMessageOverLowerLayer(self, msg): # """ generated source for method sendMessageOverLowerLayer """ # # check if lower layer assigned # if self.lowerLayer is not None: # self.lowerLayer.sendMessage(msg) # else: # logging.critical("[%s] ERROR: No lower layer present", self.getClass().__name__) raise NotImplementedError def setLowerLayer(self, layer): """ generated source for method setLowerLayer """ # # unsubscribe from old lower layer # if self.lowerLayer is not None: # self.lowerLayer.unregisterReceiver(self) # # set new lower layer # lowerLayer = layer # # subscribe to new lower layer # if lowerLayer is not None: # lowerLayer.registerReceiver(self) raise NotImplementedError def getLowerLayer(self): """ generated source for method getLowerLayer """ #return self.lowerLayer raise NotImplementedError #lowerLayer = layers() class TransactionLayer(Layer): """ The class TransactionLayer provides the functionality of the CoAP messaging layer as a subclass of {@link UpperLayer}. 
    It introduces reliable transport of confirmable messages over underlying
    layers by making use of retransmissions and exponential backoff, matching
    of confirmables to their corresponding ACK / RST, detection and
    cancellation of duplicate messages, retransmission of ACK / RST messages
    upon receiving duplicate confirmable messages. """

    # The message ID used for newly generated messages.
    # currentMID = random.SystemRandom() * 0x10000
    # NOTE(review): random.SystemRandom() is a generator object, not a number,
    # so this commented line would raise TypeError if enabled -- presumably
    # random.SystemRandom().random() * 0x10000 was intended; confirm before use.

    def nextMessageID(self):
        """Return the next message ID out of the consecutive 16-bit range (stub).

        :return: the current message ID
        """
        raise NotImplementedError
        # self.currentMID += 1
        # self.currentMID %= 0x10000
        # return self.currentMID

    # The timer daemon to schedule retransmissions.
    # timer = Timer(true)  # run as daemon
    # The Table to store the transactions of outgoing messages.
    # transactionTable = {}
    # The cache for duplicate detection.
    # dupCache = MessageCache()
    # Cache used to retransmit replies to incoming messages
    # replyCache = MessageCache()

    class Transaction:
        """ Entity class to keep state of retransmissions. """
        # Intended fields (not ported yet):
        # msg = Message()
        # retransmitTask = RestransmitTask()
        # numRetransmit = 0
        # timeout = 0  # to satisfy RESPONSE_RANDOM_FACTOR

    class MessageCache:
        """Ring buffer used for duplicate detection and reply retransmissions.

        Its size is configured through the Californium properties file.
        """

        def removeEldestEntry(eldest):
            # NOTE(review): the generated signature has no ``self``; when
            # called as a method, ``eldest`` receives the instance. Confirm.
            raise NotImplementedError
            # return size() > MESSAGE_CACHE_SIZE

    # class RetransmitTask(Timer):
    #     """Utility class to handle timeouts."""
    #     transaction = Transaction()
    #
    #     def RetransmitTask(transaction):
    #         self.transaction = transaction
    #
    #     def run():
    #         handleResponseTimeout(transaction)

    def initialTimeout(self):
        """Calculate the initial timeout for outgoing confirmable messages (stub).

        :return: the timeout in milliseconds
        """
        raise NotImplementedError
        # min = RESPONSE_TIMEOUT
        # f = RESPONSE_RANDOM_FACTOR
        # return min + (min * (f - 1) * random.SystemRandom())
        # NOTE(review): same random.SystemRandom() issue as currentMID above.

    def doSendMessage(msg):
        # Stub. Intended behavior (from the Java port): assign a fresh MID,
        # register confirmables for retransmission, cache replies for peer
        # retransmits, then send over the lower layer.
        # NOTE(review): the generated signature lacks ``self``; ``msg``
        # receives the instance when this is called as a method.
        raise NotImplementedError
        # # set message ID
        # if msg.getMID() < 0:
        #     msg.setMID(self.nextMessageID())
        # # check if message needs confirmation, i.e., a reply is expected
        # if msg.isConfirmable():
        #     # create new transmission context for retransmissions
        #     self.addTransaction(msg)
        # elif msg.isReply():
        #     # put message into ring buffer in case peer retransmits
        #     self.replyCache.put(msg.transactionKey(), msg)
        # # send message over unreliable channel
        # self.sendMessageOverLowerLayer(msg)

    def doReceiveMessage(msg):
        # Stub. Intended behavior (from the Java port): drop duplicates
        # (re-answering retransmitted confirmables from the reply cache),
        # close transactions on matching replies, auto-accept confirmable
        # responses, then deliver the message to registered receivers.
        raise NotImplementedError
        # # check for duplicate
        # if msg.key() in dupCache:
        #     # check for retransmitted Confirmable
        #     if msg.isConfirmable():
        #         # retrieve cached reply
        #         reply = replyCache.get(msg.transactionKey())
        #         if reply:
        #             # retransmit reply
        #             try:
        #                 logging.info("Replied to duplicate confirmable: %s" % msg.key())
        #                 self.sendMessageOverLowerLayer(reply)
        #             except IOException as e:
        #                 logging.severe("Replying to duplicate confirmable failed: %s\n%s", msg.key(), e.getMessage())
        #         else:
        #             logging.info("Dropped duplicate confirmable without cached reply: %s" % msg.key())
        #         # drop duplicate anyway
        #         return
        #     else:
        #         # ignore duplicate
        #         logging.info(String.format("Dropped duplicate: %s", msg.key()))
        #         return
        # else:
        #     # cache received message
        #     dupCache.put(msg.key(), msg)
        #
        # # check for reply to CON and remove transaction
        # if msg.isReply():
        #     # retrieve transaction for the incoming message
        #     transaction = getTransaction(msg)
        #     if transaction:
        #         # transmission completed
        #         removeTransaction(transaction)
        #         if msg.isEmptyACK():
        #             # transaction is complete, no information for higher layers
        #             return
        #         elif msg.getType() == Message.messageType.RST:
        #             handleIncomingReset(msg)
        #             return
        #     elif msg.getType() == Message.messageType.RST:
        #         handleIncomingReset(msg)
        #         return
        #     else:
        #         # ignore unexpected reply except RST, which could match to a NON sent by the endpoint
        #         logging.warning("Dropped unexpected reply: %s", msg.key())
        #         return
        #
        # # Only accept Responses here, Requests must be handled at application level
        # if isinstance(msg, Response) and msg.isConfirmable():
        #     try:
        #         logging.info("Accepted confirmable response: %s" % msg.key())
        #         sendMessageOverLowerLayer(msg.newAccept())
        #     except IOException as e:
        #         logging.severe("Accepting confirmable failed: %s\n%s" % msg.key(), e.getMessage())
        # # pass message to registered receivers
        # deliverMessage(msg)

    def handleIncomingReset(msg):
        # remove possible observers
        raise NotImplementedError
        # ObservingManager.getInstance().removeObserver(msg.getPeerAddress().toString(), msg.getMID())

    def handleResponseTimeout(transaction):
        # Stub. Intended behavior: retransmit up to MAX_RETRANSMIT times with
        # rescheduling; on exhaustion, cancel the transaction, drop observers,
        # and fire the message's timeout handler.
        raise NotImplementedError
        # max = MAX_RETRANSMIT
        # # check if limit of retransmissions reached
        # if transaction.numRetransmit < max:
        #     # retransmit message
        #     transaction.msg.setRetransmissioned(transaction.numRetransmit)
        #     logging.info("Retransmitting %s (%d of %d)" % transaction.msg.key(), transaction.numRetransmit, max)
        #     try:
        #         sendMessageOverLowerLayer(transaction.msg)
        #     except IOException as e:
        #         logging.severe("Retransmission failed: %s", e.getMessage())
        #         removeTransaction(transaction)
        #     # schedule next retransmission
        #     scheduleRetransmission(transaction)
        #     return
        # else:
        #     # cancel transmission
        #     removeTransaction(transaction)
        #     # cancel observations
        #     ObservingManager.getInstance().removeObserver(transaction.msg.getPeerAddress().toString())
        #     # invoke event handler method
        #     transaction.msg.handleTimeout()

    def addTransaction(msg):
        # Stub. Intended behavior: create a transmission context for *msg*,
        # store it in the transaction table, and schedule retransmission.
        raise NotImplementedError
        # # initialize new transmission context
        # transaction = Transaction()
        # transaction.msg = msg
        # transaction.numRetransmit = 0
        # transaction.retransmitTask = None
        #
        # transactionTable.put(msg.transactionKey(), transaction)
        #
        # schedule first retransmission
        # self.scheduleRetransmission(transaction)
        #
        # logging.info("Stored new transaction for %s" % msg.key())
        #
        # return transaction

    def getTransaction(msg):
        """Return the transaction stored for *msg*'s transaction key (stub)."""
        raise NotImplementedError
        # return transactionTable.get(msg.transactionKey())

    def removeTransaction(transaction):
        # cancel any pending retransmission schedule
        raise NotImplementedError
        # transaction.retransmitTask.cancel()
        # transaction.retransmitTask = None
        #
        # # remove transaction from table
        # self.transactionTable.remove(transaction.msg.transactionKey())
        #
        # logging.info("Cleared transaction for %s" % transaction.msg.key())

    def scheduleRetransmission(transaction):
        # Stub. Intended behavior: (re)schedule the retransmit task with an
        # exponentially doubling timeout, starting from initialTimeout().
        raise NotImplementedError
        # # cancel existing schedule (if any)
        # if transaction.retransmitTask:
        #     transaction.retransmitTask.cancel()
        #
        # # create new retransmission task
        # transaction.retransmitTask = RetransmitTask(transaction)
        #
        # # calculate timeout using exponential back-off
        # if transaction.timeout == 0:
        #     # use initial timeout
        #     transaction.timeout = self.initialTimeout()
        # else:
        #     # double timeout
        #     transaction.timeout *= 2
        #
        # # schedule retransmission task
        # timer.schedule(transaction.retransmitTask, transaction.timeout)

    def __str__(self):
        # NOTE(review): a __str__ that raises breaks str()/print() on this
        # layer; it is kept as a stub pending the real implementation below.
        raise NotImplementedError
        # stats = dict()
        # stats["Current message ID"] = self.currentMID
        # stats["Open transactions"] = self.transactionTable.size()
        # stats["Messages sent"] = self.numMessagesSent
        # stats["Messages received"] = self.numMessagesReceived
        # return str(stats)


class TransferLayer(Layer):
    """ The class TransferLayer provides support for <http://tools.ietf.org/html/draft-ietf-core-block">blockwise transfers {@link #doSendMessage(Message)} and {@link #doReceiveMessage(Message)} do not distinguish between clients and server directly, but rather between incoming and outgoing transfers. This saves duplicate code, but introduces rather confusing Request/Response checks at various places. TODO: Explore alternative designs. 
    """

    class TransferContext:
        """Bookkeeping for a single blockwise transfer.

        Intended fields (per the commented port below): the cached message,
        its URI path, and the current block option.
        """
        # cache = Message()
        # uriPath = ""
        # current = BlockOption()
        # TODO: timer
        # def __init__(self, msg):
        #     raise NotImplementedError
        #     if isinstance(msg, (request,)):
        #         self.cache = msg
        #         self.uriPath = msg.getUriPath()
        #         self.current = msg.getFirstOption(options.BLOCK1)
        #     elif isinstance(msg, (Response,)):
        #         msg.requiresToken(False)  # FIXME check if still required after new TokenLayer
        #         self.cache = msg
        #         self.uriPath = msg.getRequest().getUriPath()
        #         self.current = msg.getFirstOption(options.BLOCK2)
        #     logging.info("Created new transfer context for {:s}: {:s}".format(self.uriPath, msg.sequenceKey()))

    # Transfer tables and the default block size exponent (SZX).
    # NOTE(review): indentation was lost in the generated source; these are
    # assumed to be TransferLayer class attributes (the commented method
    # bodies reference self.incoming / self.outgoing / self.defaultSZX) --
    # confirm against the original file.
    incoming = dict()
    outgoing = dict()
    defaultSZX = int()

    def __init__(self, defaultBlockSize):
        """Configure the default block size (stub).

        :param defaultBlockSize: preferred block size in bytes; per the
            commented port, 0 selects the value from the properties file,
            a positive value is encoded to SZX, otherwise defaultSZX is -1.
        """
        raise NotImplementedError
        # super(TransferLayer, self).__init__()
        # if defaultBlockSize == 0:
        #     defaultBlockSize = Properties.std.getInt("DEFAULT_BLOCK_SIZE")
        # if defaultBlockSize > 0:
        #     self.defaultSZX = BlockOption.encodeSZX(defaultBlockSize)
        #     if not BlockOption.validSZX(self.defaultSZX):
        #         self.defaultSZX = 6 if defaultBlockSize > 1024 else BlockOption.encodeSZX(defaultBlockSize & 0x07f0)
        #         logging.warning("Unsupported block size {:d}, using {:d} instead".format(defaultBlockSize, BlockOption.decodeSZX(self.defaultSZX)))
        # else:
        #     self.defaultSZX = -1

    def doSendMessage(self, msg):
        # Stub. Intended behavior (from the Java port): split oversized
        # payloads into blocks, cache the transfer for follow-up block
        # demands, and otherwise pass the message straight down.
        raise NotImplementedError
        # sendSZX = self.defaultSZX
        # sendNUM = 0
        # if isinstance(msg, (Response,)) and msg.getRequest() is not None:
        #     if self.buddyBlock:
        #         if self.buddyBlock.getSZX() < self.defaultSZX:
        #             sendSZX = self.buddyBlock.getSZX()
        #         sendNUM = self.buddyBlock.getNUM()
        # if msg.payloadSize() > BlockOption.decodeSZX(sendSZX):
        #     if self.msgBlock is not None:
        #         if self.block1 is not None and self.block1.getM() or block2 is not None and self.block2.getM():
        #             msg.setOption(self.block1)
        #             msg.setOption(self.block2)
        #             self.outgoing.put(msg.sequenceKey(), self.transfer)
        #             logging.info("Caching blockwise transfer for NUM {:d}: {:s}".format(sendNUM, msg.sequenceKey()))
        #         else:
        #             logging.info("Answering block request without caching: {:s} | {:s}".format(msg.sequenceKey(), block2))
        #         self.sendMessageOverLowerLayer(self.msgBlock)
        #     else:
        #         logging.info("Rejecting initial out-of-scope request: {:s} | NUM: {:d}, SZX: {:d} ({:d} bytes), M: n/a, {:d} bytes available".format(msg.sequenceKey(), sendNUM, sendSZX, BlockOption.decodeSZX(sendSZX), msg.payloadSize()))
        #         self.handleOutOfScopeError(msg.newReply(True))
        # else:
        #     self.sendMessageOverLowerLayer(msg)

    def doReceiveMessage(self, msg):
        # Stub. Intended behavior (from the Java port): route incoming
        # payload blocks to handleIncomingPayload(), serve demands for the
        # next block from the outgoing cache, and free transfers on
        # completion or client abort before delivering upwards.
        raise NotImplementedError
        # blockIn = None
        # blockOut = None
        # if isinstance(msg, (request,)):
        #     blockIn = msg.getFirstOption(options.BLOCK1)
        #     blockOut = msg.getFirstOption(options.BLOCK2)
        # elif isinstance(msg, (Response,)):
        #     blockIn = msg.getFirstOption(options.BLOCK2)
        #     blockOut = msg.getFirstOption(options.BLOCK1)
        #     if blockOut is not None:
        #         blockOut.setNUM(blockOut.getNUM() + 1)
        # else:
        #     logging.warning("Unknown message type received: {:s}".format(msg.key()))
        #     return
        # if blockIn is None and msg.requiresBlockwise():
        #     blockIn = BlockOption(options.BLOCK1, 0, self.defaultSZX, True)
        #     self.handleIncomingPayload(msg, blockIn)
        #     return
        # elif blockIn is not None:
        #     self.handleIncomingPayload(msg, blockIn)
        #     return
        # elif blockOut is not None:
        #     logging.info("Received demand for next block: {:s} | {:s}".format(msg.sequenceKey(), blockOut))
        #     if self.transfer:
        #         if isinstance(msg, (request,)) and not msg.getUriPath() == transfer.uriPath:
        #             self.outgoing.remove(msg.sequenceKey())
        #             logging.info("Freed blockwise transfer by client token reuse: {:s}".format(msg.sequenceKey()))
        #         else:
        #             if isinstance(msg, (request,)):
        #                 self.transfer.cache.setMID(msg.getMID())
        #             if next is not None:
        #                 try:
        #                     logging.info("Sending next block: {:s} | {:s}".format(next.sequenceKey(), blockOut))
        #                     self.sendMessageOverLowerLayer(next)
        #                 except IOException as e:
        #                     logging.critical("Failed to send block response: {:s}".format(e.getMessage()))
        #             if not self.respBlock.getM() and isinstance(msg, (request,)):
        #                 self.outgoing.remove(msg.sequenceKey())
        #                 logging.info("Freed blockwise download by completion: {:s}".format(next.sequenceKey()))
        #                 return
        #             elif isinstance(msg, (Response,)) and not blockOut.getM():
        #                 self.outgoing.remove(msg.sequenceKey())
        #                 logging.info("Freed blockwise upload by completion: {:s}".format(msg.sequenceKey()))
        #                 msg.setRequest(self.transfer.cache)
        #     else:
        #         logging.warning("Rejecting out-of-scope demand for cached transfer (freed): {:s} | {:s}, {:d} bytes available".format(msg.sequenceKey(), blockOut, transfer.cache.payloadSize()))
        #         self.outgoing.remove(msg.sequenceKey())
        #         self.handleOutOfScopeError(msg.newReply(True))
        #         return
        # elif isinstance(msg, (Response,)):
        #     if self.transfer:
        #         msg.setRequest(self.transfer.cache)
        #         self.outgoing.remove(msg.sequenceKey())
        #         logging.info("Freed outgoing transfer by client abort: {:s}".format(msg.sequenceKey()))
        #     transfer = self.incoming.get(msg.sequenceKey())
        #     if transfer is not None:
        #         msg.setRequest(transfer.cache)
        #         self.incoming.remove(msg.sequenceKey())
        #         logging.info("Freed incoming transfer by client abort: {:s}".format(msg.sequenceKey()))
        # self.deliverMessage(msg)
        # NOTE(review): duplicate raise kept from the generated source; it is
        # unreachable behind the raise at the top of this method.
        raise NotImplementedError

    def handleIncomingPayload(self, msg, blockOpt):
        # Stub. Intended behavior (from the Java port): append in-order
        # blocks to the cached transfer, start a new transfer on block 0,
        # demand the next block while M is set, and deliver the reassembled
        # message once the transfer is complete.
        raise NotImplementedError
        # transfer = self.incoming.get(msg.sequenceKey())
        # if blockOpt.getNUM() > 0 and transfer is not None:
        #     if blockOpt.getNUM() * blockOpt.getSize() == (transfer.current.getNUM() + 1) * transfer.current.getSize():
        #         transfer.cache.appendPayload(msg.getPayload())
        #         transfer.cache.setMID(msg.getMID())
        #         logging.info("Received next block: {:s} | {:s}".format(msg.sequenceKey(), blockOpt))
        #     else:
        #         logging.info("Dropping wrong block: {:s} | {:s}".format(msg.sequenceKey(), blockOpt))
        # elif blockOpt.getNUM() == 0 and msg.payloadSize() > 0:
        #     if msg.payloadSize() > blockOpt.getSize():
        #         blockOpt.setNUM(newNUM - 1)
        #         msg.setPayload(Arrays.copyOf(msg.getPayload(), newNUM))
        #     transfer = self.TransferContext(msg)
        #     self.incoming.put(msg.sequenceKey(), transfer)
        #     logging.info("Incoming blockwise transfer: {:s} | {:s}".format(msg.sequenceKey(), blockOpt))
        # else:
        #     logging.info("Rejecting out-of-order block: {:s} | {:s}".format(msg.sequenceKey(), blockOpt))
        #     self.handleIncompleteError(msg.newReply(True))
        #     return
        # if blockOpt.getM():
        #     if self.demandSZX > self.defaultSZX:
        #         demandNUM = self.demandSZX / self.defaultSZX * self.demandNUM
        #         demandSZX = self.defaultSZX
        #     if isinstance(msg, (Response,)):
        #         reply = request(codes.METHOD_GET, not msg.isNonConfirmable())
        #         reply.setURI("coap://" + msg.getPeerAddress().__str__() + transfer.uriPath)
        #         demandNUM += 1
        #     elif isinstance(msg, (request,)):
        #         reply = Response(codes.RESP_VALID)
        #         reply.setType(self.messageType.ACK if msg.isConfirmable() else self.messageType.NON)
        #         reply.setPeerAddress(msg.getPeerAddress())
        #         if msg.isConfirmable():
        #             reply.setMID(msg.getMID())
        #     else:
        #         logging.critical("Unsupported message type: {:s}".format(msg.key()))
        #         return
        #     reply.setOption(msg.getFirstOption(options.TOKEN))
        #     reply.setOption(next)
        #     try:
        #         logging.info("Demanding next block: {:s} | {:s}".format(reply.sequenceKey(), next))
        #         sendMessageOverLowerLayer(reply)
        #     except IOException as e:
        #         logging.critical("Failed to request block: {:s}".format(e.getMessage()))
        #     transfer.current = blockOpt
        # else:
        #     transfer.cache.setOption(blockOpt)
        #     logging.info("Finished blockwise transfer: {:s}".format(msg.sequenceKey()))
        #     self.incoming.remove(msg.sequenceKey())
        #     self.deliverMessage(transfer.cache)

    def handleOutOfScopeError(self, resp):
        """Reply with RESP_BAD_REQUEST / "BlockOutOfScope" (stub)."""
        raise NotImplementedError
        # resp.setCode(codes.RESP_BAD_REQUEST)
        # resp.setPayload("BlockOutOfScope")
        # try:
        #     sendMessageOverLowerLayer(resp)
        # except IOException as e:
        #     logging.critical("Failed to send error message: {:s}".format(e.getMessage()))

    def handleIncompleteError(self, resp):
        raise NotImplementedError
        # """ generated source for method
        # handleIncompleteError """
        # Intended behavior: reply with RESP_REQUEST_ENTITY_INCOMPLETE.
        # resp.setCode(codes.RESP_REQUEST_ENTITY_INCOMPLETE)
        # resp.setPayload("Start with block num 0")
        # try:
        #     sendMessageOverLowerLayer(resp)
        # except IOException as e:
        #     logging.critical("Failed to send error message: {:s}".format(e.getMessage()))

    def getBlock(cls, msg, num, szx):
        # Stub. Intended behavior (from the Java port): cut block *num* of
        # size 1 << (szx + 4) bytes out of *msg*'s payload and return it as a
        # new Request/Response carrying the matching BLOCK1/BLOCK2 option, or
        # None when the offset is past the end of the payload.
        # NOTE(review): first parameter is named ``cls`` but no @classmethod
        # decorator is present -- confirm the intended binding.
        raise NotImplementedError
        # """ generated source for method getBlock """
        # blockSize = 1 << (szx + 4)
        # payloadOffset = num * blockSize
        # payloadLeft = msg.payloadSize() - payloadOffset
        # if payloadLeft > 0:
        #     if isinstance(msg, (request,)):
        #         block = request(msg.getCode(), msg.isConfirmable())
        #     else:
        #         block = Response(msg.getCode())
        #     if num == 0 and msg.getType() == Message.messageType.CON:
        #         block.setType(Message.messageType.CON)
        #     else:
        #         block.setType(Message.messageType.NON if msg.isNonConfirmable() else Message.messageType.ACK)
        #     block.setMID(msg.getMID())
        #     block.setPeerAddress(msg.getPeerAddress())
        #     for opt in msg.getOptions():
        #         block.addOption(opt)
        #     if not m:
        #         blockSize = payloadLeft
        #     System.arraycopy(msg.getPayload(), payloadOffset, blockPayload, 0, blockSize)
        #     block.setPayload(blockPayload)
        #     if isinstance(msg, (request,)):
        #         blockOpt = BlockOption(OptionNumberRegistry.BLOCK1, num, szx, m)
        #     else:
        #         blockOpt = BlockOption(OptionNumberRegistry.BLOCK2, num, szx, m)
        #     block.setOption(blockOpt)
        #     return block
        # else:
        #     return None

    def getStats(self):
        """Return a printable summary of this layer's counters (stub)."""
        raise NotImplementedError
        # stats = {"Default block size": BlockOption.decodeSZX(self.defaultSZX),
        #          "Outgoing cache size": len(self.outgoing),
        #          "Incoming cache size": len(self.incoming),
        #          "Messages sent": self.numMessagesSent,
        #          "Messages received": self.numMessagesReceived}
        # return str(stats)


class TokenLayer(UpperLayer):
    """Takes care of unique tokens for each request/response exchange.

    Additionally, the TokenLayer applies an overall timeout to each
    request/response exchange.
    """

    # exchanges = dict()
    # A timer for scheduling overall request timeouts.
#timer = Timer(True) # The time to wait for requests to complete, in milliseconds. #sequenceTimeout = 0 class RequestResponseSequence: """ Entity class to keep state of transfers """ # public String key public Request request # public # TimerTask # timeoutTask class TimeoutTask: # from TimerTask # """ # Utility class to provide transaction timeouts # """ def run(self, sequence): raise NotImplementedError #self.transferTimedOut(sequence) def __init__(self, sequence, sequenceTimeout=DEFAULT_OVERALL_TIMEOUT): raise NotImplementedError # member initialization # self.sequenceTimeout = sequenceTimeout # self.sequence = sequence def doSendMessage(msg): raise NotImplementedError # # set token option if required # if msg.requiresToken(): # msg.setToken(TokenManager.getInstance().acquireToken(true)) # # # use overall timeout for clients (e.g., server crash after separate response ACK) # if msg is Request: # logging.info(String.format("Requesting response for %s: %s", ((Request) # msg).getUriPath(), msg.sequenceKey())); # addExchange((Request) # msg); # elif (msg.getCode() == codes.EMPTY_MESSAGE): # logging.info(String.format("Accepting request: %s", msg.key())) # else: # logging.info(String.format("Responding request: %s", msg.sequenceKey())) # # self.sendMessageOverLowerLayer(msg) def doReceiveMessage(msg): raise NotImplementedError # if (msg # instanceof # Response) { # # response = (Response) # msg # # RequestResponseSequence # sequence = getExchange(msg.sequenceKey()) # # # check for missing token # if (not sequence and not response.getToken()): # logging.warning("Remote endpoint failed to echo token: %s" % msg.key()) # # # TODO try to recover from peerAddress # # # if sequence: # # cancel timeout # sequence.timeoutTask.cancel() # # # TODO separate observe registry # if msg.getFirstOption(options.OBSERVE) == null: # removeExchange(msg.sequenceKey()) # # logging.info("Incoming response from %s: %s # RTT: %fms", ((Response) # msg).getRequest().getUriPath(), 
msg.sequenceKey(), ((Response) # msg).getRTT())); # deliverMessage(msg) # # else: # logging.warning("Dropping unexpected response: %s", response.sequenceKey()) # # elif msg is Request: # logging.info("Incoming request: %s" % msg.sequenceKey()) # # self.deliverMessage(msg) def addExchange(request): raise NotImplementedError # be aware when manually setting tokens, as request/response will be replace # self.removeExchange(request.sequenceKey()) # # # create new Transaction # RequestResponseSequence # sequence = new # RequestResponseSequence() # sequence.key = request.sequenceKey() # sequence.request = request # sequence.timeoutTask = new # TimeoutTask(sequence) # # # associate token with Transaction # exchanges.put(sequence.key, sequence) # # timer.schedule(sequence.timeoutTask, sequenceTimeout) # # logging.fine("Stored new exchange: %s" % sequence.key) # # return sequence def getExchange(key): raise NotImplementedError # return exchanges.get(key) def removeExchange(key): raise NotImplementedError # exchange = exchanges.remove(key) # # if exchange: # exchange.timeoutTask.cancel() # TokenManager.getInstance().releaseToken(exchange.request.getToken()) # logging.finer(String.format("Cleared exchange: %s", exchange.key)) def transferTimedOut(exchange): raise NotImplementedError # cancel transaction # self.removeExchange(exchange.key) # # logging.warning("Request/Response exchange timed out: %s" % exchange.request.sequenceKey()) # # exchange.request.handleTimeout() # call event handler def getStats(self): raise NotImplementedError # stats = dict() # # stats["Request-Response exchanges"] = self.exchanges.size() # stats["Messages sent"] = self.numMessagesSent # stats["Messages received"] = self.numMessagesReceived # # return str(stats) class UDPLayer(Layer): """ The class UDPLayer exchanges CoAP messages with remote endpoints using UDP datagrams. 
It is an unreliable channel and thus datagrams may arrive out of order, appear duplicated, or are lost without any notice, especially on lossy physical layers. The UDPLayer is the base layer of the stack, sub-calssing {@link Layer}. Any {@link UpperLayer} can be stacked on top, using a Communicator as stack builder. """ # The UDP socket used to send and receive datagrams # TODO Use MulticastSocket #socket = DatagramSocket() # The thread that listens on the socket for incoming datagrams #receiverThread = ReceiverThread() # Inner Classes ////////////////////////////////////////////////////////// class ReceiverThread(Thread): """ generated source for class ReceiverThread """ def __init__(self): raise NotImplementedError # """ generated source for method __init__ """ # super(ReceiverThread, self).__init__("ReceiverThread") def run(self): """ generated source for method run """ # always listen for incoming datagrams # while True: # # allocate buffer # # +1 to check for > RX_BUFFER_SIZE # # initialize new datagram # # receive datagram # try: # self.socket.receive(datagram) # except IOException as e: # logging.critical("Could not receive datagram: %s" % e.getMessage()) # continue # # TODO: Dispatch to worker thread # self.datagramReceived(self.datagram) def __init__(self, port, daemon): """ Constructor for a new UDP layer @param port The local UDP port to listen for incoming messages @param daemon True if receiver thread should terminate with main thread """ raise NotImplementedError # super(UDPLayer, self).__init__() # # initialize members # self.socket = DatagramSocket(port) # self.receiverThread = self.ReceiverThread() # # decide if receiver thread terminates with main thread # self.receiverThread.setDaemon(daemon) # # start listening right from the beginning # self.receiverThread.start() def setDaemon(self, on): """ Decides if the listener thread persists after the main thread terminates @param on True if the listener thread should stay alive after the main thread 
terminates. This is useful for e.g. server applications """ raise NotImplementedError # self.receiverThread.setDaemon(on) def doSendMessage(self, msg): raise NotImplementedError # payload = msg.toByteArray() # retrieve payload # # create datagram # datagram = DatagramPacket(payload, msg.getPeerAddress().getAddress(), msg.getPeerAddress().getPort(),) # # remember when this message was sent for the first time # # set timestamp only once in order # # to handle retransmissions correctly # if msg.getTimestamp() == -1: # msg.setTimestamp(System.nanoTime()) # # send it over the UDP socket # self.socket.send(datagram) def doReceiveMessage(self, msg): raise NotImplementedError # pass message to registered receivers # self.deliverMessage(msg) def datagramReceived(self, datagram): raise NotImplementedError # if datagram.getLength() > 0: # # get current time # # extract message data from datagram # # create new message from the received data # if self.msg is not None: # # remember when this message was received # self.msg.setTimestamp(timestamp) # self.msg.setPeerAddress(EndpointAddress(datagram.getAddress(), datagram.getPort())) # if datagram.getLength() > Properties.std.getInt("RX_BUFFER_SIZE"): # logging.info("Marking large datagram for blockwise transfer: {:s}".format(msg.key())) # self.msg.requiresBlockwise(True) # # protect against unknown exceptions # try: # # call receive handler # receiveMessage(msg) # except Exception as e: # e.with_traceback() # logging.critical(self.builder.__str__()) # else: # logging.critical("Illegal datagram received:\n%s" % data.__str__()) # else: # logging.info("Dropped empty datagram from: {:s}:{:d}".format(datagram.getAddress().getHostName(), datagram.getPort())) def isDaemon(self): """ Checks whether the listener thread persists after the main thread terminates @return True if the listener thread stays alive after the main thread terminates. This is useful for e.g. 
server applications """ raise NotImplementedError #return self.receiverThread.isDaemon() def getPort(self): raise NotImplementedError # return self.socket.getLocalPort() def getStats(self): raise NotImplementedError # stats = dict() # stats["UDP port"] = self.port # stats["Messages sent"] = self.numMessagesSent # stats["Messages received"] = self.numMessagesReceived # return str(stats) class Communicator(UpperLayer): """ The class Communicator provides the message passing system and builds the communication stack through which messages are sent and received. As a subclass of {@link UpperLayer} it is actually a composite layer that contains the subsequent layers in the order defined in {@link #buildStack()}. Endpoints must register as a receiver using {@link #registerReceiver(MessageReceiver)}. Prior to that, they should configure the Communicator using @link {@link #setup(int, boolean)}. A client only using {@link Request}s are not required to do any of that. Here, {@link Message}s will create the required instance automatically. The Communicator implements the Singleton pattern, as there should only be one stack per endpoint and it is required in different contexts to send a message. It is not using the Enum approach because it still needs to inherit from {@link UpperLayer}. 
""" # singleton = None # udpPort = 0 # runAsDaemon = True # # transferBlockSize = 0 # # tokenLayer = TokenLayer() # transferLayer = TransferLayer() # matchingLayer = MatchingLayer() # transactionLayer = TransactionLayer() # adverseLayer = AdverseLayer() # udpLayer = UDPLayer() def __init__(self): """ Constructor for a new Communicator @param port The local UDP port to listen for incoming messages @param daemon True if receiver thread should terminate with main thread @param defaultBlockSize The default block size used for block-wise transfers or -1 to disable outgoing block-wise transfers """ raise NotImplementedError # initialize layers # self.tokenLayer = TokenLayer() # self.transferLayer = TransferLayer(self.transferBlockSize) # self.matchingLayer = MatchingLayer() # self.transactionLayer = TransactionLayer() # self.adverseLayer = AdverseLayer() # self.udpLayer = UDPLayer(self.udpPort, self.runAsDaemon) # # connect layers # self.buildStack() def setupPort(port): raise NotImplementedError # if port != self.udpPort and not singleton: # if not singleton: # udpPort = port # logging.config("Custom port: %d", udpPort) # else: # logging.severe("Communicator already initialized, setup failed") def setupTransfer(defaultBlockSize): raise NotImplementedError # if (defaultBlockSize!=transferBlockSize && singleton==null): # if (singleton==null): # transferBlockSize = defaultBlockSize # logging.config("Custom block size: %d" % transferBlockSize) # else: # logging.severe("Communicator already initialized, setup failed") def setupDeamon(daemon): raise NotImplementedError # if daemon != self.runAsDaemon and not singleton: # if not singleton: # self.runAsDaemon = daemon # logging.config("Custom daemon option: %b" % self.runAsDaemon) # else: # logging.critical("Communicator already initialized, setup failed") def buildStack(self): """ This method connects the layers in order to build the communication stack It can be overridden by subclasses in order to add further layers, e.g. 
for introducing a layer that drops or duplicates messages by a probabilistic model in order to evaluate the implementation. """ raise NotImplementedError # self.setLowerLayer(self.tokenLayer) # self.tokenLayer.setLowerLayer(self.transferLayer) # self.transferLayer.setLowerLayer(self.matchingLayer) # self.matchingLayer.setLowerLayer(self.transactionLayer) # self.transactionLayer.setLowerLayer(self.udpLayer) # # transactionLayer.setLowerLayer(adverseLayer); # # adverseLayer.setLowerLayer(udpLayer); def doSendMessage(self, msg): """ defensive programming before entering the stack, lower layers should assume a correct message. """ raise NotImplementedError # if msg: # # check message before sending through the stack # if msg.getPeerAddress().getAddress() is None: # Exception("Remote address not specified") # # delegate to first layer # self.sendMessageOverLowerLayer(msg) def doReceiveMessage(self, msg): raise NotImplementedError # if isinstance(msg, (Response,)): # # initiate custom response handling # if response.getRequest() is not None: # response.getRequest().handleResponse(response) # # pass message to registered receivers # self.deliverMessage(msg)
38.259386
238
0.604862
4,253
44,840
6.351987
0.167411
0.055081
0.01799
0.008551
0.227392
0.179197
0.139182
0.121414
0.083065
0.05323
0
0.002562
0.295049
44,840
1,171
239
38.292058
0.852072
0.803747
0
0.536585
0
0
0
0
0
0
0
0.00427
0
1
0.402439
false
0.012195
0.030488
0
0.54878
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
e951b065304a1d1aaefd84a7567d6fc5594c4297
5,243
py
Python
flask_identity/mixins.py
SolardiaX/flask-identity
1af5c6b8422abd434f3b534d72c22adb0d435679
[ "MIT" ]
null
null
null
flask_identity/mixins.py
SolardiaX/flask-identity
1af5c6b8422abd434f3b534d72c22adb0d435679
[ "MIT" ]
null
null
null
flask_identity/mixins.py
SolardiaX/flask-identity
1af5c6b8422abd434f3b534d72c22adb0d435679
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ identity.mixins ~~~~~~~~~~~~~~~~~~~ Mixins of Flask-Identity :author: solardiax <solardiax@hotmail.com> :copyright: (c) 2020 by DreamEx Works. :license: GPL-3.0, see LICENSE for more details. """ # noinspection PyUnresolvedReferences class RoleMixin(object): """Mixin for `Role` model definitions""" __hash__ = object.__hash__ def __eq__(self, other): return super(object).__eq__(other) def __ne__(self, other): return not self.__eq__(other) # noinspection PyUnresolvedReferences class UserMixin(object): """ This class adds required methods to the User data-model. Example: class User(db.Model, UserMixin): ... """ __hash__ = object.__hash__ @property def is_actived(self): return True if not hasattr(self, 'active') else self.active @property def is_authenticated(self): return True @property def is_anonymous(self): return False def get_id(self): try: return str(self.id) except AttributeError: raise NotImplementedError('No `id` attribute - override `get_id`') def has_roles(self, *requirements): """ Return True if the user has all of the specified roles. Return False otherwise. has_roles() accepts a list of requirements: has_roles(requirement1, requirement2, requirement3). Each requirement is either a role_name, or a tuple_of_role_names. role_name example: 'manager' tuple_of_role_names: ('funny', 'witty', 'hilarious') A role_name-requirement is accepted when the user has this role. A tuple_of_role_names-requirement is accepted when the user has ONE of these roles. has_roles() returns true if ALL of the requirements have been accepted. 
For example: has_roles('a', ('b', 'c'), d) Translates to: User has role 'a' AND (role 'b' OR role 'c') AND role 'd' """ roles = getattr(self, 'roles') if hasattr(self, 'roles') else () role_names = [(r.name if isinstance(r, RoleMixin) else r) for r in roles] for requirement in requirements: if isinstance(requirement, (list, tuple)): tuple_of_role_names = requirement authorized = False for role_name in tuple_of_role_names: if role_name in role_names: # tuple_of_role_names requirement was met: break out of loop authorized = True break if not authorized: return False else: # this is a role_name requirement role_name = requirement # the user must have this role if role_name not in role_names: return False # role_name requirement failed: return False # All requirements have been met: return True return True def get_auth_token(self): """ Constructs the user's authentication token. This data **MUST** be securely signed using the identity token_context """ from .utils import current_identity field = current_identity.config_value('IDENTITY_FIELD') uniquifier = getattr(self, 'uniquifier') if hasattr(self, 'uniquifier') else None # noinspection PyProtectedMember return current_identity._token_context.generate_token({ field: getattr(self, field), 'uniquifier': uniquifier }) def get_security_payload(self): """Serialize user object as response payload.""" from .utils import current_identity field = current_identity.config_value('IDENTITY_FIELD') uniquifier = getattr(self, 'uniquifier') if hasattr(self, 'uniquifier') else None return {"id": str(self.id), field: getattr(self, field), 'uniquifier': uniquifier} def verify_password(self, passwd): """Verify password""" from .utils import current_identity # noinspection PyProtectedMember return current_identity._hash_context.verify_context(passwd, self.password) def __eq__(self, other): """ Checks the equality of two `UserMixin` objects using `get_id`. 
""" if isinstance(other, UserMixin): return self.get_id() == other.get_id() return NotImplemented def __ne__(self, other): """ Checks the inequality of two `UserMixin` objects using `get_id`. """ equal = self.__eq__(other) if equal is NotImplemented: return NotImplemented return not equal class AnonymousUserMixin(object): """ This is the default object for representing an anonymous user. """ @property def is_authenticated(self): return False @property def is_actived(self): return False @property def is_anonymous(self): return True def get_id(self): return # noinspection PyUnusedLocal,PyMethodMayBeStatic def has_roles(self, *role): return False
29.621469
91
0.611673
592
5,243
5.221284
0.277027
0.023293
0.025235
0.031058
0.296344
0.225817
0.159172
0.087997
0.087997
0.087997
0
0.002737
0.303071
5,243
176
92
29.789773
0.843186
0.345031
0
0.481013
0
0
0.045556
0
0
0
0
0
0
1
0.21519
false
0.025316
0.037975
0.126582
0.582278
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
e95c3417ff52f5d1b48338785a71f6559401b18d
856
py
Python
hio/server/fo/fo.py
serafim-costa/LDP_Protocols
da4ceb81577d190f88fd57fd2a3224ff0462b30b
[ "MIT" ]
78
2018-12-31T09:06:05.000Z
2022-03-28T09:02:11.000Z
hio/server/fo/fo.py
serafim-costa/LDP_Protocols
da4ceb81577d190f88fd57fd2a3224ff0462b30b
[ "MIT" ]
1
2018-02-19T17:34:00.000Z
2018-02-19T19:19:21.000Z
hio/server/fo/fo.py
serafim-costa/LDP_Protocols
da4ceb81577d190f88fd57fd2a3224ff0462b30b
[ "MIT" ]
27
2018-12-03T15:09:22.000Z
2022-03-22T09:55:44.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created by tianhao.wang at 9/18/18 """ import abc import math import numpy as np from scipy.stats import norm class FO(object): __metaclass__ = abc.ABCMeta def __init__(self, args): self.args = args self.ee = 0.0 self.estimates = [] self.total_count = 0 self.p = 0 self.q = 0 self.g = 0 self.var = 0.0 self.d = 0 self.c = 0 @abc.abstractmethod def init_e(self, eps, domain): return def estimate(self, perturbed_datas): return @abc.abstractmethod def perturb(self, datas, domain): return @abc.abstractmethod def support_sr(self, report, value): return @abc.abstractmethod def aggregate(self, domain, perturbed_datas): return
18.212766
49
0.575935
111
856
4.324324
0.522523
0.072917
0.166667
0.1625
0
0
0
0
0
0
0
0.027491
0.320093
856
46
50
18.608696
0.797251
0.089953
0
0.290323
0
0
0
0
0
0
0
0
0
1
0.193548
false
0
0.129032
0.16129
0.548387
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
3
e9c9ff18cb418e9b07b8975619e6c01f149273f7
17,047
py
Python
Arlo.py
LenShustek/ArloCamera
c10b481f3ed6cf660846ccac5735ab2f0cd42e90
[ "MIT" ]
11
2016-05-15T17:01:31.000Z
2021-08-31T07:51:46.000Z
Arlo.py
LenShustek/ArloCamera
c10b481f3ed6cf660846ccac5735ab2f0cd42e90
[ "MIT" ]
1
2016-09-04T06:01:51.000Z
2016-09-21T00:17:58.000Z
Arlo.py
LenShustek/ArloCamera
c10b481f3ed6cf660846ccac5735ab2f0cd42e90
[ "MIT" ]
3
2016-01-19T22:46:13.000Z
2020-03-28T01:52:19.000Z
## # Copyright 2016 Jeffrey D. Walter # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## # 14 Sep 2016, Len Shustek: Added Logout() import json import requests class Arlo(object): def __init__(self, username, password): self.headers = {} self.Login(username, password) def get(self, url, caller, headers={}): headers.update(self.headers) r = requests.get(url, headers=headers) r.raise_for_status() body = r.json() if body['success'] == True: if 'data' in body: return body['data'] else: raise Exception(caller+' failed', body) def post(self, url, body, caller, headers={}): headers.update(self.headers) r = requests.post(url, json=body, headers=headers) r.raise_for_status() body = r.json() if body['success'] == True: if 'data' in body: return body['data'] else: raise Exception(caller+' failed', body) def put(self, url, body, caller, headers={}): headers.update(self.headers) r = requests.put(url, json=body, headers=headers) r.raise_for_status() body = r.json() if body['success'] == True: if 'data' in body: return body['data'] else: raise Exception(caller+' failed', body) ## # This call returns the following: #{ # "userId":"XXX-XXXXXXX", # "email":"user@example.com", # "token":"2_5HicFJMXXXXX-S_7IuK2EqOUHXXXXXXXXXXX1CXKWTThgU18Va_XXXXXX5S00hUafv3PV_if_Bl_rhiFsDHYwhxI3CxlVnR5f3q2XXXXXX-Wnt9F7D82uN1f4cXXXXX-FMUsWF_6tMBqwn6DpzOaIB7ciJrnr2QJyKewbQouGM6", # "paymentId":"XXXXXXXX", # "authenticated":1472961381, # "accountStatus":"registered", # "serialNumber":"XXXXXXXXXXXXX", # 
"countryCode":"US", # "tocUpdate":false, # "policyUpdate":false, # "validEmail":true #} ## def Login(self, username, password): self.username = username self.password = password body = self.post('https://arlo.netgear.com/hmsweb/login', {'email': self.username, 'password': self.password}, 'Login') self.headers = { 'Authorization': body['token'] } self.user_id = body['userId'] return body def Logout(self): return self.put('https://arlo.netgear.com/hmsweb/logout', {}, 'Logout') ## # The following are examples of the json you would need to pass in the body of the Notify() call to interact with Arlo: # # Set System Mode (Armed, Disarmed) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"modes","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"active":"mode0"}} # Set System Mode (Calendar) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"schedule","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"active":true}} # Configure The Schedule (Calendar) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"schedule","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"schedule":[{"modeId":"mode0","startTime":0},{"modeId":"mode2","startTime":28800000},{"modeId":"mode0","startTime":64800000},{"modeId":"mode0","startTime":86400000},{"modeId":"mode2","startTime":115200000},{"modeId":"mode0","startTime":151200000},{"modeId":"mode0","startTime":172800000},{"modeId":"mode2","startTime":201600000},{"modeId":"mode0","startTime":237600000},{"modeId":"mode0","startTime":259200000},{"modeId":"mode2","startTime":288000000},{"modeId":"mode0","startTime":324000000},{"modeId":"mode0","startTime":345600000},{"modeId":"mode2","startTime":374400000},{"modeId":"mode0","startTime":410400000},{"modeId":"mode0","startTime":432000000},{"modeId":"mode0","startTime":518400000}]} # Create Mode - # 
{"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"add","resource":"rules","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"name":"Record video on Camera 1 if Camera 1 detects motion","id":"ruleNew","triggers":[{"type":"pirMotionActive","deviceId":"XXXXXXXXXXXXX","sensitivity":80}],"actions":[{"deviceId":"XXXXXXXXXXXXX","type":"recordVideo","stopCondition":{"type":"timeout","timeout":15}},{"type":"sendEmailAlert","recipients":["__OWNER_EMAIL__"]},{"type":"pushNotification"}]}} # {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"add","resource":"modes","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"name":"Test","rules":["rule3"]}} # Delete Mode - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"delete","resource":"modes/mode3","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true} # Camera Off - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"privacyActive":false}} # Night Vision On - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"zoom":{"topleftx":0,"toplefty":0,"bottomrightx":1280,"bottomrighty":720},"mirror":true,"flip":true,"nightVisionMode":1,"powerSaveMode":2}} # Motion Detection Test - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"motionSetupModeEnabled":true,"motionSetupModeSensitivity":80}} # # device_id = locations.data.uniqueIds # # System Properties: ("resource":"modes") # active (string) - Mode Selection (mode2 = All Motion On, mode1 = Armed, mode0 = Disarmed, etc.) 
# # System Properties: ("resource":"schedule") # active (bool) - Mode Selection (true = Calendar) # # Camera Properties: ("resource":"cameras/{id}") # privacyActive (bool) - Camera On/Off # zoom (topleftx (int), toplefty (int), bottomrightx (int), bottomrighty (int)) - Camera Zoom Level # mirror (bool) - Mirror Image (left-to-right or right-to-left) # flip (bool) - Flip Image Vertically # nightVisionMode (int) - Night Mode Enabled/Disabled (1, 0) # powerSaveMode (int) - PowerSaver Mode (3 = Best Video, 2 = Optimized, 1 = Best Battery Life) # motionSetupModeEnabled (bool) - Motion Detection Setup Enabled/Disabled # motionSetupModeSensitivity (int 0-100) - Motion Detection Sensitivity ## def Notify(self, device_id, xcloud_id, body): return self.post('https://arlo.netgear.com/hmsweb/users/devices/notify/'+device_id, body, 'Notify', headers={"xCloudId":xcloud_id}) def Arm(self, device_id, xcloud_id): return self.Notify(device_id, xcloud_id, {"from":self.user_id+"_web","to":device_id,"action":"set","resource":"modes","publishResponse":"true","properties":{"active":"mode1"}}) def Disarm(self, device_id, xcloud_id): return self.Notify(device_id, xcloud_id, {"from":self.user_id+"_web","to":device_id,"action":"set","resource":"modes","publishResponse":"true","properties":{"active":"mode0"}}) def Calendar(self, device_id, xcloud_id): return self.Notify(device_id, xcloud_id, {"from":self.user_id+"_web","to":device_id,"action":"set","resource":"schedule","publishResponse":"true","properties":{"active":"true"}}) def CustomMode(self, device_id, xcloud_id, mode): return self.Notify(device_id, xcloud_id, {"from":self.user_id+"_web","to":device_id,"action":"set","resource":"modes","publishResponse":"true","properties":{"active":mode}}) def DeleteMode(self, device_id, xcloud_id, mode): return self.Notify(device_id, xcloud_id, {"from":self.user_id+"_web","to":device_id,"action":"delete","resource":"modes/"+mode,"publishResponse":"true"}) def ToggleCamera(self, device_id, xcloud_id, 
active=True): return self.Notify(device_id, xcloud_id, {"from":self.user_id+"_web","to":device_id,"action":"set","resource":"cameras/"+device_id,"publishResponse":"true","properties":{"privacyActive":active}}) def Reset(self): return self.get('https://arlo.netgear.com/hmsweb/users/library/reset', 'Reset') def GetServiceLevel(self): return self.get('https://arlo.netgear.com/hmsweb/users/serviceLevel', 'GetServiceLevel') def GetPaymentOffers(self): return self.get('https://arlo.netgear.com/hmsweb/users/payment/offers', 'GetPaymentOffers') def GetProfile(self): return self.get('https://arlo.netgear.com/hmsweb/users/profile', 'GetProfile') def GetFriends(self): return self.get('https://arlo.netgear.com/hmsweb/users/friends', 'GetFriends') ## # This call returns the following: #{ # "id":"XXX-XXXXXXX_20160823042047", # "name":"Home", # "ownerId":"XXX-XXXXXXX", # "longitude":X.XXXXXXXXXXXXXXXX, # "latitude":X.XXXXXXXXXXXXXXXX, # "address":"123 Middle Of Nowhere Bumbfuck, EG, 12345", # "homeMode":"schedule", # "awayMode":"mode1", # "geoEnabled":false, # "geoRadius":150.0, # "uniqueIds":[ # "XXX-XXXXXXX_XXXXXXXXXXXXX" # ], # "smartDevices":[ # "XXXXXXXXXX", # "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" # ], # "pushNotifyDevices":[ # "XXXXXXXXXX" # ] #} ## def GetLocations(self): return self.get('https://arlo.netgear.com/hmsweb/users/locations', 'GetLocations') ## # This method returns an array that contains the basestation, cameras, etc. and their metadata. 
# ## def GetDevices(self, device_type=None): return self.get('https://arlo.netgear.com/hmsweb/users/devices', 'GetDevices') def GetLibraryMetaData(self, from_date, to_date): return self.post('https://arlo.netgear.com/hmsweb/users/library/metadata', {'dateFrom':from_date, 'dateTo':to_date}, 'GetRecordingMetaData') def UpdateProfile(self, first_name, last_name): return self.put('https://arlo.netgear.com/hmsweb/users/profile', {'firstName': first_name, 'lastName': last_name}, 'UpdateProfile') def UpdatePassword(self, password): r = self.post('https://arlo.netgear.com/hmsweb/users/changePassword', {'currentPassword':self.password,'newPassword':password}, 'ChangePassword') self.password = password return r ## # This is an example of the json you would pass in the body to UpdateFriends(): #{ # "firstName":"Some", # "lastName":"Body", # "devices":{ # "XXXXXXXXXXXXX":"Camera 1", # "XXXXXXXXXXXXX":"Camera 2 ", # "XXXXXXXXXXXXX":"Camera 3" # }, # "lastModified":1463977440911, # "adminUser":true, # "email":"user@example.com", # "id":"XXX-XXXXXXX" #} ## def UpdateFriends(self, body): return self.put('https://arlo.netgear.com/hmsweb/users/friends', body, 'UpdateFriends') def UpdateDeviceName(self, parent_id, device_id, name): return self.put('https://arlo.netgear.com/hmsweb/users/devices/renameDevice', {'deviceId':device_id, 'deviceName':name, 'parentId':parent_id}, 'UpdateDeviceName') ## # This is an example of the json you would pass in the body to UpdateDisplayOrder() of your devices in the UI. # # XXXXXXXXXXXXX is the device id of each camera. You can get this from GetDevices(). #{ # "devices":{ # "XXXXXXXXXXXXX":1, # "XXXXXXXXXXXXX":2, # "XXXXXXXXXXXXX":3 # } #} ## def UpdateDisplayOrder(self, body): return self.post('https://arlo.netgear.com/hmsweb/users/devices/displayOrder', body, 'UpdateDisplayOrder') ## # This call returns the following: # presignedContentUrl is a link to the actual video in Amazon AWS. 
# presignedThumbnailUrl is a link to the thumbnail .jpg of the actual video in Amazon AWS. # #[ # { # "mediaDurationSecond": 30, # "contentType": "video/mp4", # "name": "XXXXXXXXXXXXX", # "presignedContentUrl": "https://arlos3-prod-z2.s3.amazonaws.com/XXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXXX/XXX-XXXXXXX/XXXXXXXXXXXXX/recordings/XXXXXXXXXXXXX.mp4?AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX&Expires=1472968703&Signature=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", # "lastModified": 1472881430181, # "localCreatedDate": XXXXXXXXXXXXX, # "presignedThumbnailUrl": "https://arlos3-prod-z2.s3.amazonaws.com/XXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXXX/XXX-XXXXXXX/XXXXXXXXXXXXX/recordings/XXXXXXXXXXXXX_thumb.jpg?AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX&Expires=1472968703&Signature=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", # "reason": "motionRecord", # "deviceId": "XXXXXXXXXXXXX", # "createdBy": "XXXXXXXXXXXXX", # "createdDate": "20160903", # "timeZone": "America/Chicago", # "ownerId": "XXX-XXXXXXX", # "utcCreatedDate": XXXXXXXXXXXXX, # "currentState": "new", # "mediaDuration": "00:00:30" # } #] ## def GetLibrary(self, from_date, to_date): return self.post('https://arlo.netgear.com/hmsweb/users/library', {'dateFrom':from_date, 'dateTo':to_date}, 'GetRecordings') ## # Delete a single video recording from Arlo. # # All of the date info and device id you need to pass into this method are given in the results of the GetLibrary() call. # ## def DeleteRecording(self, created_date, utc_created_date, device_id): return self.post('https://arlo.netgear.com/hmsweb/users/library/recycle', {'data':[{'createdDate':created_date,'utcCreatedDate':utc_created_date,'deviceId':device_id}]}, 'DeleteRecording') ## # Delete a batch of video recordings from Arlo. # # The GetLibrary() call response json can be passed directly to this method if you'd like to delete the same list of videos you queried for. 
# If you want to delete some other batch of videos, then you need to send an array of objects representing each video you want to delete. # #[ # { # "createdDate":"20160904", # "utcCreatedDate":1473010280395, # "deviceId":"XXXXXXXXXXXXX" # }, # { # "createdDate":"20160904", # "utcCreatedDate":1473010280395, # "deviceId":"XXXXXXXXXXXXX" # } #] ## def BatchDeleteRecordings(self, recording_metadata): return self.post('https://arlo.netgear.com/hmsweb/users/library/recycle', {'data':recording_metadata}, 'BatchDeleteRecordings') ## # Returns the whole video from the presignedContentUrl. # # Obviously, this function is generic and could be used to download anything. :) ## def GetRecording(self, url, chunk_size=4096): video = '' r = requests.get(url, stream=True) r.raise_for_status() for chunk in r.iter_content(chunk_size): if chunk: video += chunk return video ## # Returns a generator that is the chunked video stream from the presignedContentUrl. # # Obviously, this function is generic and could be used to download anything. :) ## def StreamRecording(self, url, chunk_size=4096): r = requests.get(url, stream=True) r.raise_for_status() for chunk in r.iter_content(chunk_size): yield chunk ## # This function returns a generator that is a chunked live video stream. # # To initiate a stream pass the following: #{ # "to":"XXXXXXXXXXXXX", # "from":"XXX-XXXXXXX_web", # "resource":"cameras/XXXXXXXXXXXXX", # "action":"set", # "publishResponse":true, # "transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX", # "properties":{ # "activityState":"startPositionStream" # } #} # The request to /users/devices/startStream returns: #{ # "data":{ # "url":"rtmps://vzwow09-z2-prod.vz.netgear.com:80/vzmodulelive?egressToken=b1b4b675_ac03_4182_9844_043e02a44f71&userAgent=web&cameraId=48B4597VD8FF5_1473010750131" # }, # "success":true #} # which is the url of the video stream, which this function then uses to call StreamRecording(). 
## def StartStream(self, body): body = self.post('https://arlo.netgear.com/hmsweb/users/devices/startStream', body, 'StartStream') for chunk in self.StreamRecording(body['url']): yield chunk
48.291785
908
0.659412
1,890
17,047
5.87672
0.240212
0.020167
0.028811
0.034213
0.429639
0.402809
0.358603
0.354191
0.327541
0.289547
0
0.032018
0.17921
17,047
352
909
48.428977
0.761792
0.538687
0
0.306306
0
0
0.249115
0.002752
0
0
0
0
0
1
0.288288
false
0.072072
0.018018
0.207207
0.576577
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
0
0
0
3
e9d5ef34f4f1e3542bf02b5a42e224fd812cc2d5
354
py
Python
catalyst/dl/utils/__init__.py
andrey-avdeev/catalyst
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
[ "Apache-2.0" ]
null
null
null
catalyst/dl/utils/__init__.py
andrey-avdeev/catalyst
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
[ "Apache-2.0" ]
null
null
null
catalyst/dl/utils/__init__.py
andrey-avdeev/catalyst
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
[ "Apache-2.0" ]
null
null
null
# flake8: noqa from catalyst.utils import * # from .trace import * from .callbacks import process_callbacks from .criterion import ( accuracy, average_accuracy, dice, f1_score, iou, jaccard, mean_average_accuracy, reduced_focal_loss, sigmoid_focal_loss ) from .torch import get_loader, process_components from .visualization import plot_metrics
29.5
65
0.80226
46
354
5.913043
0.630435
0.073529
0
0
0
0
0
0
0
0
0
0.006536
0.135593
354
11
66
32.181818
0.882353
0.09322
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.625
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
759b7ced84530f54c2df12b863106d9d265111fe
412
py
Python
bomber_monkey/features/player/player.py
MonkeyPatchIo/bomber-monkey
8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b
[ "MIT" ]
null
null
null
bomber_monkey/features/player/player.py
MonkeyPatchIo/bomber-monkey
8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b
[ "MIT" ]
null
null
null
bomber_monkey/features/player/player.py
MonkeyPatchIo/bomber-monkey
8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b
[ "MIT" ]
null
null
null
from bomber_monkey.features.player.player_slot import PlayerSlot from python_ecs.ecs import Component class Player(Component): def __init__(self, slot: PlayerSlot, power: int) -> None: super().__init__() self.power = power self.slot = slot @property def player_id(self): return self.slot.player_id @property def color(self): return self.slot.color
22.888889
64
0.669903
52
412
5.057692
0.442308
0.121673
0.106464
0.136882
0
0
0
0
0
0
0
0
0.240291
412
17
65
24.235294
0.840256
0
0
0.153846
0
0
0
0
0
0
0
0
0
1
0.230769
false
0
0.153846
0.153846
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
75d7592a83afb095a37efba0a976f07a9d4a320c
407
py
Python
changetext/common_state.py
dfint/changetextpy_script
0481ab9225e070bfb17cfa0d194e1ed6c6c15e4d
[ "MIT" ]
null
null
null
changetext/common_state.py
dfint/changetextpy_script
0481ab9225e070bfb17cfa0d194e1ed6c6c15e4d
[ "MIT" ]
1
2021-11-03T06:53:22.000Z
2021-11-03T06:53:22.000Z
changetext/common_state.py
dfint/changetextpy_script
0481ab9225e070bfb17cfa0d194e1ed6c6c15e4d
[ "MIT" ]
null
null
null
class ChangeTextState: def __init__(self): self.prev_tail = '' self.context = None _change_text_state = None def init(): global _change_text_state _change_text_state = ChangeTextState() init() def get_state() -> ChangeTextState: global _change_text_state if _change_text_state is None: _change_text_state = ChangeTextState() return _change_text_state
17.695652
46
0.70516
48
407
5.416667
0.354167
0.269231
0.403846
0.146154
0
0
0
0
0
0
0
0
0.226044
407
22
47
18.5
0.825397
0
0
0.285714
0
0
0
0
0
0
0
0
0
1
0.214286
false
0
0
0
0.357143
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
75f5b186a011063efa4c17887c5384c8551e5511
413
py
Python
util/tree.py
aliaksandr-klimovich/sandbox
adab6b18f3d78a891c38609abc71b5d7a751919c
[ "Unlicense" ]
1
2020-12-25T18:21:18.000Z
2020-12-25T18:21:18.000Z
util/tree.py
aliaksandr-klimovich/sandbox
adab6b18f3d78a891c38609abc71b5d7a751919c
[ "Unlicense" ]
1
2021-02-11T15:05:59.000Z
2021-02-11T15:05:59.000Z
util/tree.py
aliaksandr-klimovich/sandbox
adab6b18f3d78a891c38609abc71b5d7a751919c
[ "Unlicense" ]
null
null
null
from collections import defaultdict class PrettyDefaultDict(defaultdict): def __str__(self): return dict.__str__(self) def __repr__(self): return dict.__repr__(self) def tree(): """ >>> t = tree() >>> t['a']['b']['c'] = 1 >>> t {'a': {'b': {'c': 1}}} """ return PrettyDefaultDict(tree) if __name__ == '__main__': import doctest doctest.testmod()
16.52
37
0.559322
45
413
4.6
0.511111
0.067633
0.135266
0.038647
0.048309
0
0
0
0
0
0
0.006557
0.261501
413
24
38
17.208333
0.672131
0.164649
0
0
0
0
0.025237
0
0
0
0
0
0
1
0.272727
false
0
0.181818
0.181818
0.818182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
f93c35d57bdbb73dc7d2f76e61f5dbd04a57b802
96
py
Python
tests/test_exception.py
raffaelfoidl/noworkflow
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
[ "MIT" ]
108
2015-02-04T14:16:51.000Z
2022-03-06T13:52:45.000Z
tests/test_exception.py
raffaelfoidl/noworkflow
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
[ "MIT" ]
92
2015-01-19T14:58:06.000Z
2021-04-19T17:28:50.000Z
tests/test_exception.py
raffaelfoidl/noworkflow
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
[ "MIT" ]
31
2015-03-03T23:53:59.000Z
2021-11-11T04:23:44.000Z
def y(): raise TypeError def x(): y() try: x() except TypeError: print("x")
7.384615
19
0.5
13
96
3.692308
0.615385
0
0
0
0
0
0
0
0
0
0
0
0.333333
96
12
20
8
0.75
0
0
0
0
0
0.010417
0
0
0
0
0
0
1
0.25
true
0
0
0
0.25
0.125
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
3
f94214986bce56369b90ec0db6184c796c528aae
300
py
Python
data/juno_nominal/coordinates_reactors_equal.py
gnafit/gna
c1a58dac11783342c97a2da1b19c97b85bce0394
[ "MIT" ]
5
2019-10-14T01:06:57.000Z
2021-02-02T16:33:06.000Z
data/juno_nominal/coordinates_reactors_equal.py
gnafit/gna
c1a58dac11783342c97a2da1b19c97b85bce0394
[ "MIT" ]
null
null
null
data/juno_nominal/coordinates_reactors_equal.py
gnafit/gna
c1a58dac11783342c97a2da1b19c97b85bce0394
[ "MIT" ]
null
null
null
# nominal reactor positions data = dict([ ('YJ1', [52.5]), ('YJ2', [52.5]), ('YJ3', [52.5]), ('YJ4', [52.5]), ('YJ5', [52.5]), ('YJ6', [52.5]), ('TS1', [52.5]), ('TS2', [52.5]), ('TS3', [52.5]), ('TS4', [52.5]), ('DYB', [215.0]), ('HZ', [265.0]), ])
17.647059
27
0.346667
41
300
2.536585
0.536585
0.288462
0
0
0
0
0
0
0
0
0
0.220183
0.273333
300
16
28
18.75
0.256881
0.083333
0
0
0
0
0.128205
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
f955be5cd5c1b23ffcdfcc8c4ea4a60916313772
701
py
Python
taxman/calculator/summary.py
robinmitra/taxman
a7afc0b4a1449cd46e90cd3af05f4a5d65a8acbf
[ "MIT" ]
3
2019-01-07T13:08:59.000Z
2021-01-11T10:34:52.000Z
taxman/calculator/summary.py
robinmitra/taxman
a7afc0b4a1449cd46e90cd3af05f4a5d65a8acbf
[ "MIT" ]
null
null
null
taxman/calculator/summary.py
robinmitra/taxman
a7afc0b4a1449cd46e90cd3af05f4a5d65a8acbf
[ "MIT" ]
null
null
null
class Summary: def __init__(self, total_income, net_income, income_tax, employees_ni, employers_ni): self._total_income = total_income self._net_income = net_income self._income_tax = income_tax self._employees_ni = employees_ni self._employers_ni = employers_ni @property def total_income(self): return self._total_income @property def net_income(self): return self._net_income @property def income_tax(self): return self._income_tax @property def employees_ni(self): return self._employees_ni @property def employers_ni(self): return self._employers_ni
24.172414
74
0.659058
85
701
4.976471
0.164706
0.130024
0.165485
0.094563
0
0
0
0
0
0
0
0
0.278174
701
28
75
25.035714
0.835968
0
0
0.217391
0
0
0
0
0
0
0
0
0
1
0.26087
false
0
0
0.217391
0.521739
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
f95addc59232628a2deac81ce824f616a4317ca3
227
py
Python
tools/polly/bin/detail/ios_dev_root.py
Kondr11/LABA7
7c5628192f8bac7ea68df725bc0529f8f94302bc
[ "MIT" ]
861
2015-02-03T10:20:11.000Z
2022-03-13T20:19:12.000Z
tools/polly/bin/detail/ios_dev_root.py
Kondr11/LABA7
7c5628192f8bac7ea68df725bc0529f8f94302bc
[ "MIT" ]
197
2015-04-15T20:13:30.000Z
2021-03-22T14:18:33.000Z
tools/polly/bin/detail/ios_dev_root.py
Kondr11/LABA7
7c5628192f8bac7ea68df725bc0529f8f94302bc
[ "MIT" ]
236
2015-03-05T19:52:07.000Z
2021-12-24T06:02:07.000Z
# Copyright (c) 2014, Ruslan Baratov # All rights reserved. import os import re def get(ios_version): dev_dir = re.sub(r'\.', '_', ios_version) dev_dir = 'IOS_{}_DEVELOPER_DIR'.format(dev_dir) return os.getenv(dev_dir)
20.636364
50
0.709251
36
227
4.194444
0.638889
0.15894
0.172185
0.211921
0
0
0
0
0
0
0
0.020725
0.14978
227
10
51
22.7
0.761658
0.242291
0
0
0
0
0.136095
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
f9634b3fc173dcbff32d340a448316b1f290665f
164
py
Python
instructors/course-2015/functions_gens_and_ducks/examples/gen_lines_of_file.py
mgadagin/PythonClass
70b370362d75720b3fb0e1d6cc8158f9445e9708
[ "MIT" ]
46
2017-09-27T20:19:36.000Z
2020-12-08T10:07:19.000Z
instructors/course-2015/functions_gens_and_ducks/examples/gen_lines_of_file.py
mgadagin/PythonClass
70b370362d75720b3fb0e1d6cc8158f9445e9708
[ "MIT" ]
6
2018-01-09T08:07:37.000Z
2020-09-07T12:25:13.000Z
instructors/course-2015/functions_gens_and_ducks/examples/gen_lines_of_file.py
mgadagin/PythonClass
70b370362d75720b3fb0e1d6cc8158f9445e9708
[ "MIT" ]
18
2017-10-10T02:06:51.000Z
2019-12-01T10:18:13.000Z
"""utility functions to read in and parse a file efficiently """ def gen_file_line(text): with open(text) as fp: for line in fp: yield line
23.428571
60
0.634146
26
164
3.923077
0.769231
0
0
0
0
0
0
0
0
0
0
0
0.286585
164
7
61
23.428571
0.871795
0.347561
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
f998d55860cb60330d4722550f0cd3c396cba071
527
py
Python
editor_api/helpers/executable_api.py
jphuart/swatplus-automatic-workflow
dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4
[ "MIT" ]
8
2020-06-28T07:50:29.000Z
2022-01-05T16:29:48.000Z
editor_api/helpers/executable_api.py
jphuart/swatplus-automatic-workflow
dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4
[ "MIT" ]
null
null
null
editor_api/helpers/executable_api.py
jphuart/swatplus-automatic-workflow
dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4
[ "MIT" ]
5
2020-06-28T07:50:31.000Z
2021-08-16T07:09:59.000Z
import json, sys class Unbuffered(object): def __init__(self, stream): self.stream = stream def write(self, data): self.stream.write(data) self.stream.flush() def writelines(self, datas): self.stream.writelines(datas) self.stream.flush() def __getattr__(self, attr): return getattr(self.stream, attr) class ExecutableApi: def emit_progress(self, percent, message): sys.stdout.write('\r\t-> percent: {0} - {1} '.format(str(percent).rjust(3), message)) sys.stdout.flush()
26.35
100
0.667932
69
527
4.971014
0.463768
0.204082
0.081633
0.104956
0
0
0
0
0
0
0
0.006993
0.185958
527
19
101
27.736842
0.792541
0
0
0.125
0
0
0.074144
0
0
0
0
0
0
1
0.3125
false
0
0.0625
0.0625
0.5625
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
f99b650e4e26f321485e30cd9938a83f3e79873d
404
py
Python
tests/beta_tests/test_grasshopper_if_else_syntax_debug.py
the-zebulan/CodeWars
1eafd1247d60955a5dfb63e4882e8ce86019f43a
[ "MIT" ]
40
2016-03-09T12:26:20.000Z
2022-03-23T08:44:51.000Z
tests/beta_tests/test_grasshopper_if_else_syntax_debug.py
akalynych/CodeWars
1eafd1247d60955a5dfb63e4882e8ce86019f43a
[ "MIT" ]
null
null
null
tests/beta_tests/test_grasshopper_if_else_syntax_debug.py
akalynych/CodeWars
1eafd1247d60955a5dfb63e4882e8ce86019f43a
[ "MIT" ]
36
2016-11-07T19:59:58.000Z
2022-03-31T11:18:27.000Z
import unittest from katas.kyu_8.grasshopper_if_else_syntax_debug import checkAlive class CheckAliveTestCase(unittest.TestCase): def test_true_1(self): self.assertTrue(checkAlive(1)) def test_true_2(self): self.assertTrue(checkAlive(3)) def test_false_1(self): self.assertFalse(checkAlive(-2)) def test_false_2(self): self.assertFalse(checkAlive(0))
22.444444
67
0.720297
53
404
5.245283
0.490566
0.100719
0.079137
0.201439
0
0
0
0
0
0
0
0.027273
0.183168
404
17
68
23.764706
0.815152
0
0
0
0
0
0
0
0
0
0
0
0.363636
1
0.363636
false
0
0.181818
0
0.636364
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
f9da27cb6e50b155cae8ccf9b2d900c966758ee9
284
py
Python
coursesessions/api/urls.py
s-wirth/thesis
a093dfb39a644b95509e2f5b25fbf3b2efa72545
[ "MIT" ]
null
null
null
coursesessions/api/urls.py
s-wirth/thesis
a093dfb39a644b95509e2f5b25fbf3b2efa72545
[ "MIT" ]
null
null
null
coursesessions/api/urls.py
s-wirth/thesis
a093dfb39a644b95509e2f5b25fbf3b2efa72545
[ "MIT" ]
null
null
null
from django.conf.urls import url from coursesessions.api.views import SessionListAPIView, SessionDetailAPIView urlpatterns = [ url(r'^$', SessionListAPIView.as_view(), name='session-list'), url(r'^(?P<pk>[0-9]+)/$', SessionDetailAPIView.as_view(), name='session-detail'), ]
31.555556
85
0.721831
34
284
5.970588
0.676471
0.039409
0.098522
0.167488
0
0
0
0
0
0
0
0.007843
0.102113
284
8
86
35.5
0.788235
0
0
0
0
0
0.158451
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
f9dd4554210e6bbb69453f81622bcccffb72124c
346
py
Python
main/controller/demo.py
nguyentranhoan/uit-mobile
8546312b01373d94cf00c64f7eacb769e0f4ccce
[ "BSD-3-Clause" ]
null
null
null
main/controller/demo.py
nguyentranhoan/uit-mobile
8546312b01373d94cf00c64f7eacb769e0f4ccce
[ "BSD-3-Clause" ]
null
null
null
main/controller/demo.py
nguyentranhoan/uit-mobile
8546312b01373d94cf00c64f7eacb769e0f4ccce
[ "BSD-3-Clause" ]
null
null
null
from common.controller import get, router, post from service.demo import DemoService @router('/demos', tags=['demo']) class DemoController: def __init__(self, demo_service: DemoService) -> None: super().__init__() self.demo_service = demo_service @get("/?") def list(self): return self.demo_service.list()
23.066667
58
0.67052
41
346
5.365854
0.512195
0.2
0.204545
0.172727
0
0
0
0
0
0
0
0
0.199422
346
14
59
24.714286
0.794224
0
0
0
0
0
0.034682
0
0
0
0
0
0
1
0.2
false
0
0.2
0.1
0.6
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
f9e49ed02fee4b9a0c608fb1f944e47bee3c6794
190
py
Python
helper/math/modular_exp.py
AlbertoDuarte/criptography
1778da9027d340c8aa96004258b2da0be5df22cd
[ "MIT" ]
null
null
null
helper/math/modular_exp.py
AlbertoDuarte/criptography
1778da9027d340c8aa96004258b2da0be5df22cd
[ "MIT" ]
null
null
null
helper/math/modular_exp.py
AlbertoDuarte/criptography
1778da9027d340c8aa96004258b2da0be5df22cd
[ "MIT" ]
null
null
null
def modular_exp(b, e, mod): if e == 0: return 1 res = modular_exp(b, e//2, mod) res = (res * res ) % mod if e%2 == 1: res = (res * b) % mod return res
15.833333
35
0.452632
32
190
2.625
0.375
0.214286
0.261905
0.285714
0
0
0
0
0
0
0
0.043478
0.394737
190
11
36
17.272727
0.686957
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0
0
0.375
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ddb5a838b7d0b0ef5054b93e5a6e6d6f3dcd2a20
214
py
Python
api/purepage/dbinit.py
guyskk/purepage
64e2180b751e787f9fe477f9b212b31c84d34cfb
[ "MIT" ]
13
2016-05-03T07:56:43.000Z
2019-08-03T05:58:58.000Z
api/purepage/dbinit.py
guyskk/purepage
64e2180b751e787f9fe477f9b212b31c84d34cfb
[ "MIT" ]
null
null
null
api/purepage/dbinit.py
guyskk/purepage
64e2180b751e787f9fe477f9b212b31c84d34cfb
[ "MIT" ]
3
2016-06-04T12:49:34.000Z
2019-04-24T08:51:34.000Z
import rethinkdb as r setups = [ r.db_create("purepage"), r.table_create("user"), r.table_create("article"), r.table("user").index_create("email"), r.table("article").index_create("author"), ]
21.4
46
0.640187
29
214
4.551724
0.482759
0.181818
0.181818
0
0
0
0
0
0
0
0
0
0.158879
214
9
47
23.777778
0.733333
0
0
0
0
0
0.191589
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ddc387e07bc35d9e56baf7a3cf9d5e844877e550
43
py
Python
MovieRecEngine/__init__.py
MrR0b0t-23/MovieRecSys
09ac6edb15b58973b5affc52c7f566cf26e9df71
[ "MIT" ]
1
2021-04-03T08:41:23.000Z
2021-04-03T08:41:23.000Z
MovieRecEngine/__init__.py
MrR0b0t-23/MovieRecSys
09ac6edb15b58973b5affc52c7f566cf26e9df71
[ "MIT" ]
null
null
null
MovieRecEngine/__init__.py
MrR0b0t-23/MovieRecSys
09ac6edb15b58973b5affc52c7f566cf26e9df71
[ "MIT" ]
1
2021-06-23T04:14:12.000Z
2021-06-23T04:14:12.000Z
#version of module __version__ = "0.0.3"
14.333333
21
0.674419
7
43
3.571429
0.714286
0
0
0
0
0
0
0
0
0
0
0.085714
0.186047
43
3
21
14.333333
0.628571
0.395349
0
0
0
0
0.208333
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ddefb967dca5431defbe3d5e1aee38f006934e58
8,667
py
Python
demo/main.py
ZiyangTian/tsp
b198a358d95389b34b04b694a6c7883cf21b53f0
[ "MIT" ]
null
null
null
demo/main.py
ZiyangTian/tsp
b198a358d95389b34b04b694a6c7883cf21b53f0
[ "MIT" ]
null
null
null
demo/main.py
ZiyangTian/tsp
b198a358d95389b34b04b694a6c7883cf21b53f0
[ "MIT" ]
null
null
null
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import problems.tspn as tspn import solvers.rkga as rkga import solvers.opt as opt def tsp_test(): # data_file = os.path.join('demo', 'problems', 'tsp_1.txt') # parameters = np.array(pd.read_csv(data_file, sep=' ')) # parameters = np.random.uniform(size=(30, 2)) # parameters = np.array([ # 0.607122, 0.664447, 0.953593, 0.021519, 0.757626, 0.921024, 0.586376, 0.433565, 0.786837, 0.052959, # 0.016088, 0.581436, 0.496714, 0.633571, 0.227777, 0.971433, 0.665490, 0.074331, 0.383556, 0.104392]) # parameters = np.reshape(parameters, (10, 2))# np.transpose(np.reshape(parameters, (2, 10))) # # # output = [1, 3, 8, 6, 10, 9, 5, 2, 4, 7, 1] # rank = [0, 2, 7, 5, 9, 8, 4, 1, 3, 6, 0] parameters = np.array([ 0.902421046498, 0.77621271719, 0.722727240262, 0.579024761326, 0.202222502748, 0.629848925738, 0.577683327113, 0.943735341041, 0.387801872815, 0.846231151452, 0.248295276407, 0.526557661494, 0.367962107849, 0.578749472622, 0.555896495344, 0.251653475865, 0.907255613781, 0.245249027563, 0.52544531979, 0.115256640047, 0.892999587695, 0.492378832333, 0.683712610357, 0.622637519591, 0.57168470001, 0.764729790149, 0.455321988383, 0.383651641103, 0.114378142586, 0.0686257141516, 0.880942807367, 0.565920630956, 0.0762609396025, 0.710417785386, 0.222822900992, 0.043125608507, 0.226433818255, 0.568887954037, 0.858631853949, 0.949648342561, 0.390812160258, 0.8303623789, 0.257238816645, 0.558283157573, 0.763412862654, 0.158490634265, 0.624987630978, 0.390107045565, 0.130862344781, 0.510372583137, 0.674928789702, 0.881428356182, 0.964366215304, 0.525331313649, 0.706494712117, 0.449096582924, 0.788732677484, 0.526778478004, 0.318321798568, 0.3545657325, 0.607914569902, 0.670640404295, 0.070789142522, 0.556820580678, 0.809475252633, 0.394581642841, 0.317719793413, 0.870039653289, 0.375853198069, 0.19272404987, 0.342342952722, 0.821914770523, 0.589592002558, 0.210835572805, 0.748900316792, 0.84695495136, 
0.516684883833, 0.801904939729, 0.508203739261, 0.432345157039, 0.936113150967, 0.636238987707, 0.39863340474, 0.0702509602592, 0.208057203091, 0.947559363155, 0.0211812129964, 0.352147795879, 0.674019523967, 0.0999767914088, 0.380454813888, 0.440897245531, 0.0485243910601, 0.862506649068, 0.620546948127, 0.545954375688, 0.514113046765, 0.66313845183, 0.350561488721, 0.000114044320017]) parameters = np.reshape(parameters, (50, 2)) rank = np.array([ 1, 41, 27, 11, 16, 29, 2, 12, 31, 49, 13, 39, 5, 21, 36, 34, 43, 47, 17, 44, 32, 25, 3, 19, 22, 6, 7, 48, 28, 33, 9, 23, 45, 10, 37, 8, 24, 40, 14, 46, 30, 35, 42, 50, 18, 15, 4, 26, 38, 20, 1]) - 1 neighbors = tspn.EllipsoidTSPN(parameters) solver = rkga.TSPSolver(population_size=10000) solver.compile(neighbors, traceback=True, use_cuda=True) # print(solver.solve(1000, 100)) # exit() max_num_generations = 300 for g in range(1, max_num_generations + 1): solver.evolute() objective = solver.optimal.objective if np.isnan(objective): break print(g, objective) optimal_rank = solver.optimal.rank optimal_rank = optimal_rank + [optimal_rank[0]] plt.clf() plt.close() plt.figure() plt.plot( neighbors.parameters[:, 0], neighbors.parameters[:, 1], 'ro', color='red') plt.plot( neighbors.parameters[:, 0][optimal_rank], neighbors.parameters[:, 1][optimal_rank], 'r-', color='blue') # plt.plot( # neighbors.parameters[:, 0][rank], neighbors.parameters[:, 1][rank], 'r-', color='green') plt.pause(0.001) print(solver.optimal) waypoints = neighbors.parameters[rank] print('using...') optimal_rank_opt = opt.opt2_search(neighbors.parameters, optimal_rank) plt.clf() plt.close() plt.figure() plt.plot( neighbors.parameters[:, 0], neighbors.parameters[:, 1], 'ro', color='red') plt.plot( neighbors.parameters[:, 0][optimal_rank_opt], neighbors.parameters[:, 1][optimal_rank_opt], 'r-', color='blue') plt.ioff() plt.show() def opt_test(): parameters = np.array([ 0.902421046498, 0.77621271719, 0.722727240262, 0.579024761326, 0.202222502748, 
0.629848925738, 0.577683327113, 0.943735341041, 0.387801872815, 0.846231151452, 0.248295276407, 0.526557661494, 0.367962107849, 0.578749472622, 0.555896495344, 0.251653475865, 0.907255613781, 0.245249027563, 0.52544531979, 0.115256640047, 0.892999587695, 0.492378832333, 0.683712610357, 0.622637519591, 0.57168470001, 0.764729790149, 0.455321988383, 0.383651641103, 0.114378142586, 0.0686257141516, 0.880942807367, 0.565920630956, 0.0762609396025, 0.710417785386, 0.222822900992, 0.043125608507, 0.226433818255, 0.568887954037, 0.858631853949, 0.949648342561, 0.390812160258, 0.8303623789, 0.257238816645, 0.558283157573, 0.763412862654, 0.158490634265, 0.624987630978, 0.390107045565, 0.130862344781, 0.510372583137, 0.674928789702, 0.881428356182, 0.964366215304, 0.525331313649, 0.706494712117, 0.449096582924, 0.788732677484, 0.526778478004, 0.318321798568, 0.3545657325, 0.607914569902, 0.670640404295, 0.070789142522, 0.556820580678, 0.809475252633, 0.394581642841, 0.317719793413, 0.870039653289, 0.375853198069, 0.19272404987, 0.342342952722, 0.821914770523, 0.589592002558, 0.210835572805, 0.748900316792, 0.84695495136, 0.516684883833, 0.801904939729, 0.508203739261, 0.432345157039, 0.936113150967, 0.636238987707, 0.39863340474, 0.0702509602592, 0.208057203091, 0.947559363155, 0.0211812129964, 0.352147795879, 0.674019523967, 0.0999767914088, 0.380454813888, 0.440897245531, 0.0485243910601, 0.862506649068, 0.620546948127, 0.545954375688, 0.514113046765, 0.66313845183, 0.350561488721, 0.000114044320017]) parameters = np.reshape(parameters, (50, 2)) problem = tspn.TSP(parameters, traceback=True) solver = rkga.TSPSolver(population_size=10000) solver.compile(problem, use_cuda=False) solution = solver.solve(1000, 100) print(solution) optimal_rank = solver.optimal.rank optimal_rank = optimal_rank + [optimal_rank[0]] plt.figure() plt.plot(parameters[:, 0], parameters[:, 1], 'ro', color='red') plt.plot(parameters[:, 0][optimal_rank + [optimal_rank[0]]], parameters[:, 
1][optimal_rank + [optimal_rank[0]]], 'r-', color='blue') optimal_rank_opt, optimal_journey_opt = opt.opt2_search(parameters, optimal_rank) print('optimal_journey_opt=%f' % optimal_journey_opt) plt.figure() plt.plot(parameters[:, 0], parameters[:, 1], 'ro', color='red') plt.plot( parameters[:, 0][optimal_rank_opt], parameters[:, 1][optimal_rank_opt], 'r-', color='green') plt.ioff() plt.show() def tspn_test(): neighbors = tspn.EllipsoidTSPN.from_randomly_generated( (20,), [0.]*6, [1., 1., 1., 0.1, 0.1, 0.1]) nodes = tspn.TSP(neighbors.parameters[:, :3]) tsp_solver = rkga.TSPSolver(population_size=10000) tsp_solver.compile(nodes, use_cuda=False) tspn_solver = rkga.TSPNSolver(population_size=300, opt2_prop=0.1) tspn_solver.compile(neighbors, use_cuda=False) print(tsp_solver.solve(1000).objective) print(tspn_solver.solve(1000).objective) for _ in range(5): tspn_solver.initialize() print(tspn_solver.solve(1000).objective) def data_test(): parameters_file = '/Users/Tianziyang/Desktop/data/tsp/3.parameters' rank_file = '/Users/Tianziyang/Desktop/data/tsp/3.rank' parameters = np.array(pd.read_csv(parameters_file, header=None)) rank = list(np.array(pd.read_csv(rank_file, header=None))[0]) rank = rank + [rank[0]] neighbors = tspn.EllipsoidTSPN(parameters) plt.figure() plt.plot( neighbors.parameters[:, 0], neighbors.parameters[:, 1], 'ro', color='red') plt.plot( neighbors.parameters[:, 0][rank], neighbors.parameters[:, 1][rank], 'r-', color='blue') print('...') # plt.plot( # neighbors.parameters[:, 0][rank], neighbors.parameters[:, 1][rank], 'r-', color='green') plt.pause(0.1) waypoints = neighbors.parameters[rank] print(np.sqrt(np.square((waypoints[1:] - waypoints[:-1])).sum(-1)).sum()) plt.ioff() plt.show() if __name__ == '__main__': import data.generator data.generator.main()
46.596774
119
0.673705
1,109
8,667
5.191163
0.251578
0.045857
0.025013
0.030571
0.709571
0.672746
0.645649
0.623068
0.60535
0.60535
0
0.418258
0.178493
8,667
185
120
46.848649
0.390309
0.09415
0
0.572414
0
0
0.023621
0.014045
0
0
0
0
0
1
0.027586
false
0
0.055172
0
0.082759
0.068966
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fb1a54d1e2d3dfebc90291c5e0d1d169089d4524
448
py
Python
firstapp/migrations/0010_auto_20220112_1420.py
vashuteotia123/SDSGems
8c0cf0d4fe4458f727a27d64804de3201b6b1c87
[ "MIT" ]
1
2022-02-06T13:00:05.000Z
2022-02-06T13:00:05.000Z
firstapp/migrations/0010_auto_20220112_1420.py
vashuteotia123/SDSGems
8c0cf0d4fe4458f727a27d64804de3201b6b1c87
[ "MIT" ]
null
null
null
firstapp/migrations/0010_auto_20220112_1420.py
vashuteotia123/SDSGems
8c0cf0d4fe4458f727a27d64804de3201b6b1c87
[ "MIT" ]
null
null
null
# Generated by Django 3.0.5 on 2022-01-12 08:50 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('firstapp', '0009_auto_20220109_2022'), ] operations = [ migrations.RemoveField( model_name='salesreturn', name='amount', ), migrations.RemoveField( model_name='salesreturn', name='center_stone', ), ]
20.363636
48
0.575893
43
448
5.860465
0.72093
0.166667
0.206349
0.238095
0.357143
0.357143
0
0
0
0
0
0.100977
0.314732
448
21
49
21.333333
0.71987
0.100446
0
0.4
1
0
0.177057
0.057357
0
0
0
0
0
1
0
false
0
0.066667
0
0.266667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
fb20a790c8fd091da659646d73f97e41353fcc03
1,079
py
Python
riskquant/model/poisson_frequency.py
confusedcrib/riskquant
27c46f8e6df62ba7faac27ed6d5745b5026fd9a5
[ "Apache-2.0" ]
567
2020-01-31T18:02:38.000Z
2022-03-24T14:36:39.000Z
riskquant/model/poisson_frequency.py
aspat20/riskquant
7321c880f4a3e13adb10c848199b52175d8d6999
[ "Apache-2.0" ]
14
2020-02-04T15:35:33.000Z
2020-11-04T20:45:30.000Z
riskquant/model/poisson_frequency.py
aspat20/riskquant
7321c880f4a3e13adb10c848199b52175d8d6999
[ "Apache-2.0" ]
49
2020-02-01T02:22:10.000Z
2022-03-31T21:28:09.000Z
"""A Poisson model suitable for frequency. Returns an array of ints. """ # Copyright 2019-2020 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np class PoissonFrequency(object): def __init__(self, frequency): """:param frequency = Mean rate per interval""" if frequency < 0: raise AssertionError("Frequency must be non-negative.") self.frequency = frequency def draw(self, n=1): return np.random.poisson(self.frequency, n) def mean(self): return self.frequency
32.69697
76
0.696942
150
1,079
4.986667
0.646667
0.080214
0.034759
0.042781
0
0
0
0
0
0
0
0.016667
0.221501
1,079
32
77
33.71875
0.87381
0.63392
0
0
0
0
0.083558
0
0
0
0
0
0.1
1
0.3
false
0
0.1
0.2
0.7
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
fb2af56963c1ad9298c94e031abe591ba698b168
64
py
Python
src/ConfigService/__init__.py
christopher-henderson/Tweeter
9acf716821b0c799e41acc77a800961343715370
[ "MIT" ]
null
null
null
src/ConfigService/__init__.py
christopher-henderson/Tweeter
9acf716821b0c799e41acc77a800961343715370
[ "MIT" ]
1
2015-04-25T17:54:07.000Z
2015-04-25T17:54:26.000Z
src/ConfigService/__init__.py
christopher-henderson/Tweeter
9acf716821b0c799e41acc77a800961343715370
[ "MIT" ]
null
null
null
from ConfigService import ConfigService Config = ConfigService()
32
39
0.859375
6
64
9.166667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.09375
64
2
40
32
0.948276
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
fb2b62529199fd8060d49d3d1a33c9e1c4b1288e
20
py
Python
smos_tools/__init__.py
ARGANS/smos-tools
4a0e5bb54fdef52725e30f14a6a971ebeeb71881
[ "MIT" ]
2
2019-04-10T08:40:12.000Z
2019-08-15T01:38:31.000Z
smos_tools/__init__.py
ARGANS/smos-tools
4a0e5bb54fdef52725e30f14a6a971ebeeb71881
[ "MIT" ]
null
null
null
smos_tools/__init__.py
ARGANS/smos-tools
4a0e5bb54fdef52725e30f14a6a971ebeeb71881
[ "MIT" ]
1
2019-08-03T05:29:30.000Z
2019-08-03T05:29:30.000Z
name = "smos_tools"
10
19
0.7
3
20
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.15
20
1
20
20
0.764706
0
0
0
0
0
0.5
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
34bc8cbd46ff35b772ab2afc492bfc0727b94fcb
63
py
Python
notebooks/solutions/visualize_00.py
cpaniaguam/pandas-head-to-tail
c809b6ae5834057c51006ecc908266e6d5d05b15
[ "CC-BY-4.0" ]
88
2016-12-29T06:49:10.000Z
2022-03-19T20:37:27.000Z
notebooks/solutions/visualize_00.py
paritosh666/pandas-head-to-tail
891a72ea5a21f8e0c8f6a6d22c03a1de26a6f30b
[ "CC-BY-4.0" ]
8
2018-06-17T21:47:27.000Z
2018-07-11T22:31:17.000Z
notebooks/solutions/visualize_00.py
paritosh666/pandas-head-to-tail
891a72ea5a21f8e0c8f6a6d22c03a1de26a6f30b
[ "CC-BY-4.0" ]
76
2016-12-30T08:56:28.000Z
2022-02-27T08:05:26.000Z
sns.factorplot('embarked', data=t, kind="count", hue="class");
31.5
62
0.68254
9
63
4.777778
1
0
0
0
0
0
0
0
0
0
0
0
0.063492
63
1
63
63
0.728814
0
0
0
0
0
0.285714
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
34bd1a095999687dfdfdef9f3d052398d7dd9fea
297
py
Python
api/v0/const.py
xmm/fstore
ad6afe96a455619d5c3db7a2b485d4e3281c18d2
[ "BSD-3-Clause" ]
1
2015-09-10T04:25:03.000Z
2015-09-10T04:25:03.000Z
api/v0/const.py
xmm/fstore
ad6afe96a455619d5c3db7a2b485d4e3281c18d2
[ "BSD-3-Clause" ]
1
2017-12-01T04:53:47.000Z
2017-12-01T04:53:47.000Z
api/v0/const.py
xmm/fstore
ad6afe96a455619d5c3db7a2b485d4e3281c18d2
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- ''' Copyright (c) 2014 @author: Marat Khayrullin <xmm.dev@gmail.com> ''' API_VERSION_V0 = 0 API_VERSION = API_VERSION_V0 bp_name = 'api_v0' api_v0_prefix = '{prefix}/v{version}'.format( prefix='/api', # current_app.config['URL_PREFIX'], version=API_VERSION_V0 )
19.8
55
0.680135
44
297
4.295455
0.590909
0.21164
0.190476
0.201058
0
0
0
0
0
0
0
0.043137
0.141414
297
14
56
21.214286
0.698039
0.407407
0
0
0
0
0.173653
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
34d0904ccf466f373b1a0f29b6eeb9412f52ac07
190
py
Python
blackhathacker.py
bob34-byte/pytube
30d1e1d336b3b896c15f2ef1be5432dd92441f0e
[ "MIT" ]
null
null
null
blackhathacker.py
bob34-byte/pytube
30d1e1d336b3b896c15f2ef1be5432dd92441f0e
[ "MIT" ]
null
null
null
blackhathacker.py
bob34-byte/pytube
30d1e1d336b3b896c15f2ef1be5432dd92441f0e
[ "MIT" ]
null
null
null
import pygame pygame.init() screen = pygame.display.set_mode(500,500) running = True while running: for event in pygame.event.get(): if event.type = pygame.QUIT: running = False
21.111111
41
0.710526
28
190
4.785714
0.678571
0
0
0
0
0
0
0
0
0
0
0.038462
0.178947
190
8
42
23.75
0.820513
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.125
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
34f58139b1263cdf13af270de6bb46d2a11ba19c
273
py
Python
test/fixtures.py
pmatiello/assemblage
a508b354e3a90f286e4e79e420e7d83478fc8cef
[ "MIT" ]
1
2017-12-22T21:47:48.000Z
2017-12-22T21:47:48.000Z
test/fixtures.py
pmatiello/assemblage
a508b354e3a90f286e4e79e420e7d83478fc8cef
[ "MIT" ]
null
null
null
test/fixtures.py
pmatiello/assemblage
a508b354e3a90f286e4e79e420e7d83478fc8cef
[ "MIT" ]
null
null
null
class no_deps(object): pass class one_dep(object): def __init__(self, dependency): self.dependency = dependency class two_deps(object): def __init__(self, first_dep, second_dep): self.first_dep = first_dep self.second_dep = second_dep
22.75
46
0.688645
37
273
4.621622
0.378378
0.140351
0.152047
0.19883
0
0
0
0
0
0
0
0
0.223443
273
11
47
24.818182
0.806604
0
0
0
0
0
0
0
0
0
0
0
0
1
0.222222
false
0.111111
0
0
0.555556
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
3
34f5cba36d6d70e3b0dd6e5a05062754009044dc
47
py
Python
pytype/__version__.py
priyansh19/pytype
44b1f6f7cddccb326abac4c21b4f26688369764e
[ "Apache-2.0" ]
2
2019-07-25T12:53:02.000Z
2019-08-18T16:26:16.000Z
pytype/__version__.py
priyansh19/pytype
44b1f6f7cddccb326abac4c21b4f26688369764e
[ "Apache-2.0" ]
null
null
null
pytype/__version__.py
priyansh19/pytype
44b1f6f7cddccb326abac4c21b4f26688369764e
[ "Apache-2.0" ]
null
null
null
# pylint: skip-file __version__ = '2019.07.11'
15.666667
26
0.702128
7
47
4.142857
1
0
0
0
0
0
0
0
0
0
0
0.195122
0.12766
47
2
27
23.5
0.512195
0.361702
0
0
0
0
0.357143
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
5519a5af8a94ff074421e2feee72e2140be2a703
8,134
py
Python
scenario_runner/srunner/tools/background_manager.py
neilsambhu/carla-simulator3
9838cd12c865950088d33a931f75424da0077fc7
[ "CNRI-Python", "RSA-MD" ]
null
null
null
scenario_runner/srunner/tools/background_manager.py
neilsambhu/carla-simulator3
9838cd12c865950088d33a931f75424da0077fc7
[ "CNRI-Python", "RSA-MD" ]
null
null
null
scenario_runner/srunner/tools/background_manager.py
neilsambhu/carla-simulator3
9838cd12c865950088d33a931f75424da0077fc7
[ "CNRI-Python", "RSA-MD" ]
null
null
null
#!/usr/bin/env python # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. """ Several atomic behaviors to help with the communication with the background activity, removing its interference with other scenarios """ import py_trees from srunner.scenariomanager.scenarioatomics.atomic_behaviors import AtomicBehavior class RoadBehaviorManager(AtomicBehavior): """ Updates the blackboard to change the parameters of the road behavior. None values imply that these values won't be changed Args: num_front_vehicles (int): Amount of vehicles in front of the ego. Can't be negative num_back_vehicles (int): Amount of vehicles behind it. Can't be negative vehicle_dist (float): Minimum distance between the road vehicles. Must between 0 and 'spawn_dist' spawn_dist (float): Minimum distance between spawned vehicles. Must be positive """ def __init__(self, num_front_vehicles=None, num_back_vehicles=None, vehicle_dist=None, spawn_dist=None, name="RoadBehaviorManager"): self._num_front_vehicles = num_front_vehicles self._num_back_vehicles = num_back_vehicles self._vehicle_dist = vehicle_dist self._spawn_dist = spawn_dist super(RoadBehaviorManager, self).__init__(name) def update(self): py_trees.blackboard.Blackboard().set( "BA_RoadBehavior", [self._num_front_vehicles, self._num_back_vehicles, self._vehicle_dist, self._spawn_dist], overwrite=True ) return py_trees.common.Status.SUCCESS class OppositeBehaviorManager(AtomicBehavior): """ Updates the blackboard to change the parameters of the opposite road behavior. None values imply that these values won't be changed Args: source_dist (float): Distance between the opposite sources and the ego vehicle. Must be positive vehicle_dist (float) Minimum distance between the opposite vehicles. Must between 0 and 'spawn_dist' spawn_dist (float): Minimum distance between spawned vehicles. 
Must be positive max_actors (int): Max amount of concurrent alive actors spawned by the same source. Can't be negative """ def __init__(self, source_dist=None, vehicle_dist=None, spawn_dist=None, max_actors=None, name="OppositeBehaviorManager"): self._source_dist = source_dist self._vehicle_dist = vehicle_dist self._spawn_dist = spawn_dist self._max_actors = max_actors super(OppositeBehaviorManager, self).__init__(name) def update(self): py_trees.blackboard.Blackboard().set( "BA_OppositeBehavior", [self._source_dist, self._vehicle_dist, self._spawn_dist, self._max_actors], overwrite=True ) return py_trees.common.Status.SUCCESS class JunctionBehaviorManager(AtomicBehavior): """ Updates the blackboard to change the parameters of the junction behavior. None values imply that these values won't be changed Args: source_dist (float): Distance between the junctiob sources and the junction entry. Must be positive vehicle_dist (float) Minimum distance between the junction vehicles. Must between 0 and 'spawn_dist' spawn_dist (float): Minimum distance between spawned vehicles. Must be positive max_actors (int): Max amount of concurrent alive actors spawned by the same source. Can't be negative """ def __init__(self, source_dist=None, vehicle_dist=None, spawn_dist=None, max_actors=None, name="JunctionBehaviorManager"): self._source_dist = source_dist self._vehicle_dist = vehicle_dist self._spawn_dist = spawn_dist self._max_actors = max_actors super(JunctionBehaviorManager, self).__init__(name) def update(self): py_trees.blackboard.Blackboard().set( "BA_JunctionBehavior", [self._source_dist, self._vehicle_dist, self._spawn_dist, self._max_actors], overwrite=True ) return py_trees.common.Status.SUCCESS class Scenario2Manager(AtomicBehavior): """ Updates the blackboard to tell the background activity that a Scenario2 has to be triggered. 
'stop_duration' is the amount of time, in seconds, the vehicles will be stopped """ def __init__(self, stop_duration=10, name="Scenario2Manager"): self._stop_duration = stop_duration super(Scenario2Manager, self).__init__(name) def update(self): py_trees.blackboard.Blackboard().set("BA_Scenario2", self._stop_duration, overwrite=True) return py_trees.common.Status.SUCCESS class Scenario4Manager(AtomicBehavior): """ Updates the blackboard to tell the background activity that a Scenario4 has been triggered. 'crossing_dist' is the distance between the crossing actor and the junction """ def __init__(self, crossing_dist=10, name="Scenario4Manager"): self._crossing_dist = crossing_dist super(Scenario4Manager, self).__init__(name) def update(self): """Updates the blackboard and succeds""" py_trees.blackboard.Blackboard().set("BA_Scenario4", self._crossing_dist, overwrite=True) return py_trees.common.Status.SUCCESS class Scenario7Manager(AtomicBehavior): """ Updates the blackboard to tell the background activity that a Scenario7 has been triggered 'entry_direction' is the direction from which the incoming traffic enters the junction. It should be something like 'left', 'right' or 'opposite' """ def __init__(self, entry_direction, name="Scenario7Manager"): self._entry_direction = entry_direction super(Scenario7Manager, self).__init__(name) def update(self): """Updates the blackboard and succeds""" py_trees.blackboard.Blackboard().set("BA_Scenario7", self._entry_direction, overwrite=True) return py_trees.common.Status.SUCCESS class Scenario8Manager(AtomicBehavior): """ Updates the blackboard to tell the background activity that a Scenario8 has been triggered 'entry_direction' is the direction from which the incoming traffic enters the junction. 
It should be something like 'left', 'right' or 'opposite' """ def __init__(self, entry_direction, name="Scenario8Manager"): self._entry_direction = entry_direction super(Scenario8Manager, self).__init__(name) def update(self): """Updates the blackboard and succeds""" py_trees.blackboard.Blackboard().set("BA_Scenario8", self._entry_direction, overwrite=True) return py_trees.common.Status.SUCCESS class Scenario9Manager(AtomicBehavior): """ Updates the blackboard to tell the background activity that a Scenario9 has been triggered 'entry_direction' is the direction from which the incoming traffic enters the junction. It should be something like 'left', 'right' or 'opposite' """ def __init__(self, entry_direction, name="Scenario9Manager"): self._entry_direction = entry_direction super(Scenario9Manager, self).__init__(name) def update(self): """Updates the blackboard and succeds""" py_trees.blackboard.Blackboard().set("BA_Scenario9", self._entry_direction, overwrite=True) return py_trees.common.Status.SUCCESS class Scenario10Manager(AtomicBehavior): """ Updates the blackboard to tell the background activity that a Scenario10 has been triggered 'entry_direction' is the direction from which the incoming traffic enters the junction. It should be something like 'left', 'right' or 'opposite' """ def __init__(self, entry_direction, name="Scenario10Manager"): self._entry_direction = entry_direction super(Scenario10Manager, self).__init__(name) def update(self): """Updates the blackboard and succeds""" py_trees.blackboard.Blackboard().set("BA_Scenario10", self._entry_direction, overwrite=True) return py_trees.common.Status.SUCCESS
40.467662
109
0.715884
998
8,134
5.594188
0.154309
0.050152
0.050152
0.054809
0.73079
0.721118
0.687623
0.66201
0.66201
0.653054
0
0.006506
0.206295
8,134
200
110
40.67
0.858271
0.403246
0
0.47191
0
0
0.06302
0.010066
0
0
0
0
0
1
0.202247
false
0
0.022472
0
0.426966
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
5521e7fb95205e5d9fdd79ded765033334c9c80e
3,258
py
Python
huaban/get_boards_url.py
kernel1994/simple_spider
5333d6ff7c20c805f528d26040c1d78b18c326db
[ "MIT" ]
null
null
null
huaban/get_boards_url.py
kernel1994/simple_spider
5333d6ff7c20c805f528d26040c1d78b18c326db
[ "MIT" ]
null
null
null
huaban/get_boards_url.py
kernel1994/simple_spider
5333d6ff7c20c805f528d26040c1d78b18c326db
[ "MIT" ]
1
2019-01-17T02:42:33.000Z
2019-01-17T02:42:33.000Z
# coding:utf-8 # author: gaozhengjie # E-mail: gaozhengj@foxmail.com # Home: https://www.gaozhengjie.cn/ # Desc: Save the urls of id (category directory) of a user in huaban.com to the local urls.txt file. import requests import json # The header is very important and interesting, so be sure to set it up, otherwise you won't get what you want. headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36', 'accept': '*/*', 'cookie':'_uab_collina=155757332922427333827892; sid=8rdXlP7boQ0UouNPum0Wrq9YtqB.tuHW4kTKV%2BaD%2B1Xvn82ubiIaSiJMWJWfCcZRJtaQUlY; _f=iVBORw0KGgoAAAANSUhEUgAAADIAAAAUCAYAAADPym6aAAABJElEQVRYR%2B1VOxYCIQwMF7KzsvFGXmW9kY2VnQfxCvgCRmfzCD9lnz53myWQAJOZBEfeeyIi7xz%2FyEXzZRPFhYbPc3hHXO6I6TbFixmfEyByeQQSxu6BcAXSkIGMazMjuBcz8pQcq44o0Iuyyc1p38C62kNsOdeSZDOQlLRQ80uOMalDgWCGMfsW2B5%2FATMUyGh2uhgptV9Ly6l5nNOa1%2F6zmjTqkH2aGEk2jY72%2B5k%2BNd9lBfLMh8GIP11iK95vw8uv7RQr4oNxOfbQ%2F7g5Z4meveyt0uKDEIiMLRC4jrG1%2FjkwKxCRE2e5lF30leyXYvQ628MZKV3q64HUFvnPAMkVuSWlEouLSiuV6dp2WtPBrPZ7uO5I18tbXWvEC27t%2BTcv%2Bx0JuJAoUm2L%2FQAAAABJRU5ErkJggg%3D%3D%2CWin32.1366.768.24; Hm_lvt_d4a0e7c3cd16eb58a65472f40e7ee543=1557573215; UM_distinctid=16aa699246245a-0ae9968fda6da2-5a442916-100200-16aa69924636a9; __auc=661f053016aa699274d8cb53e0a; __asc=1d94eed716aa7143ffd3169df19; CNZZDATA1256903590=1618668537-1557570594-%7C1557581395; _cnzz_CV1256903590=is-logon%7Clogged-out%7C1557582065027; Hm_lpvt_d4a0e7c3cd16eb58a65472f40e7ee543=1557582064', 'accept-encoding':'gzip', 'accept-language':'zh-CN,zh;q=0.9,en;q=0.8', 'referer':'https://huaban.com/qe43fqwuht/', 'Content-Type':'application/json; charset=utf-8', 'X-Request':'JSON', 'X-Requested-With':'XMLHttpRequest' } # Unfortunately, I didn't find the rules inside. This is the URL I manually filtered. # I will improve it if there is a chance. # qe43fqwuht is the id of the user who I want to crwal. 
url_list = [ 'https://huaban.com/qe43fqwuht/', 'https://huaban.com/qe43fqwuht/?jvjjofqw&limit=10&wfl=1&max=30595361', 'https://huaban.com/qe43fqwuht/?jvjjofqx&limit=10&wfl=1&max=32147297', 'https://huaban.com/qe43fqwuht/?jvjjofqy&limit=10&wfl=1&max=18879356', 'https://huaban.com/qe43fqwuht/?jvjjofqz&limit=10&wfl=1&max=34344656', 'https://huaban.com/qe43fqwuht/?jvjjofr0&limit=10&wfl=1&max=38524174', 'https://huaban.com/qe43fqwuht/?jvjjofr1&limit=10&wfl=1&max=18177330', 'https://huaban.com/qe43fqwuht/?jvjjofr2&limit=10&wfl=1&max=39828976', 'https://huaban.com/qe43fqwuht/?jvjjofr3&limit=10&wfl=1&max=42824839', 'https://huaban.com/qe43fqwuht/?jvjjofr4&limit=10&wfl=1&max=51987919' ] board_id_list = [] for each_page in url_list: res = requests.get(each_page, headers=headers) for each_id in json.loads(res.text)['user']['boards']: print(each_id['board_id'], each_id['title']) board_id_list.append(each_id['board_id']) with open('urls.txt', 'w', encoding='utf-8') as fp: for i in board_id_list: fp.write('https://huaban.com/boards/' + str(i) + '/\n') with open('urls.txt', 'r', encoding='utf-8') as fp: urls = fp.readlines()
66.489796
1,022
0.745242
400
3,258
5.995
0.5125
0.048791
0.070058
0.110092
0.065888
0
0
0
0
0
0
0.173352
0.120012
3,258
49
1,023
66.489796
0.663062
0.148557
0
0
0
0.314286
0.749724
0.373942
0
0
0
0
0
1
0
false
0
0.057143
0
0.057143
0.028571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
552393990bc3283a72724fc1fc2cdb860c675261
231
py
Python
src/visu/__init__.py
JonasFrey96/PLR2
a0498e6ff283a27c6db11b3d57d3b3100026f069
[ "MIT" ]
null
null
null
src/visu/__init__.py
JonasFrey96/PLR2
a0498e6ff283a27c6db11b3d57d3b3100026f069
[ "MIT" ]
2
2020-06-30T17:33:54.000Z
2020-07-07T18:12:21.000Z
src/visu/__init__.py
JonasFrey96/PLR2
a0498e6ff283a27c6db11b3d57d3b3100026f069
[ "MIT" ]
null
null
null
from .helper_functions import save_image from .visualizer import Visualizer, plot_pcd, plot_two_pcd, SequenceVisualizer __all__ = ( 'Visualizer', 'save_image', 'plot_pcd', 'plot_two_pcd', 'SequenceVisualizer' )
23.1
78
0.731602
26
231
6
0.461538
0.115385
0.141026
0.179487
0.448718
0.448718
0
0
0
0
0
0
0.17316
231
9
79
25.666667
0.816754
0
0
0
0
0
0.251082
0
0
0
0
0
0
1
0
false
0
0.222222
0
0.222222
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9b277ebded23099d56ac5500c9da3f55fe644859
808
py
Python
src/Rumor/models.py
HarleyCody/rumor
9642f2fcb19c2dd486e7a18258f9d1948fa1f8b2
[ "bzip2-1.0.6" ]
null
null
null
src/Rumor/models.py
HarleyCody/rumor
9642f2fcb19c2dd486e7a18258f9d1948fa1f8b2
[ "bzip2-1.0.6" ]
6
2020-06-05T20:55:19.000Z
2021-09-22T18:32:17.000Z
src/Rumor/models.py
HarleyCody/rumor
9642f2fcb19c2dd486e7a18258f9d1948fa1f8b2
[ "bzip2-1.0.6" ]
null
null
null
from django.db import models from RumorValidator import settings # Create your models here. class RumorManager(models.Manager): def all(self, *args, **kwargs): qs = super(RumorManager, self).all(*args, **kwargs).order_by("timestamp") return qs class Rumor(models.Model): rumor = models.CharField(max_length=200, null=True) isReal = models.BooleanField(default=True) province = models.CharField(max_length=4, default = 'CN', choices=settings.PROVINCE_CHOICES) timestamp = models.DateTimeField(auto_now_add=True, null=True) objects = RumorManager() def save(self, *args, **kwargs): super(Rumor, self).save(*args, **kwargs) def __str__(self): return str(self.rumor) def __unicode__(self): return str(self.rumor)
29.925926
99
0.675743
99
808
5.373737
0.474747
0.075188
0.052632
0.090226
0.082707
0
0
0
0
0
0
0.006202
0.201733
808
26
100
31.076923
0.818605
0.029703
0
0.111111
0
0
0.014085
0
0
0
0
0
0
1
0.222222
false
0
0.111111
0.111111
0.888889
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
9b2d16d63bb36dfab3fb46ea12bed9e7db395327
431
py
Python
lib/PyAMF-0.6.1/pyamf/adapters/_sqlalchemy_orm_collections.py
MiCHiLU/google_appengine_sdk
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
[ "Apache-2.0" ]
790
2015-01-03T02:13:39.000Z
2020-05-10T19:53:57.000Z
AppServer/lib/PyAMF-0.6.1/pyamf/adapters/_sqlalchemy_orm_collections.py
nlake44/appscale
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
[ "Apache-2.0" ]
1,361
2015-01-08T23:09:40.000Z
2020-04-14T00:03:04.000Z
AppServer/lib/PyAMF-0.6.1/pyamf/adapters/_sqlalchemy_orm_collections.py
nlake44/appscale
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
[ "Apache-2.0" ]
155
2015-01-08T22:59:31.000Z
2020-04-08T08:01:53.000Z
# Copyright (c) The PyAMF Project. # See LICENSE for details. """ SQLAlchemy adapter module. @see: U{SQLAlchemy homepage<http://www.sqlalchemy.org>} @since: 0.4 """ from sqlalchemy.orm import collections import pyamf from pyamf.adapters import util pyamf.add_type(collections.InstrumentedList, util.to_list) pyamf.add_type(collections.InstrumentedDict, util.to_dict) pyamf.add_type(collections.InstrumentedSet, util.to_set)
20.52381
58
0.788863
59
431
5.661017
0.59322
0.071856
0.107784
0.206587
0
0
0
0
0
0
0
0.005168
0.102088
431
20
59
21.55
0.857881
0.359629
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
9b4051578dfd2b47680640a4661284413bb9fd25
46
py
Python
src/aws_assume_role/version.py
nirmesika/onelogin-sec
cfc5496b1872926375778cf0df38526503839122
[ "MIT" ]
null
null
null
src/aws_assume_role/version.py
nirmesika/onelogin-sec
cfc5496b1872926375778cf0df38526503839122
[ "MIT" ]
null
null
null
src/aws_assume_role/version.py
nirmesika/onelogin-sec
cfc5496b1872926375778cf0df38526503839122
[ "MIT" ]
null
null
null
#! /usr/bin/env python __version__ = '1.6.1'
11.5
22
0.630435
8
46
3.125
0.875
0
0
0
0
0
0
0
0
0
0
0.076923
0.152174
46
3
23
15.333333
0.564103
0.456522
0
0
0
0
0.208333
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9b63bb833cbad1d58fe8003a42b63d1ef1bf46ef
75,437
py
Python
krysztalki/workDir/proba/tabliczka mnozenia/symetrie_generowane.py
woblob/Crystal_Symmetry
be2984b4487d6075986ef60822a347d0b0e6b885
[ "MIT" ]
null
null
null
krysztalki/workDir/proba/tabliczka mnozenia/symetrie_generowane.py
woblob/Crystal_Symmetry
be2984b4487d6075986ef60822a347d0b0e6b885
[ "MIT" ]
null
null
null
krysztalki/workDir/proba/tabliczka mnozenia/symetrie_generowane.py
woblob/Crystal_Symmetry
be2984b4487d6075986ef60822a347d0b0e6b885
[ "MIT" ]
null
null
null
import podstawa as pod # missing matrices _matrix_inv_000_h00_miss = pod.matrices_dict["inv_000"] + pod._translation_h00 #0 _matrix_m_0yz_h00_miss = pod.matrices_dict["m_0yz"] + pod._translation_h00 #1 _matrix_m_x0z_h00_miss = pod.matrices_dict["m_x0z"] + pod._translation_h00 #2 _matrix_m_xy0_h00_miss = pod.matrices_dict["m_xy0"] + pod._translation_h00 #3 _matrix_m_xmxz_h00_miss = pod.matrices_dict["m_xmxz"] + pod._translation_h00 #4 _matrix_m_xymy_h00_miss = pod.matrices_dict["m_xymy"] + pod._translation_h00 #5 _matrix_m_xymx_h00_miss = pod.matrices_dict["m_xymx"] + pod._translation_h00 #6 _matrix_m_xyx_h00_miss = pod.matrices_dict["m_xyx"] + pod._translation_h00 #7 _matrix_m_xxz_h00_miss = pod.matrices_dict["m_xxz"] + pod._translation_h00 #8 _matrix_m_xyy_h00_miss = pod.matrices_dict["m_xyy"] + pod._translation_h00 #9 _matrix_2_x00_h00_miss = pod.matrices_dict["2_x00"] + pod._translation_h00 #10 _matrix_2_0y0_h00_miss = pod.matrices_dict["2_0y0"] + pod._translation_h00 #11 _matrix_2_00z_h00_miss = pod.matrices_dict["2_00z"] + pod._translation_h00 #12 _matrix_2_xx0_h00_miss = pod.matrices_dict["2_xx0"] + pod._translation_h00 #13 _matrix_2_x0x_h00_miss = pod.matrices_dict["2_x0x"] + pod._translation_h00 #14 _matrix_2_0yy_h00_miss = pod.matrices_dict["2_0yy"] + pod._translation_h00 #15 _matrix_2_xmx0_h00_miss = pod.matrices_dict["2_xmx0"] + pod._translation_h00 #16 _matrix_2_mx0x_h00_miss = pod.matrices_dict["2_mx0x"] + pod._translation_h00 #17 _matrix_2_0myy_h00_miss = pod.matrices_dict["2_0myy"] + pod._translation_h00 #18 _matrix_3_xxx_h00_miss = pod.matrices_dict["3_xxx"] + pod._translation_h00 #19 _matrix_3_xmxmx_h00_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_h00 #20 _matrix_3_mxxmx_h00_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_h00 #21 _matrix_3_mxmxx_h00_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_h00 #22 _matrix_m3_xxx_h00_miss = pod.matrices_dict["m3_xxx"] + pod._translation_h00 #23 _matrix_m3_xmxmx_h00_miss = 
pod.matrices_dict["m3_xmxmx"] + pod._translation_h00 #24 _matrix_m3_mxxmx_h00_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_h00 #25 _matrix_m3_mxmxx_h00_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_h00 #26 _matrix_4_x00_h00_miss = pod.matrices_dict["4_x00"] + pod._translation_h00 #27 _matrix_4_0y0_h00_miss = pod.matrices_dict["4_0y0"] + pod._translation_h00 #28 _matrix_4_00z_h00_miss = pod.matrices_dict["4_00z"] + pod._translation_h00 #29 _matrix_m4_x00_h00_miss = pod.matrices_dict["-4_x00"] + pod._translation_h00 #30 _matrix_m4_0y0_h00_miss = pod.matrices_dict["-4_0y0"] + pod._translation_h00 #31 _matrix_m4_00z_h00_miss = pod.matrices_dict["-4_00z"] + pod._translation_h00 #32 _matrix_inv_000_0h0_miss = pod.matrices_dict["inv_000"] + pod._translation_0h0 #33 _matrix_m_0yz_0h0_miss = pod.matrices_dict["m_0yz"] + pod._translation_0h0 #34 _matrix_m_x0z_0h0_miss = pod.matrices_dict["m_x0z"] + pod._translation_0h0 #35 _matrix_m_xy0_0h0_miss = pod.matrices_dict["m_xy0"] + pod._translation_0h0 #36 _matrix_m_xmxz_0h0_miss = pod.matrices_dict["m_xmxz"] + pod._translation_0h0 #37 _matrix_m_xymy_0h0_miss = pod.matrices_dict["m_xymy"] + pod._translation_0h0 #38 _matrix_m_xymx_0h0_miss = pod.matrices_dict["m_xymx"] + pod._translation_0h0 #39 _matrix_m_xyx_0h0_miss = pod.matrices_dict["m_xyx"] + pod._translation_0h0 #40 _matrix_m_xxz_0h0_miss = pod.matrices_dict["m_xxz"] + pod._translation_0h0 #41 _matrix_m_xyy_0h0_miss = pod.matrices_dict["m_xyy"] + pod._translation_0h0 #42 _matrix_2_x00_0h0_miss = pod.matrices_dict["2_x00"] + pod._translation_0h0 #43 _matrix_2_0y0_0h0_miss = pod.matrices_dict["2_0y0"] + pod._translation_0h0 #44 _matrix_2_00z_0h0_miss = pod.matrices_dict["2_00z"] + pod._translation_0h0 #45 _matrix_2_xx0_0h0_miss = pod.matrices_dict["2_xx0"] + pod._translation_0h0 #46 _matrix_2_x0x_0h0_miss = pod.matrices_dict["2_x0x"] + pod._translation_0h0 #47 _matrix_2_0yy_0h0_miss = pod.matrices_dict["2_0yy"] + pod._translation_0h0 #48 
_matrix_2_xmx0_0h0_miss = pod.matrices_dict["2_xmx0"] + pod._translation_0h0 #49 _matrix_2_mx0x_0h0_miss = pod.matrices_dict["2_mx0x"] + pod._translation_0h0 #50 _matrix_2_0myy_0h0_miss = pod.matrices_dict["2_0myy"] + pod._translation_0h0 #51 _matrix_3_xxx_0h0_miss = pod.matrices_dict["3_xxx"] + pod._translation_0h0 #52 _matrix_3_xmxmx_0h0_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_0h0 #53 _matrix_3_mxxmx_0h0_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_0h0 #54 _matrix_3_mxmxx_0h0_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_0h0 #55 _matrix_m3_xxx_0h0_miss = pod.matrices_dict["m3_xxx"] + pod._translation_0h0 #56 _matrix_m3_xmxmx_0h0_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_0h0 #57 _matrix_m3_mxxmx_0h0_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_0h0 #58 _matrix_m3_mxmxx_0h0_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_0h0 #59 _matrix_4_x00_0h0_miss = pod.matrices_dict["4_x00"] + pod._translation_0h0 #60 _matrix_4_0y0_0h0_miss = pod.matrices_dict["4_0y0"] + pod._translation_0h0 #61 _matrix_4_00z_0h0_miss = pod.matrices_dict["4_00z"] + pod._translation_0h0 #62 _matrix_m4_x00_0h0_miss = pod.matrices_dict["-4_x00"] + pod._translation_0h0 #63 _matrix_m4_0y0_0h0_miss = pod.matrices_dict["-4_0y0"] + pod._translation_0h0 #64 _matrix_m4_00z_0h0_miss = pod.matrices_dict["-4_00z"] + pod._translation_0h0 #65 _matrix_inv_000_00h_miss = pod.matrices_dict["inv_000"] + pod._translation_00h #66 _matrix_m_0yz_00h_miss = pod.matrices_dict["m_0yz"] + pod._translation_00h #67 _matrix_m_x0z_00h_miss = pod.matrices_dict["m_x0z"] + pod._translation_00h #68 _matrix_m_xy0_00h_miss = pod.matrices_dict["m_xy0"] + pod._translation_00h #69 _matrix_m_xmxz_00h_miss = pod.matrices_dict["m_xmxz"] + pod._translation_00h #70 _matrix_m_xymy_00h_miss = pod.matrices_dict["m_xymy"] + pod._translation_00h #71 _matrix_m_xymx_00h_miss = pod.matrices_dict["m_xymx"] + pod._translation_00h #72 _matrix_m_xyx_00h_miss = 
pod.matrices_dict["m_xyx"] + pod._translation_00h #73 _matrix_m_xxz_00h_miss = pod.matrices_dict["m_xxz"] + pod._translation_00h #74 _matrix_m_xyy_00h_miss = pod.matrices_dict["m_xyy"] + pod._translation_00h #75 _matrix_2_x00_00h_miss = pod.matrices_dict["2_x00"] + pod._translation_00h #76 _matrix_2_0y0_00h_miss = pod.matrices_dict["2_0y0"] + pod._translation_00h #77 _matrix_2_00z_00h_miss = pod.matrices_dict["2_00z"] + pod._translation_00h #78 _matrix_2_xx0_00h_miss = pod.matrices_dict["2_xx0"] + pod._translation_00h #79 _matrix_2_x0x_00h_miss = pod.matrices_dict["2_x0x"] + pod._translation_00h #80 _matrix_2_0yy_00h_miss = pod.matrices_dict["2_0yy"] + pod._translation_00h #81 _matrix_2_xmx0_00h_miss = pod.matrices_dict["2_xmx0"] + pod._translation_00h #82 _matrix_2_mx0x_00h_miss = pod.matrices_dict["2_mx0x"] + pod._translation_00h #83 _matrix_2_0myy_00h_miss = pod.matrices_dict["2_0myy"] + pod._translation_00h #84 _matrix_3_xxx_00h_miss = pod.matrices_dict["3_xxx"] + pod._translation_00h #85 _matrix_3_xmxmx_00h_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_00h #86 _matrix_3_mxxmx_00h_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_00h #87 _matrix_3_mxmxx_00h_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_00h #88 _matrix_m3_xxx_00h_miss = pod.matrices_dict["m3_xxx"] + pod._translation_00h #89 _matrix_m3_xmxmx_00h_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_00h #90 _matrix_m3_mxxmx_00h_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_00h #91 _matrix_m3_mxmxx_00h_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_00h #92 _matrix_4_x00_00h_miss = pod.matrices_dict["4_x00"] + pod._translation_00h #93 _matrix_4_0y0_00h_miss = pod.matrices_dict["4_0y0"] + pod._translation_00h #94 _matrix_4_00z_00h_miss = pod.matrices_dict["4_00z"] + pod._translation_00h #95 _matrix_m4_x00_00h_miss = pod.matrices_dict["-4_x00"] + pod._translation_00h #96 _matrix_m4_0y0_00h_miss = pod.matrices_dict["-4_0y0"] + pod._translation_00h #97 
_matrix_m4_00z_00h_miss = pod.matrices_dict["-4_00z"] + pod._translation_00h #98 _matrix_inv_000_0hh_miss = pod.matrices_dict["inv_000"] + pod._translation_0hh #99 _matrix_m_0yz_0hh_miss = pod.matrices_dict["m_0yz"] + pod._translation_0hh #100 _matrix_m_x0z_0hh_miss = pod.matrices_dict["m_x0z"] + pod._translation_0hh #101 _matrix_m_xy0_0hh_miss = pod.matrices_dict["m_xy0"] + pod._translation_0hh #102 _matrix_m_xmxz_0hh_miss = pod.matrices_dict["m_xmxz"] + pod._translation_0hh #103 _matrix_m_xymy_0hh_miss = pod.matrices_dict["m_xymy"] + pod._translation_0hh #104 _matrix_m_xymx_0hh_miss = pod.matrices_dict["m_xymx"] + pod._translation_0hh #105 _matrix_m_xyx_0hh_miss = pod.matrices_dict["m_xyx"] + pod._translation_0hh #106 _matrix_m_xxz_0hh_miss = pod.matrices_dict["m_xxz"] + pod._translation_0hh #107 _matrix_m_xyy_0hh_miss = pod.matrices_dict["m_xyy"] + pod._translation_0hh #108 _matrix_2_x00_0hh_miss = pod.matrices_dict["2_x00"] + pod._translation_0hh #109 _matrix_2_0y0_0hh_miss = pod.matrices_dict["2_0y0"] + pod._translation_0hh #110 _matrix_2_00z_0hh_miss = pod.matrices_dict["2_00z"] + pod._translation_0hh #111 _matrix_2_xx0_0hh_miss = pod.matrices_dict["2_xx0"] + pod._translation_0hh #112 _matrix_2_x0x_0hh_miss = pod.matrices_dict["2_x0x"] + pod._translation_0hh #113 _matrix_2_0yy_0hh_miss = pod.matrices_dict["2_0yy"] + pod._translation_0hh #114 _matrix_2_xmx0_0hh_miss = pod.matrices_dict["2_xmx0"] + pod._translation_0hh #115 _matrix_2_mx0x_0hh_miss = pod.matrices_dict["2_mx0x"] + pod._translation_0hh #116 _matrix_2_0myy_0hh_miss = pod.matrices_dict["2_0myy"] + pod._translation_0hh #117 _matrix_3_xxx_0hh_miss = pod.matrices_dict["3_xxx"] + pod._translation_0hh #118 _matrix_3_xmxmx_0hh_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_0hh #119 _matrix_3_mxxmx_0hh_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_0hh #120 _matrix_3_mxmxx_0hh_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_0hh #121 _matrix_m3_xxx_0hh_miss = 
pod.matrices_dict["m3_xxx"] + pod._translation_0hh #122 _matrix_m3_xmxmx_0hh_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_0hh #123 _matrix_m3_mxxmx_0hh_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_0hh #124 _matrix_m3_mxmxx_0hh_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_0hh #125 _matrix_4_x00_0hh_miss = pod.matrices_dict["4_x00"] + pod._translation_0hh #126 _matrix_4_0y0_0hh_miss = pod.matrices_dict["4_0y0"] + pod._translation_0hh #127 _matrix_4_00z_0hh_miss = pod.matrices_dict["4_00z"] + pod._translation_0hh #128 _matrix_m4_x00_0hh_miss = pod.matrices_dict["-4_x00"] + pod._translation_0hh #129 _matrix_m4_0y0_0hh_miss = pod.matrices_dict["-4_0y0"] + pod._translation_0hh #130 _matrix_m4_00z_0hh_miss = pod.matrices_dict["-4_00z"] + pod._translation_0hh #131 _matrix_inv_000_h0h_miss = pod.matrices_dict["inv_000"] + pod._translation_h0h #132 _matrix_m_0yz_h0h_miss = pod.matrices_dict["m_0yz"] + pod._translation_h0h #133 _matrix_m_x0z_h0h_miss = pod.matrices_dict["m_x0z"] + pod._translation_h0h #134 _matrix_m_xy0_h0h_miss = pod.matrices_dict["m_xy0"] + pod._translation_h0h #135 _matrix_m_xmxz_h0h_miss = pod.matrices_dict["m_xmxz"] + pod._translation_h0h #136 _matrix_m_xymy_h0h_miss = pod.matrices_dict["m_xymy"] + pod._translation_h0h #137 _matrix_m_xymx_h0h_miss = pod.matrices_dict["m_xymx"] + pod._translation_h0h #138 _matrix_m_xyx_h0h_miss = pod.matrices_dict["m_xyx"] + pod._translation_h0h #139 _matrix_m_xxz_h0h_miss = pod.matrices_dict["m_xxz"] + pod._translation_h0h #140 _matrix_m_xyy_h0h_miss = pod.matrices_dict["m_xyy"] + pod._translation_h0h #141 _matrix_2_x00_h0h_miss = pod.matrices_dict["2_x00"] + pod._translation_h0h #142 _matrix_2_0y0_h0h_miss = pod.matrices_dict["2_0y0"] + pod._translation_h0h #143 _matrix_2_00z_h0h_miss = pod.matrices_dict["2_00z"] + pod._translation_h0h #144 _matrix_2_xx0_h0h_miss = pod.matrices_dict["2_xx0"] + pod._translation_h0h #145 _matrix_2_x0x_h0h_miss = pod.matrices_dict["2_x0x"] + 
pod._translation_h0h #146 _matrix_2_0yy_h0h_miss = pod.matrices_dict["2_0yy"] + pod._translation_h0h #147 _matrix_2_xmx0_h0h_miss = pod.matrices_dict["2_xmx0"] + pod._translation_h0h #148 _matrix_2_mx0x_h0h_miss = pod.matrices_dict["2_mx0x"] + pod._translation_h0h #149 _matrix_2_0myy_h0h_miss = pod.matrices_dict["2_0myy"] + pod._translation_h0h #150 _matrix_3_xxx_h0h_miss = pod.matrices_dict["3_xxx"] + pod._translation_h0h #151 _matrix_3_xmxmx_h0h_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_h0h #152 _matrix_3_mxxmx_h0h_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_h0h #153 _matrix_3_mxmxx_h0h_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_h0h #154 _matrix_m3_xxx_h0h_miss = pod.matrices_dict["m3_xxx"] + pod._translation_h0h #155 _matrix_m3_xmxmx_h0h_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_h0h #156 _matrix_m3_mxxmx_h0h_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_h0h #157 _matrix_m3_mxmxx_h0h_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_h0h #158 _matrix_4_x00_h0h_miss = pod.matrices_dict["4_x00"] + pod._translation_h0h #159 _matrix_4_0y0_h0h_miss = pod.matrices_dict["4_0y0"] + pod._translation_h0h #160 _matrix_4_00z_h0h_miss = pod.matrices_dict["4_00z"] + pod._translation_h0h #161 _matrix_m4_x00_h0h_miss = pod.matrices_dict["-4_x00"] + pod._translation_h0h #162 _matrix_m4_0y0_h0h_miss = pod.matrices_dict["-4_0y0"] + pod._translation_h0h #163 _matrix_m4_00z_h0h_miss = pod.matrices_dict["-4_00z"] + pod._translation_h0h #164 _matrix_inv_000_hh0_miss = pod.matrices_dict["inv_000"] + pod._translation_hh0 #165 _matrix_m_0yz_hh0_miss = pod.matrices_dict["m_0yz"] + pod._translation_hh0 #166 _matrix_m_x0z_hh0_miss = pod.matrices_dict["m_x0z"] + pod._translation_hh0 #167 _matrix_m_xy0_hh0_miss = pod.matrices_dict["m_xy0"] + pod._translation_hh0 #168 _matrix_m_xmxz_hh0_miss = pod.matrices_dict["m_xmxz"] + pod._translation_hh0 #169 _matrix_m_xymy_hh0_miss = pod.matrices_dict["m_xymy"] + pod._translation_hh0 #170 
_matrix_m_xymx_hh0_miss = pod.matrices_dict["m_xymx"] + pod._translation_hh0 #171 _matrix_m_xyx_hh0_miss = pod.matrices_dict["m_xyx"] + pod._translation_hh0 #172 _matrix_m_xxz_hh0_miss = pod.matrices_dict["m_xxz"] + pod._translation_hh0 #173 _matrix_m_xyy_hh0_miss = pod.matrices_dict["m_xyy"] + pod._translation_hh0 #174 _matrix_2_x00_hh0_miss = pod.matrices_dict["2_x00"] + pod._translation_hh0 #175 _matrix_2_0y0_hh0_miss = pod.matrices_dict["2_0y0"] + pod._translation_hh0 #176 _matrix_2_00z_hh0_miss = pod.matrices_dict["2_00z"] + pod._translation_hh0 #177 _matrix_2_xx0_hh0_miss = pod.matrices_dict["2_xx0"] + pod._translation_hh0 #178 _matrix_2_x0x_hh0_miss = pod.matrices_dict["2_x0x"] + pod._translation_hh0 #179 _matrix_2_0yy_hh0_miss = pod.matrices_dict["2_0yy"] + pod._translation_hh0 #180 _matrix_2_xmx0_hh0_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hh0 #181 _matrix_2_mx0x_hh0_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hh0 #182 _matrix_2_0myy_hh0_miss = pod.matrices_dict["2_0myy"] + pod._translation_hh0 #183 _matrix_3_xxx_hh0_miss = pod.matrices_dict["3_xxx"] + pod._translation_hh0 #184 _matrix_3_xmxmx_hh0_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hh0 #185 _matrix_3_mxxmx_hh0_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hh0 #186 _matrix_3_mxmxx_hh0_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hh0 #187 _matrix_m3_xxx_hh0_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hh0 #188 _matrix_m3_xmxmx_hh0_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hh0 #189 _matrix_m3_mxxmx_hh0_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hh0 #190 _matrix_m3_mxmxx_hh0_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hh0 #191 _matrix_4_x00_hh0_miss = pod.matrices_dict["4_x00"] + pod._translation_hh0 #192 _matrix_4_0y0_hh0_miss = pod.matrices_dict["4_0y0"] + pod._translation_hh0 #193 _matrix_4_00z_hh0_miss = pod.matrices_dict["4_00z"] + pod._translation_hh0 #194 _matrix_m4_x00_hh0_miss = 
pod.matrices_dict["-4_x00"] + pod._translation_hh0 #195 _matrix_m4_0y0_hh0_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hh0 #196 _matrix_m4_00z_hh0_miss = pod.matrices_dict["-4_00z"] + pod._translation_hh0 #197 _matrix_inv_000_hhh_miss = pod.matrices_dict["inv_000"] + pod._translation_hhh #198 _matrix_m_0yz_hhh_miss = pod.matrices_dict["m_0yz"] + pod._translation_hhh #199 _matrix_m_x0z_hhh_miss = pod.matrices_dict["m_x0z"] + pod._translation_hhh #200 _matrix_m_xy0_hhh_miss = pod.matrices_dict["m_xy0"] + pod._translation_hhh #201 _matrix_m_xmxz_hhh_miss = pod.matrices_dict["m_xmxz"] + pod._translation_hhh #202 _matrix_m_xymy_hhh_miss = pod.matrices_dict["m_xymy"] + pod._translation_hhh #203 _matrix_m_xymx_hhh_miss = pod.matrices_dict["m_xymx"] + pod._translation_hhh #204 _matrix_m_xyx_hhh_miss = pod.matrices_dict["m_xyx"] + pod._translation_hhh #205 _matrix_m_xxz_hhh_miss = pod.matrices_dict["m_xxz"] + pod._translation_hhh #206 _matrix_m_xyy_hhh_miss = pod.matrices_dict["m_xyy"] + pod._translation_hhh #207 _matrix_2_x00_hhh_miss = pod.matrices_dict["2_x00"] + pod._translation_hhh #208 _matrix_2_0y0_hhh_miss = pod.matrices_dict["2_0y0"] + pod._translation_hhh #209 _matrix_2_00z_hhh_miss = pod.matrices_dict["2_00z"] + pod._translation_hhh #210 _matrix_2_xx0_hhh_miss = pod.matrices_dict["2_xx0"] + pod._translation_hhh #211 _matrix_2_x0x_hhh_miss = pod.matrices_dict["2_x0x"] + pod._translation_hhh #212 _matrix_2_0yy_hhh_miss = pod.matrices_dict["2_0yy"] + pod._translation_hhh #213 _matrix_2_xmx0_hhh_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hhh #214 _matrix_2_mx0x_hhh_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hhh #215 _matrix_2_0myy_hhh_miss = pod.matrices_dict["2_0myy"] + pod._translation_hhh #216 _matrix_3_xxx_hhh_miss = pod.matrices_dict["3_xxx"] + pod._translation_hhh #217 _matrix_3_xmxmx_hhh_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hhh #218 _matrix_3_mxxmx_hhh_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hhh 
#219 _matrix_3_mxmxx_hhh_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hhh #220 _matrix_m3_xxx_hhh_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hhh #221 _matrix_m3_xmxmx_hhh_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hhh #222 _matrix_m3_mxxmx_hhh_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hhh #223 _matrix_m3_mxmxx_hhh_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hhh #224 _matrix_4_x00_hhh_miss = pod.matrices_dict["4_x00"] + pod._translation_hhh #225 _matrix_4_0y0_hhh_miss = pod.matrices_dict["4_0y0"] + pod._translation_hhh #226 _matrix_4_00z_hhh_miss = pod.matrices_dict["4_00z"] + pod._translation_hhh #227 _matrix_m4_x00_hhh_miss = pod.matrices_dict["-4_x00"] + pod._translation_hhh #228 _matrix_m4_0y0_hhh_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hhh #229 _matrix_m4_00z_hhh_miss = pod.matrices_dict["-4_00z"] + pod._translation_hhh #230 _matrix_inv_000_qqq_miss = pod.matrices_dict["inv_000"] + pod._translation_qqq #231 _matrix_m_0yz_qqq_miss = pod.matrices_dict["m_0yz"] + pod._translation_qqq #232 _matrix_m_x0z_qqq_miss = pod.matrices_dict["m_x0z"] + pod._translation_qqq #233 _matrix_m_xy0_qqq_miss = pod.matrices_dict["m_xy0"] + pod._translation_qqq #234 _matrix_m_xmxz_qqq_miss = pod.matrices_dict["m_xmxz"] + pod._translation_qqq #235 _matrix_m_xymy_qqq_miss = pod.matrices_dict["m_xymy"] + pod._translation_qqq #236 _matrix_m_xymx_qqq_miss = pod.matrices_dict["m_xymx"] + pod._translation_qqq #237 _matrix_m_xyx_qqq_miss = pod.matrices_dict["m_xyx"] + pod._translation_qqq #238 _matrix_m_xxz_qqq_miss = pod.matrices_dict["m_xxz"] + pod._translation_qqq #239 _matrix_m_xyy_qqq_miss = pod.matrices_dict["m_xyy"] + pod._translation_qqq #240 _matrix_2_x00_qqq_miss = pod.matrices_dict["2_x00"] + pod._translation_qqq #241 _matrix_2_0y0_qqq_miss = pod.matrices_dict["2_0y0"] + pod._translation_qqq #242 _matrix_2_00z_qqq_miss = pod.matrices_dict["2_00z"] + pod._translation_qqq #243 _matrix_2_xx0_qqq_miss = 
pod.matrices_dict["2_xx0"] + pod._translation_qqq #244 _matrix_2_x0x_qqq_miss = pod.matrices_dict["2_x0x"] + pod._translation_qqq #245 _matrix_2_0yy_qqq_miss = pod.matrices_dict["2_0yy"] + pod._translation_qqq #246 _matrix_2_xmx0_qqq_miss = pod.matrices_dict["2_xmx0"] + pod._translation_qqq #247 _matrix_2_mx0x_qqq_miss = pod.matrices_dict["2_mx0x"] + pod._translation_qqq #248 _matrix_2_0myy_qqq_miss = pod.matrices_dict["2_0myy"] + pod._translation_qqq #249 _matrix_3_xxx_qqq_miss = pod.matrices_dict["3_xxx"] + pod._translation_qqq #250 _matrix_3_xmxmx_qqq_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_qqq #251 _matrix_3_mxxmx_qqq_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_qqq #252 _matrix_3_mxmxx_qqq_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_qqq #253 _matrix_m3_xxx_qqq_miss = pod.matrices_dict["m3_xxx"] + pod._translation_qqq #254 _matrix_m3_xmxmx_qqq_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_qqq #255 _matrix_m3_mxxmx_qqq_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_qqq #256 _matrix_m3_mxmxx_qqq_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_qqq #257 _matrix_4_x00_qqq_miss = pod.matrices_dict["4_x00"] + pod._translation_qqq #258 _matrix_4_0y0_qqq_miss = pod.matrices_dict["4_0y0"] + pod._translation_qqq #259 _matrix_4_00z_qqq_miss = pod.matrices_dict["4_00z"] + pod._translation_qqq #260 _matrix_m4_x00_qqq_miss = pod.matrices_dict["-4_x00"] + pod._translation_qqq #261 _matrix_m4_0y0_qqq_miss = pod.matrices_dict["-4_0y0"] + pod._translation_qqq #262 _matrix_m4_00z_qqq_miss = pod.matrices_dict["-4_00z"] + pod._translation_qqq #263 _matrix_inv_000_3qqq_miss = pod.matrices_dict["inv_000"] + pod._translation_h00 + pod._translation_qqq #264 _matrix_m_0yz_3qqq_miss = pod.matrices_dict["m_0yz"] + pod._translation_h00 + pod._translation_qqq #265 _matrix_m_x0z_3qqq_miss = pod.matrices_dict["m_x0z"] + pod._translation_h00 + pod._translation_qqq #266 _matrix_m_xy0_3qqq_miss = pod.matrices_dict["m_xy0"] + 
pod._translation_h00 + pod._translation_qqq #267 _matrix_m_xmxz_3qqq_miss = pod.matrices_dict["m_xmxz"] + pod._translation_h00 + pod._translation_qqq #268 _matrix_m_xymy_3qqq_miss = pod.matrices_dict["m_xymy"] + pod._translation_h00 + pod._translation_qqq #269 _matrix_m_xymx_3qqq_miss = pod.matrices_dict["m_xymx"] + pod._translation_h00 + pod._translation_qqq #270 _matrix_m_xyx_3qqq_miss = pod.matrices_dict["m_xyx"] + pod._translation_h00 + pod._translation_qqq #271 _matrix_m_xxz_3qqq_miss = pod.matrices_dict["m_xxz"] + pod._translation_h00 + pod._translation_qqq #272 _matrix_m_xyy_3qqq_miss = pod.matrices_dict["m_xyy"] + pod._translation_h00 + pod._translation_qqq #273 _matrix_2_x00_3qqq_miss = pod.matrices_dict["2_x00"] + pod._translation_h00 + pod._translation_qqq #274 _matrix_2_0y0_3qqq_miss = pod.matrices_dict["2_0y0"] + pod._translation_h00 + pod._translation_qqq #275 _matrix_2_00z_3qqq_miss = pod.matrices_dict["2_00z"] + pod._translation_h00 + pod._translation_qqq #276 _matrix_2_xx0_3qqq_miss = pod.matrices_dict["2_xx0"] + pod._translation_h00 + pod._translation_qqq #277 _matrix_2_x0x_3qqq_miss = pod.matrices_dict["2_x0x"] + pod._translation_h00 + pod._translation_qqq #278 _matrix_2_0yy_3qqq_miss = pod.matrices_dict["2_0yy"] + pod._translation_h00 + pod._translation_qqq #279 _matrix_2_xmx0_3qqq_miss = pod.matrices_dict["2_xmx0"] + pod._translation_h00 + pod._translation_qqq #280 _matrix_2_mx0x_3qqq_miss = pod.matrices_dict["2_mx0x"] + pod._translation_h00 + pod._translation_qqq #281 _matrix_2_0myy_3qqq_miss = pod.matrices_dict["2_0myy"] + pod._translation_h00 + pod._translation_qqq #282 _matrix_3_xxx_3qqq_miss = pod.matrices_dict["3_xxx"] + pod._translation_h00 + pod._translation_qqq #283 _matrix_3_xmxmx_3qqq_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_h00 + pod._translation_qqq #284 _matrix_3_mxxmx_3qqq_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_h00 + pod._translation_qqq #285 _matrix_3_mxmxx_3qqq_miss = pod.matrices_dict["3_mxmxx"] + 
pod._translation_h00 + pod._translation_qqq #286 _matrix_m3_xxx_3qqq_miss = pod.matrices_dict["m3_xxx"] + pod._translation_h00 + pod._translation_qqq #287 _matrix_m3_xmxmx_3qqq_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_h00 + pod._translation_qqq #288 _matrix_m3_mxxmx_3qqq_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_h00 + pod._translation_qqq #289 _matrix_m3_mxmxx_3qqq_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_h00 + pod._translation_qqq #290 _matrix_4_x00_3qqq_miss = pod.matrices_dict["4_x00"] + pod._translation_h00 + pod._translation_qqq #291 _matrix_4_0y0_3qqq_miss = pod.matrices_dict["4_0y0"] + pod._translation_h00 + pod._translation_qqq #292 _matrix_4_00z_3qqq_miss = pod.matrices_dict["4_00z"] + pod._translation_h00 + pod._translation_qqq #293 _matrix_m4_x00_3qqq_miss = pod.matrices_dict["-4_x00"] + pod._translation_h00 + pod._translation_qqq #294 _matrix_m4_0y0_3qqq_miss = pod.matrices_dict["-4_0y0"] + pod._translation_h00 + pod._translation_qqq #295 _matrix_m4_00z_3qqq_miss = pod.matrices_dict["-4_00z"] + pod._translation_h00 + pod._translation_qqq #296 _matrix_inv_000_q3qq_miss = pod.matrices_dict["inv_000"] + pod._translation_0h0 + pod._translation_qqq #297 _matrix_m_0yz_q3qq_miss = pod.matrices_dict["m_0yz"] + pod._translation_0h0 + pod._translation_qqq #298 _matrix_m_x0z_q3qq_miss = pod.matrices_dict["m_x0z"] + pod._translation_0h0 + pod._translation_qqq #299 _matrix_m_xy0_q3qq_miss = pod.matrices_dict["m_xy0"] + pod._translation_0h0 + pod._translation_qqq #300 _matrix_m_xmxz_q3qq_miss = pod.matrices_dict["m_xmxz"] + pod._translation_0h0 + pod._translation_qqq #301 _matrix_m_xymy_q3qq_miss = pod.matrices_dict["m_xymy"] + pod._translation_0h0 + pod._translation_qqq #302 _matrix_m_xymx_q3qq_miss = pod.matrices_dict["m_xymx"] + pod._translation_0h0 + pod._translation_qqq #303 _matrix_m_xyx_q3qq_miss = pod.matrices_dict["m_xyx"] + pod._translation_0h0 + pod._translation_qqq #304 _matrix_m_xxz_q3qq_miss = 
pod.matrices_dict["m_xxz"] + pod._translation_0h0 + pod._translation_qqq #305 _matrix_m_xyy_q3qq_miss = pod.matrices_dict["m_xyy"] + pod._translation_0h0 + pod._translation_qqq #306 _matrix_2_x00_q3qq_miss = pod.matrices_dict["2_x00"] + pod._translation_0h0 + pod._translation_qqq #307 _matrix_2_0y0_q3qq_miss = pod.matrices_dict["2_0y0"] + pod._translation_0h0 + pod._translation_qqq #308 _matrix_2_00z_q3qq_miss = pod.matrices_dict["2_00z"] + pod._translation_0h0 + pod._translation_qqq #309 _matrix_2_xx0_q3qq_miss = pod.matrices_dict["2_xx0"] + pod._translation_0h0 + pod._translation_qqq #310 _matrix_2_x0x_q3qq_miss = pod.matrices_dict["2_x0x"] + pod._translation_0h0 + pod._translation_qqq #311 _matrix_2_0yy_q3qq_miss = pod.matrices_dict["2_0yy"] + pod._translation_0h0 + pod._translation_qqq #312 _matrix_2_xmx0_q3qq_miss = pod.matrices_dict["2_xmx0"] + pod._translation_0h0 + pod._translation_qqq #313 _matrix_2_mx0x_q3qq_miss = pod.matrices_dict["2_mx0x"] + pod._translation_0h0 + pod._translation_qqq #314 _matrix_2_0myy_q3qq_miss = pod.matrices_dict["2_0myy"] + pod._translation_0h0 + pod._translation_qqq #315 _matrix_3_xxx_q3qq_miss = pod.matrices_dict["3_xxx"] + pod._translation_0h0 + pod._translation_qqq #316 _matrix_3_xmxmx_q3qq_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_0h0 + pod._translation_qqq #317 _matrix_3_mxxmx_q3qq_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_0h0 + pod._translation_qqq #318 _matrix_3_mxmxx_q3qq_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_0h0 + pod._translation_qqq #319 _matrix_m3_xxx_q3qq_miss = pod.matrices_dict["m3_xxx"] + pod._translation_0h0 + pod._translation_qqq #320 _matrix_m3_xmxmx_q3qq_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_0h0 + pod._translation_qqq #321 _matrix_m3_mxxmx_q3qq_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_0h0 + pod._translation_qqq #322 _matrix_m3_mxmxx_q3qq_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_0h0 + pod._translation_qqq #323 
_matrix_4_x00_q3qq_miss = pod.matrices_dict["4_x00"] + pod._translation_0h0 + pod._translation_qqq #324 _matrix_4_0y0_q3qq_miss = pod.matrices_dict["4_0y0"] + pod._translation_0h0 + pod._translation_qqq #325 _matrix_4_00z_q3qq_miss = pod.matrices_dict["4_00z"] + pod._translation_0h0 + pod._translation_qqq #326 _matrix_m4_x00_q3qq_miss = pod.matrices_dict["-4_x00"] + pod._translation_0h0 + pod._translation_qqq #327 _matrix_m4_0y0_q3qq_miss = pod.matrices_dict["-4_0y0"] + pod._translation_0h0 + pod._translation_qqq #328 _matrix_m4_00z_q3qq_miss = pod.matrices_dict["-4_00z"] + pod._translation_0h0 + pod._translation_qqq #329 _matrix_inv_000_qq3q_miss = pod.matrices_dict["inv_000"] + pod._translation_00h + pod._translation_qqq #330 _matrix_m_0yz_qq3q_miss = pod.matrices_dict["m_0yz"] + pod._translation_00h + pod._translation_qqq #331 _matrix_m_x0z_qq3q_miss = pod.matrices_dict["m_x0z"] + pod._translation_00h + pod._translation_qqq #332 _matrix_m_xy0_qq3q_miss = pod.matrices_dict["m_xy0"] + pod._translation_00h + pod._translation_qqq #333 _matrix_m_xmxz_qq3q_miss = pod.matrices_dict["m_xmxz"] + pod._translation_00h + pod._translation_qqq #334 _matrix_m_xymy_qq3q_miss = pod.matrices_dict["m_xymy"] + pod._translation_00h + pod._translation_qqq #335 _matrix_m_xymx_qq3q_miss = pod.matrices_dict["m_xymx"] + pod._translation_00h + pod._translation_qqq #336 _matrix_m_xyx_qq3q_miss = pod.matrices_dict["m_xyx"] + pod._translation_00h + pod._translation_qqq #337 _matrix_m_xxz_qq3q_miss = pod.matrices_dict["m_xxz"] + pod._translation_00h + pod._translation_qqq #338 _matrix_m_xyy_qq3q_miss = pod.matrices_dict["m_xyy"] + pod._translation_00h + pod._translation_qqq #339 _matrix_2_x00_qq3q_miss = pod.matrices_dict["2_x00"] + pod._translation_00h + pod._translation_qqq #340 _matrix_2_0y0_qq3q_miss = pod.matrices_dict["2_0y0"] + pod._translation_00h + pod._translation_qqq #341 _matrix_2_00z_qq3q_miss = pod.matrices_dict["2_00z"] + pod._translation_00h + pod._translation_qqq #342 
_matrix_2_xx0_qq3q_miss = pod.matrices_dict["2_xx0"] + pod._translation_00h + pod._translation_qqq #343 _matrix_2_x0x_qq3q_miss = pod.matrices_dict["2_x0x"] + pod._translation_00h + pod._translation_qqq #344 _matrix_2_0yy_qq3q_miss = pod.matrices_dict["2_0yy"] + pod._translation_00h + pod._translation_qqq #345 _matrix_2_xmx0_qq3q_miss = pod.matrices_dict["2_xmx0"] + pod._translation_00h + pod._translation_qqq #346 _matrix_2_mx0x_qq3q_miss = pod.matrices_dict["2_mx0x"] + pod._translation_00h + pod._translation_qqq #347 _matrix_2_0myy_qq3q_miss = pod.matrices_dict["2_0myy"] + pod._translation_00h + pod._translation_qqq #348 _matrix_3_xxx_qq3q_miss = pod.matrices_dict["3_xxx"] + pod._translation_00h + pod._translation_qqq #349 _matrix_3_xmxmx_qq3q_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_00h + pod._translation_qqq #350 _matrix_3_mxxmx_qq3q_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_00h + pod._translation_qqq #351 _matrix_3_mxmxx_qq3q_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_00h + pod._translation_qqq #352 _matrix_m3_xxx_qq3q_miss = pod.matrices_dict["m3_xxx"] + pod._translation_00h + pod._translation_qqq #353 _matrix_m3_xmxmx_qq3q_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_00h + pod._translation_qqq #354 _matrix_m3_mxxmx_qq3q_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_00h + pod._translation_qqq #355 _matrix_m3_mxmxx_qq3q_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_00h + pod._translation_qqq #356 _matrix_4_x00_qq3q_miss = pod.matrices_dict["4_x00"] + pod._translation_00h + pod._translation_qqq #357 _matrix_4_0y0_qq3q_miss = pod.matrices_dict["4_0y0"] + pod._translation_00h + pod._translation_qqq #358 _matrix_4_00z_qq3q_miss = pod.matrices_dict["4_00z"] + pod._translation_00h + pod._translation_qqq #359 _matrix_m4_x00_qq3q_miss = pod.matrices_dict["-4_x00"] + pod._translation_00h + pod._translation_qqq #360 _matrix_m4_0y0_qq3q_miss = pod.matrices_dict["-4_0y0"] + pod._translation_00h + 
pod._translation_qqq #361 _matrix_m4_00z_qq3q_miss = pod.matrices_dict["-4_00z"] + pod._translation_00h + pod._translation_qqq #362 _matrix_inv_000_q3q3q_miss = pod.matrices_dict["inv_000"] + pod._translation_0hh + pod._translation_qqq #363 _matrix_m_0yz_q3q3q_miss = pod.matrices_dict["m_0yz"] + pod._translation_0hh + pod._translation_qqq #364 _matrix_m_x0z_q3q3q_miss = pod.matrices_dict["m_x0z"] + pod._translation_0hh + pod._translation_qqq #365 _matrix_m_xy0_q3q3q_miss = pod.matrices_dict["m_xy0"] + pod._translation_0hh + pod._translation_qqq #366 _matrix_m_xmxz_q3q3q_miss = pod.matrices_dict["m_xmxz"] + pod._translation_0hh + pod._translation_qqq #367 _matrix_m_xymy_q3q3q_miss = pod.matrices_dict["m_xymy"] + pod._translation_0hh + pod._translation_qqq #368 _matrix_m_xymx_q3q3q_miss = pod.matrices_dict["m_xymx"] + pod._translation_0hh + pod._translation_qqq #369 _matrix_m_xyx_q3q3q_miss = pod.matrices_dict["m_xyx"] + pod._translation_0hh + pod._translation_qqq #370 _matrix_m_xxz_q3q3q_miss = pod.matrices_dict["m_xxz"] + pod._translation_0hh + pod._translation_qqq #371 _matrix_m_xyy_q3q3q_miss = pod.matrices_dict["m_xyy"] + pod._translation_0hh + pod._translation_qqq #372 _matrix_2_x00_q3q3q_miss = pod.matrices_dict["2_x00"] + pod._translation_0hh + pod._translation_qqq #373 _matrix_2_0y0_q3q3q_miss = pod.matrices_dict["2_0y0"] + pod._translation_0hh + pod._translation_qqq #374 _matrix_2_00z_q3q3q_miss = pod.matrices_dict["2_00z"] + pod._translation_0hh + pod._translation_qqq #375 _matrix_2_xx0_q3q3q_miss = pod.matrices_dict["2_xx0"] + pod._translation_0hh + pod._translation_qqq #376 _matrix_2_x0x_q3q3q_miss = pod.matrices_dict["2_x0x"] + pod._translation_0hh + pod._translation_qqq #377 _matrix_2_0yy_q3q3q_miss = pod.matrices_dict["2_0yy"] + pod._translation_0hh + pod._translation_qqq #378 _matrix_2_xmx0_q3q3q_miss = pod.matrices_dict["2_xmx0"] + pod._translation_0hh + pod._translation_qqq #379 _matrix_2_mx0x_q3q3q_miss = pod.matrices_dict["2_mx0x"] + 
pod._translation_0hh + pod._translation_qqq #380 _matrix_2_0myy_q3q3q_miss = pod.matrices_dict["2_0myy"] + pod._translation_0hh + pod._translation_qqq #381 _matrix_3_xxx_q3q3q_miss = pod.matrices_dict["3_xxx"] + pod._translation_0hh + pod._translation_qqq #382 _matrix_3_xmxmx_q3q3q_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_0hh + pod._translation_qqq #383 _matrix_3_mxxmx_q3q3q_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_0hh + pod._translation_qqq #384 _matrix_3_mxmxx_q3q3q_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_0hh + pod._translation_qqq #385 _matrix_m3_xxx_q3q3q_miss = pod.matrices_dict["m3_xxx"] + pod._translation_0hh + pod._translation_qqq #386 _matrix_m3_xmxmx_q3q3q_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_0hh + pod._translation_qqq #387 _matrix_m3_mxxmx_q3q3q_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_0hh + pod._translation_qqq #388 _matrix_m3_mxmxx_q3q3q_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_0hh + pod._translation_qqq #389 _matrix_4_x00_q3q3q_miss = pod.matrices_dict["4_x00"] + pod._translation_0hh + pod._translation_qqq #390 _matrix_4_0y0_q3q3q_miss = pod.matrices_dict["4_0y0"] + pod._translation_0hh + pod._translation_qqq #391 _matrix_4_00z_q3q3q_miss = pod.matrices_dict["4_00z"] + pod._translation_0hh + pod._translation_qqq #392 _matrix_m4_x00_q3q3q_miss = pod.matrices_dict["-4_x00"] + pod._translation_0hh + pod._translation_qqq #393 _matrix_m4_0y0_q3q3q_miss = pod.matrices_dict["-4_0y0"] + pod._translation_0hh + pod._translation_qqq #394 _matrix_m4_00z_q3q3q_miss = pod.matrices_dict["-4_00z"] + pod._translation_0hh + pod._translation_qqq #395 _matrix_inv_000_3qq3q_miss = pod.matrices_dict["inv_000"] + pod._translation_h0h + pod._translation_qqq #396 _matrix_m_0yz_3qq3q_miss = pod.matrices_dict["m_0yz"] + pod._translation_h0h + pod._translation_qqq #397 _matrix_m_x0z_3qq3q_miss = pod.matrices_dict["m_x0z"] + pod._translation_h0h + pod._translation_qqq #398 
_matrix_m_xy0_3qq3q_miss = pod.matrices_dict["m_xy0"] + pod._translation_h0h + pod._translation_qqq #399 _matrix_m_xmxz_3qq3q_miss = pod.matrices_dict["m_xmxz"] + pod._translation_h0h + pod._translation_qqq #400 _matrix_m_xymy_3qq3q_miss = pod.matrices_dict["m_xymy"] + pod._translation_h0h + pod._translation_qqq #401 _matrix_m_xymx_3qq3q_miss = pod.matrices_dict["m_xymx"] + pod._translation_h0h + pod._translation_qqq #402 _matrix_m_xyx_3qq3q_miss = pod.matrices_dict["m_xyx"] + pod._translation_h0h + pod._translation_qqq #403 _matrix_m_xxz_3qq3q_miss = pod.matrices_dict["m_xxz"] + pod._translation_h0h + pod._translation_qqq #404 _matrix_m_xyy_3qq3q_miss = pod.matrices_dict["m_xyy"] + pod._translation_h0h + pod._translation_qqq #405 _matrix_2_x00_3qq3q_miss = pod.matrices_dict["2_x00"] + pod._translation_h0h + pod._translation_qqq #406 _matrix_2_0y0_3qq3q_miss = pod.matrices_dict["2_0y0"] + pod._translation_h0h + pod._translation_qqq #407 _matrix_2_00z_3qq3q_miss = pod.matrices_dict["2_00z"] + pod._translation_h0h + pod._translation_qqq #408 _matrix_2_xx0_3qq3q_miss = pod.matrices_dict["2_xx0"] + pod._translation_h0h + pod._translation_qqq #409 _matrix_2_x0x_3qq3q_miss = pod.matrices_dict["2_x0x"] + pod._translation_h0h + pod._translation_qqq #410 _matrix_2_0yy_3qq3q_miss = pod.matrices_dict["2_0yy"] + pod._translation_h0h + pod._translation_qqq #411 _matrix_2_xmx0_3qq3q_miss = pod.matrices_dict["2_xmx0"] + pod._translation_h0h + pod._translation_qqq #412 _matrix_2_mx0x_3qq3q_miss = pod.matrices_dict["2_mx0x"] + pod._translation_h0h + pod._translation_qqq #413 _matrix_2_0myy_3qq3q_miss = pod.matrices_dict["2_0myy"] + pod._translation_h0h + pod._translation_qqq #414 _matrix_3_xxx_3qq3q_miss = pod.matrices_dict["3_xxx"] + pod._translation_h0h + pod._translation_qqq #415 _matrix_3_xmxmx_3qq3q_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_h0h + pod._translation_qqq #416 _matrix_3_mxxmx_3qq3q_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_h0h + 
pod._translation_qqq #417 _matrix_3_mxmxx_3qq3q_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_h0h + pod._translation_qqq #418 _matrix_m3_xxx_3qq3q_miss = pod.matrices_dict["m3_xxx"] + pod._translation_h0h + pod._translation_qqq #419 _matrix_m3_xmxmx_3qq3q_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_h0h + pod._translation_qqq #420 _matrix_m3_mxxmx_3qq3q_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_h0h + pod._translation_qqq #421 _matrix_m3_mxmxx_3qq3q_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_h0h + pod._translation_qqq #422 _matrix_4_x00_3qq3q_miss = pod.matrices_dict["4_x00"] + pod._translation_h0h + pod._translation_qqq #423 _matrix_4_0y0_3qq3q_miss = pod.matrices_dict["4_0y0"] + pod._translation_h0h + pod._translation_qqq #424 _matrix_4_00z_3qq3q_miss = pod.matrices_dict["4_00z"] + pod._translation_h0h + pod._translation_qqq #425 _matrix_m4_x00_3qq3q_miss = pod.matrices_dict["-4_x00"] + pod._translation_h0h + pod._translation_qqq #426 _matrix_m4_0y0_3qq3q_miss = pod.matrices_dict["-4_0y0"] + pod._translation_h0h + pod._translation_qqq #427 _matrix_m4_00z_3qq3q_miss = pod.matrices_dict["-4_00z"] + pod._translation_h0h + pod._translation_qqq #428 _matrix_inv_000_3q3qq_miss = pod.matrices_dict["inv_000"] + pod._translation_hh0 + pod._translation_qqq #429 _matrix_m_0yz_3q3qq_miss = pod.matrices_dict["m_0yz"] + pod._translation_hh0 + pod._translation_qqq #430 _matrix_m_x0z_3q3qq_miss = pod.matrices_dict["m_x0z"] + pod._translation_hh0 + pod._translation_qqq #431 _matrix_m_xy0_3q3qq_miss = pod.matrices_dict["m_xy0"] + pod._translation_hh0 + pod._translation_qqq #432 _matrix_m_xmxz_3q3qq_miss = pod.matrices_dict["m_xmxz"] + pod._translation_hh0 + pod._translation_qqq #433 _matrix_m_xymy_3q3qq_miss = pod.matrices_dict["m_xymy"] + pod._translation_hh0 + pod._translation_qqq #434 _matrix_m_xymx_3q3qq_miss = pod.matrices_dict["m_xymx"] + pod._translation_hh0 + pod._translation_qqq #435 _matrix_m_xyx_3q3qq_miss = 
pod.matrices_dict["m_xyx"] + pod._translation_hh0 + pod._translation_qqq #436 _matrix_m_xxz_3q3qq_miss = pod.matrices_dict["m_xxz"] + pod._translation_hh0 + pod._translation_qqq #437 _matrix_m_xyy_3q3qq_miss = pod.matrices_dict["m_xyy"] + pod._translation_hh0 + pod._translation_qqq #438 _matrix_2_x00_3q3qq_miss = pod.matrices_dict["2_x00"] + pod._translation_hh0 + pod._translation_qqq #439 _matrix_2_0y0_3q3qq_miss = pod.matrices_dict["2_0y0"] + pod._translation_hh0 + pod._translation_qqq #440 _matrix_2_00z_3q3qq_miss = pod.matrices_dict["2_00z"] + pod._translation_hh0 + pod._translation_qqq #441 _matrix_2_xx0_3q3qq_miss = pod.matrices_dict["2_xx0"] + pod._translation_hh0 + pod._translation_qqq #442 _matrix_2_x0x_3q3qq_miss = pod.matrices_dict["2_x0x"] + pod._translation_hh0 + pod._translation_qqq #443 _matrix_2_0yy_3q3qq_miss = pod.matrices_dict["2_0yy"] + pod._translation_hh0 + pod._translation_qqq #444 _matrix_2_xmx0_3q3qq_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hh0 + pod._translation_qqq #445 _matrix_2_mx0x_3q3qq_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hh0 + pod._translation_qqq #446 _matrix_2_0myy_3q3qq_miss = pod.matrices_dict["2_0myy"] + pod._translation_hh0 + pod._translation_qqq #447 _matrix_3_xxx_3q3qq_miss = pod.matrices_dict["3_xxx"] + pod._translation_hh0 + pod._translation_qqq #448 _matrix_3_xmxmx_3q3qq_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hh0 + pod._translation_qqq #449 _matrix_3_mxxmx_3q3qq_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hh0 + pod._translation_qqq #450 _matrix_3_mxmxx_3q3qq_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hh0 + pod._translation_qqq #451 _matrix_m3_xxx_3q3qq_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hh0 + pod._translation_qqq #452 _matrix_m3_xmxmx_3q3qq_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hh0 + pod._translation_qqq #453 _matrix_m3_mxxmx_3q3qq_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hh0 + pod._translation_qqq #454 
_matrix_m3_mxmxx_3q3qq_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hh0 + pod._translation_qqq #455 _matrix_4_x00_3q3qq_miss = pod.matrices_dict["4_x00"] + pod._translation_hh0 + pod._translation_qqq #456 _matrix_4_0y0_3q3qq_miss = pod.matrices_dict["4_0y0"] + pod._translation_hh0 + pod._translation_qqq #457 _matrix_4_00z_3q3qq_miss = pod.matrices_dict["4_00z"] + pod._translation_hh0 + pod._translation_qqq #458 _matrix_m4_x00_3q3qq_miss = pod.matrices_dict["-4_x00"] + pod._translation_hh0 + pod._translation_qqq #459 _matrix_m4_0y0_3q3qq_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hh0 + pod._translation_qqq #460 _matrix_m4_00z_3q3qq_miss = pod.matrices_dict["-4_00z"] + pod._translation_hh0 + pod._translation_qqq #461 _matrix_inv_000_3q3q3q_miss = pod.matrices_dict["inv_000"] + pod._translation_hhh + pod._translation_qqq #462 _matrix_m_0yz_3q3q3q_miss = pod.matrices_dict["m_0yz"] + pod._translation_hhh + pod._translation_qqq #463 _matrix_m_x0z_3q3q3q_miss = pod.matrices_dict["m_x0z"] + pod._translation_hhh + pod._translation_qqq #464 _matrix_m_xy0_3q3q3q_miss = pod.matrices_dict["m_xy0"] + pod._translation_hhh + pod._translation_qqq #465 _matrix_m_xmxz_3q3q3q_miss = pod.matrices_dict["m_xmxz"] + pod._translation_hhh + pod._translation_qqq #466 _matrix_m_xymy_3q3q3q_miss = pod.matrices_dict["m_xymy"] + pod._translation_hhh + pod._translation_qqq #467 _matrix_m_xymx_3q3q3q_miss = pod.matrices_dict["m_xymx"] + pod._translation_hhh + pod._translation_qqq #468 _matrix_m_xyx_3q3q3q_miss = pod.matrices_dict["m_xyx"] + pod._translation_hhh + pod._translation_qqq #469 _matrix_m_xxz_3q3q3q_miss = pod.matrices_dict["m_xxz"] + pod._translation_hhh + pod._translation_qqq #470 _matrix_m_xyy_3q3q3q_miss = pod.matrices_dict["m_xyy"] + pod._translation_hhh + pod._translation_qqq #471 _matrix_2_x00_3q3q3q_miss = pod.matrices_dict["2_x00"] + pod._translation_hhh + pod._translation_qqq #472 _matrix_2_0y0_3q3q3q_miss = pod.matrices_dict["2_0y0"] + 
pod._translation_hhh + pod._translation_qqq #473 _matrix_2_00z_3q3q3q_miss = pod.matrices_dict["2_00z"] + pod._translation_hhh + pod._translation_qqq #474 _matrix_2_xx0_3q3q3q_miss = pod.matrices_dict["2_xx0"] + pod._translation_hhh + pod._translation_qqq #475 _matrix_2_x0x_3q3q3q_miss = pod.matrices_dict["2_x0x"] + pod._translation_hhh + pod._translation_qqq #476 _matrix_2_0yy_3q3q3q_miss = pod.matrices_dict["2_0yy"] + pod._translation_hhh + pod._translation_qqq #477 _matrix_2_xmx0_3q3q3q_miss = pod.matrices_dict["2_xmx0"] + pod._translation_hhh + pod._translation_qqq #478 _matrix_2_mx0x_3q3q3q_miss = pod.matrices_dict["2_mx0x"] + pod._translation_hhh + pod._translation_qqq #479 _matrix_2_0myy_3q3q3q_miss = pod.matrices_dict["2_0myy"] + pod._translation_hhh + pod._translation_qqq #480 _matrix_3_xxx_3q3q3q_miss = pod.matrices_dict["3_xxx"] + pod._translation_hhh + pod._translation_qqq #481 _matrix_3_xmxmx_3q3q3q_miss = pod.matrices_dict["3_xmxmx"] + pod._translation_hhh + pod._translation_qqq #482 _matrix_3_mxxmx_3q3q3q_miss = pod.matrices_dict["3_mxxmx"] + pod._translation_hhh + pod._translation_qqq #483 _matrix_3_mxmxx_3q3q3q_miss = pod.matrices_dict["3_mxmxx"] + pod._translation_hhh + pod._translation_qqq #484 _matrix_m3_xxx_3q3q3q_miss = pod.matrices_dict["m3_xxx"] + pod._translation_hhh + pod._translation_qqq #485 _matrix_m3_xmxmx_3q3q3q_miss = pod.matrices_dict["m3_xmxmx"] + pod._translation_hhh + pod._translation_qqq #486 _matrix_m3_mxxmx_3q3q3q_miss = pod.matrices_dict["m3_mxxmx"] + pod._translation_hhh + pod._translation_qqq #487 _matrix_m3_mxmxx_3q3q3q_miss = pod.matrices_dict["m3_mxmxx"] + pod._translation_hhh + pod._translation_qqq #488 _matrix_4_x00_3q3q3q_miss = pod.matrices_dict["4_x00"] + pod._translation_hhh + pod._translation_qqq #489 _matrix_4_0y0_3q3q3q_miss = pod.matrices_dict["4_0y0"] + pod._translation_hhh + pod._translation_qqq #490 _matrix_4_00z_3q3q3q_miss = pod.matrices_dict["4_00z"] + pod._translation_hhh + pod._translation_qqq #491 
_matrix_m4_x00_3q3q3q_miss = pod.matrices_dict["-4_x00"] + pod._translation_hhh + pod._translation_qqq #492 _matrix_m4_0y0_3q3q3q_miss = pod.matrices_dict["-4_0y0"] + pod._translation_hhh + pod._translation_qqq #493 _matrix_m4_00z_3q3q3q_miss = pod.matrices_dict["-4_00z"] + pod._translation_hhh + pod._translation_qqq #494 all_missing_matrices = [ _matrix_inv_000_h00_miss, _matrix_m_0yz_h00_miss, _matrix_m_x0z_h00_miss, _matrix_m_xy0_h00_miss, _matrix_m_xmxz_h00_miss, _matrix_m_xymy_h00_miss, _matrix_m_xymx_h00_miss, _matrix_m_xyx_h00_miss, _matrix_m_xxz_h00_miss, _matrix_m_xyy_h00_miss, _matrix_2_x00_h00_miss, _matrix_2_0y0_h00_miss, _matrix_2_00z_h00_miss, _matrix_2_xx0_h00_miss, _matrix_2_x0x_h00_miss, _matrix_2_0yy_h00_miss, _matrix_2_xmx0_h00_miss, _matrix_2_mx0x_h00_miss, _matrix_2_0myy_h00_miss, _matrix_3_xxx_h00_miss, _matrix_3_xmxmx_h00_miss, _matrix_3_mxxmx_h00_miss, _matrix_3_mxmxx_h00_miss, _matrix_m3_xxx_h00_miss, _matrix_m3_xmxmx_h00_miss, _matrix_m3_mxxmx_h00_miss, _matrix_m3_mxmxx_h00_miss, _matrix_4_x00_h00_miss, _matrix_4_0y0_h00_miss, _matrix_4_00z_h00_miss, _matrix_m4_x00_h00_miss, _matrix_m4_0y0_h00_miss, _matrix_m4_00z_h00_miss, _matrix_inv_000_0h0_miss, _matrix_m_0yz_0h0_miss, _matrix_m_x0z_0h0_miss, _matrix_m_xy0_0h0_miss, _matrix_m_xmxz_0h0_miss, _matrix_m_xymy_0h0_miss, _matrix_m_xymx_0h0_miss, _matrix_m_xyx_0h0_miss, _matrix_m_xxz_0h0_miss, _matrix_m_xyy_0h0_miss, _matrix_2_x00_0h0_miss, _matrix_2_0y0_0h0_miss, _matrix_2_00z_0h0_miss, _matrix_2_xx0_0h0_miss, _matrix_2_x0x_0h0_miss, _matrix_2_0yy_0h0_miss, _matrix_2_xmx0_0h0_miss, _matrix_2_mx0x_0h0_miss, _matrix_2_0myy_0h0_miss, _matrix_3_xxx_0h0_miss, _matrix_3_xmxmx_0h0_miss, _matrix_3_mxxmx_0h0_miss, _matrix_3_mxmxx_0h0_miss, _matrix_m3_xxx_0h0_miss, _matrix_m3_xmxmx_0h0_miss, _matrix_m3_mxxmx_0h0_miss, _matrix_m3_mxmxx_0h0_miss, _matrix_4_x00_0h0_miss, _matrix_4_0y0_0h0_miss, _matrix_4_00z_0h0_miss, _matrix_m4_x00_0h0_miss, _matrix_m4_0y0_0h0_miss, _matrix_m4_00z_0h0_miss, 
_matrix_inv_000_00h_miss, _matrix_m_0yz_00h_miss, _matrix_m_x0z_00h_miss, _matrix_m_xy0_00h_miss, _matrix_m_xmxz_00h_miss, _matrix_m_xymy_00h_miss, _matrix_m_xymx_00h_miss, _matrix_m_xyx_00h_miss, _matrix_m_xxz_00h_miss, _matrix_m_xyy_00h_miss, _matrix_2_x00_00h_miss, _matrix_2_0y0_00h_miss, _matrix_2_00z_00h_miss, _matrix_2_xx0_00h_miss, _matrix_2_x0x_00h_miss, _matrix_2_0yy_00h_miss, _matrix_2_xmx0_00h_miss, _matrix_2_mx0x_00h_miss, _matrix_2_0myy_00h_miss, _matrix_3_xxx_00h_miss, _matrix_3_xmxmx_00h_miss, _matrix_3_mxxmx_00h_miss, _matrix_3_mxmxx_00h_miss, _matrix_m3_xxx_00h_miss, _matrix_m3_xmxmx_00h_miss, _matrix_m3_mxxmx_00h_miss, _matrix_m3_mxmxx_00h_miss, _matrix_4_x00_00h_miss, _matrix_4_0y0_00h_miss, _matrix_4_00z_00h_miss, _matrix_m4_x00_00h_miss, _matrix_m4_0y0_00h_miss, _matrix_m4_00z_00h_miss, _matrix_inv_000_0hh_miss, _matrix_m_0yz_0hh_miss, _matrix_m_x0z_0hh_miss, _matrix_m_xy0_0hh_miss, _matrix_m_xmxz_0hh_miss, _matrix_m_xymy_0hh_miss, _matrix_m_xymx_0hh_miss, _matrix_m_xyx_0hh_miss, _matrix_m_xxz_0hh_miss, _matrix_m_xyy_0hh_miss, _matrix_2_x00_0hh_miss, _matrix_2_0y0_0hh_miss, _matrix_2_00z_0hh_miss, _matrix_2_xx0_0hh_miss, _matrix_2_x0x_0hh_miss, _matrix_2_0yy_0hh_miss, _matrix_2_xmx0_0hh_miss, _matrix_2_mx0x_0hh_miss, _matrix_2_0myy_0hh_miss, _matrix_3_xxx_0hh_miss, _matrix_3_xmxmx_0hh_miss, _matrix_3_mxxmx_0hh_miss, _matrix_3_mxmxx_0hh_miss, _matrix_m3_xxx_0hh_miss, _matrix_m3_xmxmx_0hh_miss, _matrix_m3_mxxmx_0hh_miss, _matrix_m3_mxmxx_0hh_miss, _matrix_4_x00_0hh_miss, _matrix_4_0y0_0hh_miss, _matrix_4_00z_0hh_miss, _matrix_m4_x00_0hh_miss, _matrix_m4_0y0_0hh_miss, _matrix_m4_00z_0hh_miss, _matrix_inv_000_h0h_miss, _matrix_m_0yz_h0h_miss, _matrix_m_x0z_h0h_miss, _matrix_m_xy0_h0h_miss, _matrix_m_xmxz_h0h_miss, _matrix_m_xymy_h0h_miss, _matrix_m_xymx_h0h_miss, _matrix_m_xyx_h0h_miss, _matrix_m_xxz_h0h_miss, _matrix_m_xyy_h0h_miss, _matrix_2_x00_h0h_miss, _matrix_2_0y0_h0h_miss, _matrix_2_00z_h0h_miss, _matrix_2_xx0_h0h_miss, 
_matrix_2_x0x_h0h_miss, _matrix_2_0yy_h0h_miss, _matrix_2_xmx0_h0h_miss, _matrix_2_mx0x_h0h_miss, _matrix_2_0myy_h0h_miss, _matrix_3_xxx_h0h_miss, _matrix_3_xmxmx_h0h_miss, _matrix_3_mxxmx_h0h_miss, _matrix_3_mxmxx_h0h_miss, _matrix_m3_xxx_h0h_miss, _matrix_m3_xmxmx_h0h_miss, _matrix_m3_mxxmx_h0h_miss, _matrix_m3_mxmxx_h0h_miss, _matrix_4_x00_h0h_miss, _matrix_4_0y0_h0h_miss, _matrix_4_00z_h0h_miss, _matrix_m4_x00_h0h_miss, _matrix_m4_0y0_h0h_miss, _matrix_m4_00z_h0h_miss, _matrix_inv_000_hh0_miss, _matrix_m_0yz_hh0_miss, _matrix_m_x0z_hh0_miss, _matrix_m_xy0_hh0_miss, _matrix_m_xmxz_hh0_miss, _matrix_m_xymy_hh0_miss, _matrix_m_xymx_hh0_miss, _matrix_m_xyx_hh0_miss, _matrix_m_xxz_hh0_miss, _matrix_m_xyy_hh0_miss, _matrix_2_x00_hh0_miss, _matrix_2_0y0_hh0_miss, _matrix_2_00z_hh0_miss, _matrix_2_xx0_hh0_miss, _matrix_2_x0x_hh0_miss, _matrix_2_0yy_hh0_miss, _matrix_2_xmx0_hh0_miss, _matrix_2_mx0x_hh0_miss, _matrix_2_0myy_hh0_miss, _matrix_3_xxx_hh0_miss, _matrix_3_xmxmx_hh0_miss, _matrix_3_mxxmx_hh0_miss, _matrix_3_mxmxx_hh0_miss, _matrix_m3_xxx_hh0_miss, _matrix_m3_xmxmx_hh0_miss, _matrix_m3_mxxmx_hh0_miss, _matrix_m3_mxmxx_hh0_miss, _matrix_4_x00_hh0_miss, _matrix_4_0y0_hh0_miss, _matrix_4_00z_hh0_miss, _matrix_m4_x00_hh0_miss, _matrix_m4_0y0_hh0_miss, _matrix_m4_00z_hh0_miss, _matrix_inv_000_hhh_miss, _matrix_m_0yz_hhh_miss, _matrix_m_x0z_hhh_miss, _matrix_m_xy0_hhh_miss, _matrix_m_xmxz_hhh_miss, _matrix_m_xymy_hhh_miss, _matrix_m_xymx_hhh_miss, _matrix_m_xyx_hhh_miss, _matrix_m_xxz_hhh_miss, _matrix_m_xyy_hhh_miss, _matrix_2_x00_hhh_miss, _matrix_2_0y0_hhh_miss, _matrix_2_00z_hhh_miss, _matrix_2_xx0_hhh_miss, _matrix_2_x0x_hhh_miss, _matrix_2_0yy_hhh_miss, _matrix_2_xmx0_hhh_miss, _matrix_2_mx0x_hhh_miss, _matrix_2_0myy_hhh_miss, _matrix_3_xxx_hhh_miss, _matrix_3_xmxmx_hhh_miss, _matrix_3_mxxmx_hhh_miss, _matrix_3_mxmxx_hhh_miss, _matrix_m3_xxx_hhh_miss, _matrix_m3_xmxmx_hhh_miss, _matrix_m3_mxxmx_hhh_miss, _matrix_m3_mxmxx_hhh_miss, _matrix_4_x00_hhh_miss, 
_matrix_4_0y0_hhh_miss, _matrix_4_00z_hhh_miss, _matrix_m4_x00_hhh_miss, _matrix_m4_0y0_hhh_miss, _matrix_m4_00z_hhh_miss, _matrix_inv_000_qqq_miss, _matrix_m_0yz_qqq_miss, _matrix_m_x0z_qqq_miss, _matrix_m_xy0_qqq_miss, _matrix_m_xmxz_qqq_miss, _matrix_m_xymy_qqq_miss, _matrix_m_xymx_qqq_miss, _matrix_m_xyx_qqq_miss, _matrix_m_xxz_qqq_miss, _matrix_m_xyy_qqq_miss, _matrix_2_x00_qqq_miss, _matrix_2_0y0_qqq_miss, _matrix_2_00z_qqq_miss, _matrix_2_xx0_qqq_miss, _matrix_2_x0x_qqq_miss, _matrix_2_0yy_qqq_miss, _matrix_2_xmx0_qqq_miss, _matrix_2_mx0x_qqq_miss, _matrix_2_0myy_qqq_miss, _matrix_3_xxx_qqq_miss, _matrix_3_xmxmx_qqq_miss, _matrix_3_mxxmx_qqq_miss, _matrix_3_mxmxx_qqq_miss, _matrix_m3_xxx_qqq_miss, _matrix_m3_xmxmx_qqq_miss, _matrix_m3_mxxmx_qqq_miss, _matrix_m3_mxmxx_qqq_miss, _matrix_4_x00_qqq_miss, _matrix_4_0y0_qqq_miss, _matrix_4_00z_qqq_miss, _matrix_m4_x00_qqq_miss, _matrix_m4_0y0_qqq_miss, _matrix_m4_00z_qqq_miss, _matrix_inv_000_3qqq_miss, _matrix_m_0yz_3qqq_miss, _matrix_m_x0z_3qqq_miss, _matrix_m_xy0_3qqq_miss, _matrix_m_xmxz_3qqq_miss, _matrix_m_xymy_3qqq_miss, _matrix_m_xymx_3qqq_miss, _matrix_m_xyx_3qqq_miss, _matrix_m_xxz_3qqq_miss, _matrix_m_xyy_3qqq_miss, _matrix_2_x00_3qqq_miss, _matrix_2_0y0_3qqq_miss, _matrix_2_00z_3qqq_miss, _matrix_2_xx0_3qqq_miss, _matrix_2_x0x_3qqq_miss, _matrix_2_0yy_3qqq_miss, _matrix_2_xmx0_3qqq_miss, _matrix_2_mx0x_3qqq_miss, _matrix_2_0myy_3qqq_miss, _matrix_3_xxx_3qqq_miss, _matrix_3_xmxmx_3qqq_miss, _matrix_3_mxxmx_3qqq_miss, _matrix_3_mxmxx_3qqq_miss, _matrix_m3_xxx_3qqq_miss, _matrix_m3_xmxmx_3qqq_miss, _matrix_m3_mxxmx_3qqq_miss, _matrix_m3_mxmxx_3qqq_miss, _matrix_4_x00_3qqq_miss, _matrix_4_0y0_3qqq_miss, _matrix_4_00z_3qqq_miss, _matrix_m4_x00_3qqq_miss, _matrix_m4_0y0_3qqq_miss, _matrix_m4_00z_3qqq_miss, _matrix_inv_000_q3qq_miss, _matrix_m_0yz_q3qq_miss, _matrix_m_x0z_q3qq_miss, _matrix_m_xy0_q3qq_miss, _matrix_m_xmxz_q3qq_miss, _matrix_m_xymy_q3qq_miss, _matrix_m_xymx_q3qq_miss, _matrix_m_xyx_q3qq_miss, 
_matrix_m_xxz_q3qq_miss, _matrix_m_xyy_q3qq_miss, _matrix_2_x00_q3qq_miss, _matrix_2_0y0_q3qq_miss, _matrix_2_00z_q3qq_miss, _matrix_2_xx0_q3qq_miss, _matrix_2_x0x_q3qq_miss, _matrix_2_0yy_q3qq_miss, _matrix_2_xmx0_q3qq_miss, _matrix_2_mx0x_q3qq_miss, _matrix_2_0myy_q3qq_miss, _matrix_3_xxx_q3qq_miss, _matrix_3_xmxmx_q3qq_miss, _matrix_3_mxxmx_q3qq_miss, _matrix_3_mxmxx_q3qq_miss, _matrix_m3_xxx_q3qq_miss, _matrix_m3_xmxmx_q3qq_miss, _matrix_m3_mxxmx_q3qq_miss, _matrix_m3_mxmxx_q3qq_miss, _matrix_4_x00_q3qq_miss, _matrix_4_0y0_q3qq_miss, _matrix_4_00z_q3qq_miss, _matrix_m4_x00_q3qq_miss, _matrix_m4_0y0_q3qq_miss, _matrix_m4_00z_q3qq_miss, _matrix_inv_000_qq3q_miss, _matrix_m_0yz_qq3q_miss, _matrix_m_x0z_qq3q_miss, _matrix_m_xy0_qq3q_miss, _matrix_m_xmxz_qq3q_miss, _matrix_m_xymy_qq3q_miss, _matrix_m_xymx_qq3q_miss, _matrix_m_xyx_qq3q_miss, _matrix_m_xxz_qq3q_miss, _matrix_m_xyy_qq3q_miss, _matrix_2_x00_qq3q_miss, _matrix_2_0y0_qq3q_miss, _matrix_2_00z_qq3q_miss, _matrix_2_xx0_qq3q_miss, _matrix_2_x0x_qq3q_miss, _matrix_2_0yy_qq3q_miss, _matrix_2_xmx0_qq3q_miss, _matrix_2_mx0x_qq3q_miss, _matrix_2_0myy_qq3q_miss, _matrix_3_xxx_qq3q_miss, _matrix_3_xmxmx_qq3q_miss, _matrix_3_mxxmx_qq3q_miss, _matrix_3_mxmxx_qq3q_miss, _matrix_m3_xxx_qq3q_miss, _matrix_m3_xmxmx_qq3q_miss, _matrix_m3_mxxmx_qq3q_miss, _matrix_m3_mxmxx_qq3q_miss, _matrix_4_x00_qq3q_miss, _matrix_4_0y0_qq3q_miss, _matrix_4_00z_qq3q_miss, _matrix_m4_x00_qq3q_miss, _matrix_m4_0y0_qq3q_miss, _matrix_m4_00z_qq3q_miss, _matrix_inv_000_q3q3q_miss, _matrix_m_0yz_q3q3q_miss, _matrix_m_x0z_q3q3q_miss, _matrix_m_xy0_q3q3q_miss, _matrix_m_xmxz_q3q3q_miss, _matrix_m_xymy_q3q3q_miss, _matrix_m_xymx_q3q3q_miss, _matrix_m_xyx_q3q3q_miss, _matrix_m_xxz_q3q3q_miss, _matrix_m_xyy_q3q3q_miss, _matrix_2_x00_q3q3q_miss, _matrix_2_0y0_q3q3q_miss, _matrix_2_00z_q3q3q_miss, _matrix_2_xx0_q3q3q_miss, _matrix_2_x0x_q3q3q_miss, _matrix_2_0yy_q3q3q_miss, _matrix_2_xmx0_q3q3q_miss, _matrix_2_mx0x_q3q3q_miss, 
_matrix_2_0myy_q3q3q_miss, _matrix_3_xxx_q3q3q_miss, _matrix_3_xmxmx_q3q3q_miss, _matrix_3_mxxmx_q3q3q_miss, _matrix_3_mxmxx_q3q3q_miss, _matrix_m3_xxx_q3q3q_miss, _matrix_m3_xmxmx_q3q3q_miss, _matrix_m3_mxxmx_q3q3q_miss, _matrix_m3_mxmxx_q3q3q_miss, _matrix_4_x00_q3q3q_miss, _matrix_4_0y0_q3q3q_miss, _matrix_4_00z_q3q3q_miss, _matrix_m4_x00_q3q3q_miss, _matrix_m4_0y0_q3q3q_miss, _matrix_m4_00z_q3q3q_miss, _matrix_inv_000_3qq3q_miss, _matrix_m_0yz_3qq3q_miss, _matrix_m_x0z_3qq3q_miss, _matrix_m_xy0_3qq3q_miss, _matrix_m_xmxz_3qq3q_miss, _matrix_m_xymy_3qq3q_miss, _matrix_m_xymx_3qq3q_miss, _matrix_m_xyx_3qq3q_miss, _matrix_m_xxz_3qq3q_miss, _matrix_m_xyy_3qq3q_miss, _matrix_2_x00_3qq3q_miss, _matrix_2_0y0_3qq3q_miss, _matrix_2_00z_3qq3q_miss, _matrix_2_xx0_3qq3q_miss, _matrix_2_x0x_3qq3q_miss, _matrix_2_0yy_3qq3q_miss, _matrix_2_xmx0_3qq3q_miss, _matrix_2_mx0x_3qq3q_miss, _matrix_2_0myy_3qq3q_miss, _matrix_3_xxx_3qq3q_miss, _matrix_3_xmxmx_3qq3q_miss, _matrix_3_mxxmx_3qq3q_miss, _matrix_3_mxmxx_3qq3q_miss, _matrix_m3_xxx_3qq3q_miss, _matrix_m3_xmxmx_3qq3q_miss, _matrix_m3_mxxmx_3qq3q_miss, _matrix_m3_mxmxx_3qq3q_miss, _matrix_4_x00_3qq3q_miss, _matrix_4_0y0_3qq3q_miss, _matrix_4_00z_3qq3q_miss, _matrix_m4_x00_3qq3q_miss, _matrix_m4_0y0_3qq3q_miss, _matrix_m4_00z_3qq3q_miss, _matrix_inv_000_3q3qq_miss, _matrix_m_0yz_3q3qq_miss, _matrix_m_x0z_3q3qq_miss, _matrix_m_xy0_3q3qq_miss, _matrix_m_xmxz_3q3qq_miss, _matrix_m_xymy_3q3qq_miss, _matrix_m_xymx_3q3qq_miss, _matrix_m_xyx_3q3qq_miss, _matrix_m_xxz_3q3qq_miss, _matrix_m_xyy_3q3qq_miss, _matrix_2_x00_3q3qq_miss, _matrix_2_0y0_3q3qq_miss, _matrix_2_00z_3q3qq_miss, _matrix_2_xx0_3q3qq_miss, _matrix_2_x0x_3q3qq_miss, _matrix_2_0yy_3q3qq_miss, _matrix_2_xmx0_3q3qq_miss, _matrix_2_mx0x_3q3qq_miss, _matrix_2_0myy_3q3qq_miss, _matrix_3_xxx_3q3qq_miss, _matrix_3_xmxmx_3q3qq_miss, _matrix_3_mxxmx_3q3qq_miss, _matrix_3_mxmxx_3q3qq_miss, _matrix_m3_xxx_3q3qq_miss, _matrix_m3_xmxmx_3q3qq_miss, _matrix_m3_mxxmx_3q3qq_miss, 
_matrix_m3_mxmxx_3q3qq_miss, _matrix_4_x00_3q3qq_miss, _matrix_4_0y0_3q3qq_miss, _matrix_4_00z_3q3qq_miss, _matrix_m4_x00_3q3qq_miss, _matrix_m4_0y0_3q3qq_miss, _matrix_m4_00z_3q3qq_miss, _matrix_inv_000_3q3q3q_miss, _matrix_m_0yz_3q3q3q_miss, _matrix_m_x0z_3q3q3q_miss, _matrix_m_xy0_3q3q3q_miss, _matrix_m_xmxz_3q3q3q_miss, _matrix_m_xymy_3q3q3q_miss, _matrix_m_xymx_3q3q3q_miss, _matrix_m_xyx_3q3q3q_miss, _matrix_m_xxz_3q3q3q_miss, _matrix_m_xyy_3q3q3q_miss, _matrix_2_x00_3q3q3q_miss, _matrix_2_0y0_3q3q3q_miss, _matrix_2_00z_3q3q3q_miss, _matrix_2_xx0_3q3q3q_miss, _matrix_2_x0x_3q3q3q_miss, _matrix_2_0yy_3q3q3q_miss, _matrix_2_xmx0_3q3q3q_miss, _matrix_2_mx0x_3q3q3q_miss, _matrix_2_0myy_3q3q3q_miss, _matrix_3_xxx_3q3q3q_miss, _matrix_3_xmxmx_3q3q3q_miss, _matrix_3_mxxmx_3q3q3q_miss, _matrix_3_mxmxx_3q3q3q_miss, _matrix_m3_xxx_3q3q3q_miss, _matrix_m3_xmxmx_3q3q3q_miss, _matrix_m3_mxxmx_3q3q3q_miss, _matrix_m3_mxmxx_3q3q3q_miss, _matrix_4_x00_3q3q3q_miss, _matrix_4_0y0_3q3q3q_miss, _matrix_4_00z_3q3q3q_miss, _matrix_m4_x00_3q3q3q_miss, _matrix_m4_0y0_3q3q3q_miss, _matrix_m4_00z_3q3q3q_miss, ] all_missing_matrices_labels = [ "miss_inv_000_h00_miss", "miss_m_0yz_h00_miss", "miss_m_x0z_h00_miss", "miss_m_xy0_h00_miss", "miss_m_xmxz_h00_miss", "miss_m_xymy_h00_miss", "miss_m_xymx_h00_miss", "miss_m_xyx_h00_miss", "miss_m_xxz_h00_miss", "miss_m_xyy_h00_miss", "miss_2_x00_h00_miss", "miss_2_0y0_h00_miss", "miss_2_00z_h00_miss", "miss_2_xx0_h00_miss", "miss_2_x0x_h00_miss", "miss_2_0yy_h00_miss", "miss_2_xmx0_h00_miss", "miss_2_mx0x_h00_miss", "miss_2_0myy_h00_miss", "miss_3_xxx_h00_miss", "miss_3_xmxmx_h00_miss", "miss_3_mxxmx_h00_miss", "miss_3_mxmxx_h00_miss", "miss_m3_xxx_h00_miss", "miss_m3_xmxmx_h00_miss", "miss_m3_mxxmx_h00_miss", "miss_m3_mxmxx_h00_miss", "miss_4_x00_h00_miss", "miss_4_0y0_h00_miss", "miss_4_00z_h00_miss", "miss_m4_x00_h00_miss", "miss_m4_0y0_h00_miss", "miss_m4_00z_h00_miss", "miss_inv_000_0h0_miss", "miss_m_0yz_0h0_miss", "miss_m_x0z_0h0_miss", 
"miss_m_xy0_0h0_miss", "miss_m_xmxz_0h0_miss", "miss_m_xymy_0h0_miss", "miss_m_xymx_0h0_miss", "miss_m_xyx_0h0_miss", "miss_m_xxz_0h0_miss", "miss_m_xyy_0h0_miss", "miss_2_x00_0h0_miss", "miss_2_0y0_0h0_miss", "miss_2_00z_0h0_miss", "miss_2_xx0_0h0_miss", "miss_2_x0x_0h0_miss", "miss_2_0yy_0h0_miss", "miss_2_xmx0_0h0_miss", "miss_2_mx0x_0h0_miss", "miss_2_0myy_0h0_miss", "miss_3_xxx_0h0_miss", "miss_3_xmxmx_0h0_miss", "miss_3_mxxmx_0h0_miss", "miss_3_mxmxx_0h0_miss", "miss_m3_xxx_0h0_miss", "miss_m3_xmxmx_0h0_miss", "miss_m3_mxxmx_0h0_miss", "miss_m3_mxmxx_0h0_miss", "miss_4_x00_0h0_miss", "miss_4_0y0_0h0_miss", "miss_4_00z_0h0_miss", "miss_m4_x00_0h0_miss", "miss_m4_0y0_0h0_miss", "miss_m4_00z_0h0_miss", "miss_inv_000_00h_miss", "miss_m_0yz_00h_miss", "miss_m_x0z_00h_miss", "miss_m_xy0_00h_miss", "miss_m_xmxz_00h_miss", "miss_m_xymy_00h_miss", "miss_m_xymx_00h_miss", "miss_m_xyx_00h_miss", "miss_m_xxz_00h_miss", "miss_m_xyy_00h_miss", "miss_2_x00_00h_miss", "miss_2_0y0_00h_miss", "miss_2_00z_00h_miss", "miss_2_xx0_00h_miss", "miss_2_x0x_00h_miss", "miss_2_0yy_00h_miss", "miss_2_xmx0_00h_miss", "miss_2_mx0x_00h_miss", "miss_2_0myy_00h_miss", "miss_3_xxx_00h_miss", "miss_3_xmxmx_00h_miss", "miss_3_mxxmx_00h_miss", "miss_3_mxmxx_00h_miss", "miss_m3_xxx_00h_miss", "miss_m3_xmxmx_00h_miss", "miss_m3_mxxmx_00h_miss", "miss_m3_mxmxx_00h_miss", "miss_4_x00_00h_miss", "miss_4_0y0_00h_miss", "miss_4_00z_00h_miss", "miss_m4_x00_00h_miss", "miss_m4_0y0_00h_miss", "miss_m4_00z_00h_miss", "miss_inv_000_0hh_miss", "miss_m_0yz_0hh_miss", "miss_m_x0z_0hh_miss", "miss_m_xy0_0hh_miss", "miss_m_xmxz_0hh_miss", "miss_m_xymy_0hh_miss", "miss_m_xymx_0hh_miss", "miss_m_xyx_0hh_miss", "miss_m_xxz_0hh_miss", "miss_m_xyy_0hh_miss", "miss_2_x00_0hh_miss", "miss_2_0y0_0hh_miss", "miss_2_00z_0hh_miss", "miss_2_xx0_0hh_miss", "miss_2_x0x_0hh_miss", "miss_2_0yy_0hh_miss", "miss_2_xmx0_0hh_miss", "miss_2_mx0x_0hh_miss", "miss_2_0myy_0hh_miss", "miss_3_xxx_0hh_miss", "miss_3_xmxmx_0hh_miss", 
"miss_3_mxxmx_0hh_miss", "miss_3_mxmxx_0hh_miss", "miss_m3_xxx_0hh_miss", "miss_m3_xmxmx_0hh_miss", "miss_m3_mxxmx_0hh_miss", "miss_m3_mxmxx_0hh_miss", "miss_4_x00_0hh_miss", "miss_4_0y0_0hh_miss", "miss_4_00z_0hh_miss", "miss_m4_x00_0hh_miss", "miss_m4_0y0_0hh_miss", "miss_m4_00z_0hh_miss", "miss_inv_000_h0h_miss", "miss_m_0yz_h0h_miss", "miss_m_x0z_h0h_miss", "miss_m_xy0_h0h_miss", "miss_m_xmxz_h0h_miss", "miss_m_xymy_h0h_miss", "miss_m_xymx_h0h_miss", "miss_m_xyx_h0h_miss", "miss_m_xxz_h0h_miss", "miss_m_xyy_h0h_miss", "miss_2_x00_h0h_miss", "miss_2_0y0_h0h_miss", "miss_2_00z_h0h_miss", "miss_2_xx0_h0h_miss", "miss_2_x0x_h0h_miss", "miss_2_0yy_h0h_miss", "miss_2_xmx0_h0h_miss", "miss_2_mx0x_h0h_miss", "miss_2_0myy_h0h_miss", "miss_3_xxx_h0h_miss", "miss_3_xmxmx_h0h_miss", "miss_3_mxxmx_h0h_miss", "miss_3_mxmxx_h0h_miss", "miss_m3_xxx_h0h_miss", "miss_m3_xmxmx_h0h_miss", "miss_m3_mxxmx_h0h_miss", "miss_m3_mxmxx_h0h_miss", "miss_4_x00_h0h_miss", "miss_4_0y0_h0h_miss", "miss_4_00z_h0h_miss", "miss_m4_x00_h0h_miss", "miss_m4_0y0_h0h_miss", "miss_m4_00z_h0h_miss", "miss_inv_000_hh0_miss", "miss_m_0yz_hh0_miss", "miss_m_x0z_hh0_miss", "miss_m_xy0_hh0_miss", "miss_m_xmxz_hh0_miss", "miss_m_xymy_hh0_miss", "miss_m_xymx_hh0_miss", "miss_m_xyx_hh0_miss", "miss_m_xxz_hh0_miss", "miss_m_xyy_hh0_miss", "miss_2_x00_hh0_miss", "miss_2_0y0_hh0_miss", "miss_2_00z_hh0_miss", "miss_2_xx0_hh0_miss", "miss_2_x0x_hh0_miss", "miss_2_0yy_hh0_miss", "miss_2_xmx0_hh0_miss", "miss_2_mx0x_hh0_miss", "miss_2_0myy_hh0_miss", "miss_3_xxx_hh0_miss", "miss_3_xmxmx_hh0_miss", "miss_3_mxxmx_hh0_miss", "miss_3_mxmxx_hh0_miss", "miss_m3_xxx_hh0_miss", "miss_m3_xmxmx_hh0_miss", "miss_m3_mxxmx_hh0_miss", "miss_m3_mxmxx_hh0_miss", "miss_4_x00_hh0_miss", "miss_4_0y0_hh0_miss", "miss_4_00z_hh0_miss", "miss_m4_x00_hh0_miss", "miss_m4_0y0_hh0_miss", "miss_m4_00z_hh0_miss", "miss_inv_000_hhh_miss", "miss_m_0yz_hhh_miss", "miss_m_x0z_hhh_miss", "miss_m_xy0_hhh_miss", "miss_m_xmxz_hhh_miss", 
"miss_m_xymy_hhh_miss", "miss_m_xymx_hhh_miss", "miss_m_xyx_hhh_miss", "miss_m_xxz_hhh_miss", "miss_m_xyy_hhh_miss", "miss_2_x00_hhh_miss", "miss_2_0y0_hhh_miss", "miss_2_00z_hhh_miss", "miss_2_xx0_hhh_miss", "miss_2_x0x_hhh_miss", "miss_2_0yy_hhh_miss", "miss_2_xmx0_hhh_miss", "miss_2_mx0x_hhh_miss", "miss_2_0myy_hhh_miss", "miss_3_xxx_hhh_miss", "miss_3_xmxmx_hhh_miss", "miss_3_mxxmx_hhh_miss", "miss_3_mxmxx_hhh_miss", "miss_m3_xxx_hhh_miss", "miss_m3_xmxmx_hhh_miss", "miss_m3_mxxmx_hhh_miss", "miss_m3_mxmxx_hhh_miss", "miss_4_x00_hhh_miss", "miss_4_0y0_hhh_miss", "miss_4_00z_hhh_miss", "miss_m4_x00_hhh_miss", "miss_m4_0y0_hhh_miss", "miss_m4_00z_hhh_miss", "miss_inv_000_qqq_miss", "miss_m_0yz_qqq_miss", "miss_m_x0z_qqq_miss", "miss_m_xy0_qqq_miss", "miss_m_xmxz_qqq_miss", "miss_m_xymy_qqq_miss", "miss_m_xymx_qqq_miss", "miss_m_xyx_qqq_miss", "miss_m_xxz_qqq_miss", "miss_m_xyy_qqq_miss", "miss_2_x00_qqq_miss", "miss_2_0y0_qqq_miss", "miss_2_00z_qqq_miss", "miss_2_xx0_qqq_miss", "miss_2_x0x_qqq_miss", "miss_2_0yy_qqq_miss", "miss_2_xmx0_qqq_miss", "miss_2_mx0x_qqq_miss", "miss_2_0myy_qqq_miss", "miss_3_xxx_qqq_miss", "miss_3_xmxmx_qqq_miss", "miss_3_mxxmx_qqq_miss", "miss_3_mxmxx_qqq_miss", "miss_m3_xxx_qqq_miss", "miss_m3_xmxmx_qqq_miss", "miss_m3_mxxmx_qqq_miss", "miss_m3_mxmxx_qqq_miss", "miss_4_x00_qqq_miss", "miss_4_0y0_qqq_miss", "miss_4_00z_qqq_miss", "miss_m4_x00_qqq_miss", "miss_m4_0y0_qqq_miss", "miss_m4_00z_qqq_miss", "miss_inv_000_3qqq_miss", "miss_m_0yz_3qqq_miss", "miss_m_x0z_3qqq_miss", "miss_m_xy0_3qqq_miss", "miss_m_xmxz_3qqq_miss", "miss_m_xymy_3qqq_miss", "miss_m_xymx_3qqq_miss", "miss_m_xyx_3qqq_miss", "miss_m_xxz_3qqq_miss", "miss_m_xyy_3qqq_miss", "miss_2_x00_3qqq_miss", "miss_2_0y0_3qqq_miss", "miss_2_00z_3qqq_miss", "miss_2_xx0_3qqq_miss", "miss_2_x0x_3qqq_miss", "miss_2_0yy_3qqq_miss", "miss_2_xmx0_3qqq_miss", "miss_2_mx0x_3qqq_miss", "miss_2_0myy_3qqq_miss", "miss_3_xxx_3qqq_miss", "miss_3_xmxmx_3qqq_miss", "miss_3_mxxmx_3qqq_miss", 
"miss_3_mxmxx_3qqq_miss", "miss_m3_xxx_3qqq_miss", "miss_m3_xmxmx_3qqq_miss", "miss_m3_mxxmx_3qqq_miss", "miss_m3_mxmxx_3qqq_miss", "miss_4_x00_3qqq_miss", "miss_4_0y0_3qqq_miss", "miss_4_00z_3qqq_miss", "miss_m4_x00_3qqq_miss", "miss_m4_0y0_3qqq_miss", "miss_m4_00z_3qqq_miss", "miss_inv_000_q3qq_miss", "miss_m_0yz_q3qq_miss", "miss_m_x0z_q3qq_miss", "miss_m_xy0_q3qq_miss", "miss_m_xmxz_q3qq_miss", "miss_m_xymy_q3qq_miss", "miss_m_xymx_q3qq_miss", "miss_m_xyx_q3qq_miss", "miss_m_xxz_q3qq_miss", "miss_m_xyy_q3qq_miss", "miss_2_x00_q3qq_miss", "miss_2_0y0_q3qq_miss", "miss_2_00z_q3qq_miss", "miss_2_xx0_q3qq_miss", "miss_2_x0x_q3qq_miss", "miss_2_0yy_q3qq_miss", "miss_2_xmx0_q3qq_miss", "miss_2_mx0x_q3qq_miss", "miss_2_0myy_q3qq_miss", "miss_3_xxx_q3qq_miss", "miss_3_xmxmx_q3qq_miss", "miss_3_mxxmx_q3qq_miss", "miss_3_mxmxx_q3qq_miss", "miss_m3_xxx_q3qq_miss", "miss_m3_xmxmx_q3qq_miss", "miss_m3_mxxmx_q3qq_miss", "miss_m3_mxmxx_q3qq_miss", "miss_4_x00_q3qq_miss", "miss_4_0y0_q3qq_miss", "miss_4_00z_q3qq_miss", "miss_m4_x00_q3qq_miss", "miss_m4_0y0_q3qq_miss", "miss_m4_00z_q3qq_miss", "miss_inv_000_qq3q_miss", "miss_m_0yz_qq3q_miss", "miss_m_x0z_qq3q_miss", "miss_m_xy0_qq3q_miss", "miss_m_xmxz_qq3q_miss", "miss_m_xymy_qq3q_miss", "miss_m_xymx_qq3q_miss", "miss_m_xyx_qq3q_miss", "miss_m_xxz_qq3q_miss", "miss_m_xyy_qq3q_miss", "miss_2_x00_qq3q_miss", "miss_2_0y0_qq3q_miss", "miss_2_00z_qq3q_miss", "miss_2_xx0_qq3q_miss", "miss_2_x0x_qq3q_miss", "miss_2_0yy_qq3q_miss", "miss_2_xmx0_qq3q_miss", "miss_2_mx0x_qq3q_miss", "miss_2_0myy_qq3q_miss", "miss_3_xxx_qq3q_miss", "miss_3_xmxmx_qq3q_miss", "miss_3_mxxmx_qq3q_miss", "miss_3_mxmxx_qq3q_miss", "miss_m3_xxx_qq3q_miss", "miss_m3_xmxmx_qq3q_miss", "miss_m3_mxxmx_qq3q_miss", "miss_m3_mxmxx_qq3q_miss", "miss_4_x00_qq3q_miss", "miss_4_0y0_qq3q_miss", "miss_4_00z_qq3q_miss", "miss_m4_x00_qq3q_miss", "miss_m4_0y0_qq3q_miss", "miss_m4_00z_qq3q_miss", "miss_inv_000_q3q3q_miss", "miss_m_0yz_q3q3q_miss", "miss_m_x0z_q3q3q_miss", 
"miss_m_xy0_q3q3q_miss", "miss_m_xmxz_q3q3q_miss", "miss_m_xymy_q3q3q_miss", "miss_m_xymx_q3q3q_miss", "miss_m_xyx_q3q3q_miss", "miss_m_xxz_q3q3q_miss", "miss_m_xyy_q3q3q_miss", "miss_2_x00_q3q3q_miss", "miss_2_0y0_q3q3q_miss", "miss_2_00z_q3q3q_miss", "miss_2_xx0_q3q3q_miss", "miss_2_x0x_q3q3q_miss", "miss_2_0yy_q3q3q_miss", "miss_2_xmx0_q3q3q_miss", "miss_2_mx0x_q3q3q_miss", "miss_2_0myy_q3q3q_miss", "miss_3_xxx_q3q3q_miss", "miss_3_xmxmx_q3q3q_miss", "miss_3_mxxmx_q3q3q_miss", "miss_3_mxmxx_q3q3q_miss", "miss_m3_xxx_q3q3q_miss", "miss_m3_xmxmx_q3q3q_miss", "miss_m3_mxxmx_q3q3q_miss", "miss_m3_mxmxx_q3q3q_miss", "miss_4_x00_q3q3q_miss", "miss_4_0y0_q3q3q_miss", "miss_4_00z_q3q3q_miss", "miss_m4_x00_q3q3q_miss", "miss_m4_0y0_q3q3q_miss", "miss_m4_00z_q3q3q_miss", "miss_inv_000_3qq3q_miss", "miss_m_0yz_3qq3q_miss", "miss_m_x0z_3qq3q_miss", "miss_m_xy0_3qq3q_miss", "miss_m_xmxz_3qq3q_miss", "miss_m_xymy_3qq3q_miss", "miss_m_xymx_3qq3q_miss", "miss_m_xyx_3qq3q_miss", "miss_m_xxz_3qq3q_miss", "miss_m_xyy_3qq3q_miss", "miss_2_x00_3qq3q_miss", "miss_2_0y0_3qq3q_miss", "miss_2_00z_3qq3q_miss", "miss_2_xx0_3qq3q_miss", "miss_2_x0x_3qq3q_miss", "miss_2_0yy_3qq3q_miss", "miss_2_xmx0_3qq3q_miss", "miss_2_mx0x_3qq3q_miss", "miss_2_0myy_3qq3q_miss", "miss_3_xxx_3qq3q_miss", "miss_3_xmxmx_3qq3q_miss", "miss_3_mxxmx_3qq3q_miss", "miss_3_mxmxx_3qq3q_miss", "miss_m3_xxx_3qq3q_miss", "miss_m3_xmxmx_3qq3q_miss", "miss_m3_mxxmx_3qq3q_miss", "miss_m3_mxmxx_3qq3q_miss", "miss_4_x00_3qq3q_miss", "miss_4_0y0_3qq3q_miss", "miss_4_00z_3qq3q_miss", "miss_m4_x00_3qq3q_miss", "miss_m4_0y0_3qq3q_miss", "miss_m4_00z_3qq3q_miss", "miss_inv_000_3q3qq_miss", "miss_m_0yz_3q3qq_miss", "miss_m_x0z_3q3qq_miss", "miss_m_xy0_3q3qq_miss", "miss_m_xmxz_3q3qq_miss", "miss_m_xymy_3q3qq_miss", "miss_m_xymx_3q3qq_miss", "miss_m_xyx_3q3qq_miss", "miss_m_xxz_3q3qq_miss", "miss_m_xyy_3q3qq_miss", "miss_2_x00_3q3qq_miss", "miss_2_0y0_3q3qq_miss", "miss_2_00z_3q3qq_miss", "miss_2_xx0_3q3qq_miss", 
"miss_2_x0x_3q3qq_miss", "miss_2_0yy_3q3qq_miss", "miss_2_xmx0_3q3qq_miss", "miss_2_mx0x_3q3qq_miss", "miss_2_0myy_3q3qq_miss", "miss_3_xxx_3q3qq_miss", "miss_3_xmxmx_3q3qq_miss", "miss_3_mxxmx_3q3qq_miss", "miss_3_mxmxx_3q3qq_miss", "miss_m3_xxx_3q3qq_miss", "miss_m3_xmxmx_3q3qq_miss", "miss_m3_mxxmx_3q3qq_miss", "miss_m3_mxmxx_3q3qq_miss", "miss_4_x00_3q3qq_miss", "miss_4_0y0_3q3qq_miss", "miss_4_00z_3q3qq_miss", "miss_m4_x00_3q3qq_miss", "miss_m4_0y0_3q3qq_miss", "miss_m4_00z_3q3qq_miss", "miss_inv_000_3q3q3q_miss", "miss_m_0yz_3q3q3q_miss", "miss_m_x0z_3q3q3q_miss", "miss_m_xy0_3q3q3q_miss", "miss_m_xmxz_3q3q3q_miss", "miss_m_xymy_3q3q3q_miss", "miss_m_xymx_3q3q3q_miss", "miss_m_xyx_3q3q3q_miss", "miss_m_xxz_3q3q3q_miss", "miss_m_xyy_3q3q3q_miss", "miss_2_x00_3q3q3q_miss", "miss_2_0y0_3q3q3q_miss", "miss_2_00z_3q3q3q_miss", "miss_2_xx0_3q3q3q_miss", "miss_2_x0x_3q3q3q_miss", "miss_2_0yy_3q3q3q_miss", "miss_2_xmx0_3q3q3q_miss", "miss_2_mx0x_3q3q3q_miss", "miss_2_0myy_3q3q3q_miss", "miss_3_xxx_3q3q3q_miss", "miss_3_xmxmx_3q3q3q_miss", "miss_3_mxxmx_3q3q3q_miss", "miss_3_mxmxx_3q3q3q_miss", "miss_m3_xxx_3q3q3q_miss", "miss_m3_xmxmx_3q3q3q_miss", "miss_m3_mxxmx_3q3q3q_miss", "miss_m3_mxmxx_3q3q3q_miss", "miss_4_x00_3q3q3q_miss", "miss_4_0y0_3q3q3q_miss", "miss_4_00z_3q3q3q_miss", "miss_m4_x00_3q3q3q_miss", "miss_m4_0y0_3q3q3q_miss", "miss_m4_00z_3q3q3q_miss", ]
50.459532
112
0.78997
12,586
75,437
3.98983
0.04362
0.202406
0.147861
0.187291
0.806277
0.534471
0.4536
0.426259
0.405887
0.098096
0
0.108623
0.117913
75,437
1,494
113
50.493307
0.646028
0.018439
0
0
0
0
0.177922
0.070892
0
0
0
0
0
1
0
false
0
0.000671
0
0.000671
0
0
0
0
null
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
9b6bf7b206d50a8a2c5094e4b7d3be29dfc6bdd4
350
py
Python
stubs.min/System/Windows/__init___parts/DpiChangedEventArgs.py
ricardyn/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
1
2021-02-02T13:39:16.000Z
2021-02-02T13:39:16.000Z
stubs.min/System/Windows/__init___parts/DpiChangedEventArgs.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
stubs.min/System/Windows/__init___parts/DpiChangedEventArgs.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
class DpiChangedEventArgs(RoutedEventArgs): # no doc NewDpi=property(lambda self: object(),lambda self,v: None,lambda self: None) """Get: NewDpi(self: DpiChangedEventArgs) -> DpiScale """ OldDpi=property(lambda self: object(),lambda self,v: None,lambda self: None) """Get: OldDpi(self: DpiChangedEventArgs) -> DpiScale """
25
78
0.691429
39
350
6.205128
0.384615
0.247934
0.14876
0.198347
0.46281
0.46281
0.46281
0.46281
0.46281
0.46281
0
0
0.165714
350
13
79
26.923077
0.828767
0.017143
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
1
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
9b775e0edfe332b3a1377a629abd5be759ad46cd
159
py
Python
pygears/rtl/__init__.py
Risto97/pygears
19393e85101a16762cb3bbbf3010946ef69217f2
[ "MIT" ]
null
null
null
pygears/rtl/__init__.py
Risto97/pygears
19393e85101a16762cb3bbbf3010946ef69217f2
[ "MIT" ]
null
null
null
pygears/rtl/__init__.py
Risto97/pygears
19393e85101a16762cb3bbbf3010946ef69217f2
[ "MIT" ]
null
null
null
from .connect import rtl_connect from .inst import rtl_inst from .channel import RTLChannelVisitor __all__ = ['rtl_inst', 'rtl_connect', 'RTLChannelVisitor']
26.5
58
0.798742
20
159
5.95
0.4
0.151261
0
0
0
0
0
0
0
0
0
0
0.113208
159
5
59
31.8
0.843972
0
0
0
0
0
0.226415
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
9b7bf08a21231ba8ac67650869a14b89c576e3e5
1,056
py
Python
friendly_states/exceptions.py
alexmojaki/friendly_states
22fc338e8b56e25635e456f0370cd216022db304
[ "MIT" ]
20
2019-10-10T22:36:46.000Z
2021-08-20T17:55:03.000Z
friendly_states/exceptions.py
alexmojaki/friendly_states
22fc338e8b56e25635e456f0370cd216022db304
[ "MIT" ]
1
2019-09-24T07:24:10.000Z
2019-09-24T07:24:10.000Z
friendly_states/exceptions.py
alexmojaki/friendly_states
22fc338e8b56e25635e456f0370cd216022db304
[ "MIT" ]
3
2019-09-23T08:58:08.000Z
2021-04-19T04:18:30.000Z
class StateMachineException(Exception): def __init__(self, message_format, **kwargs): if kwargs: self.message = message_format.format(**kwargs) else: self.message = message_format self.__dict__.update(**kwargs) def __str__(self): return self.message class IncorrectInitialState(StateMachineException): pass class StateChangedElsewhere(StateMachineException): pass class MultipleMachineAncestors(StateMachineException): pass class IncorrectSummary(StateMachineException): pass class InheritedFromState(StateMachineException): pass class CannotInferOutputState(StateMachineException): pass class DuplicateStateNames(StateMachineException): pass class DuplicateOutputStates(StateMachineException): pass class UnknownOutputState(StateMachineException): pass class ReturnedInvalidState(StateMachineException): pass class GetStateDidNotReturnState(StateMachineException): pass class DjangoStateAttrNameWarning(Warning): pass
17.898305
58
0.753788
80
1,056
9.7625
0.3625
0.352113
0.422535
0.06146
0
0
0
0
0
0
0
0
0.182765
1,056
58
59
18.206897
0.904983
0
0
0.363636
0
0
0
0
0
0
0
0
0
1
0.060606
false
0.363636
0
0.030303
0.484848
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
9b8471539b1108aa28a393210028e0ce8b922ba7
312
py
Python
src/backend/database_migrations/versions/20210218_175604_merge_heads.py
chanzuckerberg/czgenepi
87bd2b1739acdfe2c7c25663fafb01dc24c5e2fd
[ "MIT" ]
5
2021-02-04T20:18:46.000Z
2021-09-09T13:42:42.000Z
src/backend/database_migrations/versions/20210218_175604_merge_heads.py
chanzuckerberg/aspen
9853778a7ef68b0446751657af5a835f98dde3dc
[ "MIT" ]
422
2021-01-30T04:16:00.000Z
2022-01-31T23:18:44.000Z
src/backend/database_migrations/versions/20210218_175604_merge_heads.py
chanzuckerberg/covidr
afe05d703d30ec18ac83944bfb551c313cb216c4
[ "MIT" ]
1
2021-05-20T14:54:39.000Z
2021-05-20T14:54:39.000Z
"""merge heads Create Date: 2021-02-18 17:56:04.778686 """ import enumtables # noqa: F401 # revision identifiers, used by Alembic. revision = "20210218_175604" down_revision = ("20210218_104830", "20210218_110710") branch_labels = None depends_on = None def upgrade(): pass def downgrade(): pass
14.857143
54
0.714744
41
312
5.292683
0.829268
0.147465
0
0
0
0
0
0
0
0
0
0.250965
0.169872
312
20
55
15.6
0.586873
0.330128
0
0.222222
0
0
0.225
0
0
0
0
0
0
1
0.222222
false
0.222222
0.111111
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
3
9b88d6322ff43feb47b4024531b608741579f09d
204
py
Python
challenges/urls.py
arijit79/pythonhub
b22ac5d13fcd700a9fad1c1a9976082526e29e7d
[ "MIT" ]
null
null
null
challenges/urls.py
arijit79/pythonhub
b22ac5d13fcd700a9fad1c1a9976082526e29e7d
[ "MIT" ]
null
null
null
challenges/urls.py
arijit79/pythonhub
b22ac5d13fcd700a9fad1c1a9976082526e29e7d
[ "MIT" ]
null
null
null
from django.urls import path from . import views urlpatterns = [ path("<int:pk>", views.answer, name="challenge-answer"), path("previous/<int:pk>", views.last_challenge, name="last-challenge") ]
25.5
74
0.696078
27
204
5.222222
0.518519
0.070922
0.141844
0
0
0
0
0
0
0
0
0
0.132353
204
7
75
29.142857
0.79661
0
0
0
0
0
0.269608
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
9bae5fb2af461b5498aa7f9a23f5106edde74583
164
py
Python
src/currency/api/pagination.py
qmzrjf/currency_exchange_new
41c9947405e3fdcf60eab6ad7d573a68590c6a9f
[ "MIT" ]
null
null
null
src/currency/api/pagination.py
qmzrjf/currency_exchange_new
41c9947405e3fdcf60eab6ad7d573a68590c6a9f
[ "MIT" ]
5
2021-03-19T08:50:23.000Z
2022-02-10T13:49:42.000Z
src/currency/api/pagination.py
qmzrjf/currency_exchange_new
41c9947405e3fdcf60eab6ad7d573a68590c6a9f
[ "MIT" ]
null
null
null
from rest_framework.pagination import PageNumberPagination class RateResultsSetPagination(PageNumberPagination): page_size = 20 page_query_param = 'page'
23.428571
58
0.817073
16
164
8.125
0.8125
0
0
0
0
0
0
0
0
0
0
0.014085
0.134146
164
6
59
27.333333
0.901408
0
0
0
0
0
0.02439
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
32d1dccb36b2c057008bbd514c3f8ae5a00fef56
258
py
Python
project/apps/schools/admin.py
skoczen/pdxschoolhack
6780e96d3cd1fe7230e6692531e0a371ddd5bf0c
[ "BSD-2-Clause" ]
null
null
null
project/apps/schools/admin.py
skoczen/pdxschoolhack
6780e96d3cd1fe7230e6692531e0a371ddd5bf0c
[ "BSD-2-Clause" ]
2
2020-06-05T18:39:06.000Z
2021-06-10T20:35:03.000Z
project/apps/schools/admin.py
skoczen/pdxschoolhack
6780e96d3cd1fe7230e6692531e0a371ddd5bf0c
[ "BSD-2-Clause" ]
null
null
null
from django.contrib import admin from schools.models import SchoolType, School # class EmailSubscriptionOptions(admin.ModelAdmin): # list_display = ('email',) # search_fields = ('email',) admin.site.register(SchoolType) admin.site.register(School)
25.8
51
0.75969
29
258
6.689655
0.655172
0.092784
0.175258
0
0
0
0
0
0
0
0
0
0.124031
258
10
52
25.8
0.858407
0.426357
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
fd2e930bdcd3ce39203553949273e7ea060f8981
58
py
Python
gobbli/model/transformer/__init__.py
awesome-archive/gobbli
71aacbdc1184871b164185dc0c9f615f07b83173
[ "Apache-2.0" ]
276
2019-09-13T08:25:51.000Z
2022-03-05T13:07:55.000Z
gobbli/model/transformer/__init__.py
awesome-archive/gobbli
71aacbdc1184871b164185dc0c9f615f07b83173
[ "Apache-2.0" ]
15
2019-09-06T14:05:30.000Z
2022-01-01T20:15:06.000Z
gobbli/model/transformer/__init__.py
awesome-archive/gobbli
71aacbdc1184871b164185dc0c9f615f07b83173
[ "Apache-2.0" ]
24
2019-09-18T15:11:42.000Z
2021-12-23T18:59:55.000Z
from .model import Transformer __all__ = ["Transformer"]
14.5
30
0.758621
6
58
6.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.137931
58
3
31
19.333333
0.8
0
0
0
0
0
0.189655
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
fd41e81488f1dc90a9fdd783f5ecb977294184fa
58
py
Python
other/ipypublish/__init__.py
KGerring/metaproj
e957de611f5268978df10184e4cedbd229ef617a
[ "MIT" ]
2
2021-04-11T01:43:09.000Z
2021-07-08T00:17:57.000Z
other/ipypublish/__init__.py
KGerring/metaproj
e957de611f5268978df10184e4cedbd229ef617a
[ "MIT" ]
1
2021-08-21T23:39:26.000Z
2021-08-21T23:39:26.000Z
other/ipypublish/__init__.py
KGerring/metaproj
e957de611f5268978df10184e4cedbd229ef617a
[ "MIT" ]
null
null
null
from __future__ import annotations __version__ = '0.6.5'
14.5
34
0.775862
8
58
4.625
1
0
0
0
0
0
0
0
0
0
0
0.06
0.137931
58
3
35
19.333333
0.68
0
0
0
0
0
0.086207
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
fd502771b00d719e10ce20be634d28bb9f6498a6
272
py
Python
rental/rents/apps.py
manuelen12/bike_test
1300cb4ebae7022dd468b46bd1ee64147f658ba6
[ "MIT" ]
null
null
null
rental/rents/apps.py
manuelen12/bike_test
1300cb4ebae7022dd468b46bd1ee64147f658ba6
[ "MIT" ]
null
null
null
rental/rents/apps.py
manuelen12/bike_test
1300cb4ebae7022dd468b46bd1ee64147f658ba6
[ "MIT" ]
null
null
null
from django.apps import AppConfig class RentsConfig(AppConfig): name = 'rental.rents' verbose_name = "Rents" def ready(self): """Override this to put in: Users system checks Users signal registration """ pass
19.428571
37
0.591912
29
272
5.517241
0.862069
0
0
0
0
0
0
0
0
0
0
0
0.330882
272
13
38
20.923077
0.879121
0.257353
0
0
0
0
0.104294
0
0
0
0
0
0
1
0.166667
false
0.166667
0.166667
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
3
fd61f6d0b664ca0d99a5f26cb889ca1a0485b756
4,372
py
Python
tests/i18n_custom_test.py
FelixSchwarz/pycerberus
d38376a52e61516023a3de61d43ef3d225161c7e
[ "MIT" ]
null
null
null
tests/i18n_custom_test.py
FelixSchwarz/pycerberus
d38376a52e61516023a3de61d43ef3d225161c7e
[ "MIT" ]
4
2018-10-21T16:00:59.000Z
2019-09-14T21:05:32.000Z
tests/i18n_custom_test.py
FelixSchwarz/pycerberus
d38376a52e61516023a3de61d43ef3d225161c7e
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*- # This file is a part of pycerberus. # The source code contained in this file is licensed under the MIT license. # See LICENSE.txt in the main project directory, for more information. # SPDX-License-Identifier: MIT from __future__ import absolute_import, print_function, unicode_literals from pythonic_testcase import * from pycerberus.test_util import ValidationTest from pycerberus.validators import IntegerValidator class FrameworkValidator(IntegerValidator): def translation_parameters(self, context): return {'domain': 'framework'} class ValidatorWithAdditionalKeys(FrameworkValidator): def messages(self): return {'foo': 'bar'} def translation_parameters(self, context): return {'domain': 'fnord'} def translate_message(self, key, native_message, translation_parameters, context): assert key == 'foo' return 'A message from an application validator.' class SimpleDerivedValidator(ValidatorWithAdditionalKeys): pass class ValidatorRedefiningKeys(FrameworkValidator): def messages(self): return {'empty': 'fnord'} def translation_parameters(self, context): # We need to change back the domain as this validator is used to get # a real message - if the .mo file for the gettext domain does not # exist, gettext will raise an error. return {'domain': 'pycerberus'} class ValidatorWithNonGettextTranslation(FrameworkValidator): def translation_parameters(self, context): # we change the domain here on purpose - if gettext would check for # locale files for this domain, it would raise an exception because the # file is not there... 
return {'domain': 'application'} def translate_message(self, key, native_message, translation_parameters, context): assert key == 'inactive' if context['locale'] == 'de': return u'db Übersetzung' return 'db translation' def messages(self): return {'inactive': 'Untranslated message'} class CustomizedI18NBehaviorTest(ValidationTest): validator_class = ValidatorWithAdditionalKeys def domain_for_key(self, key): gettext_args = self.validator()._implementation(key, 'translation_parameters', {})() return gettext_args.get('domain') def test_validator_can_define_more_translations_while_keeping_existing_ones(self): assert_equals('Bitte geben Sie einen Wert ein.', self.message_for_key('empty')) assert_equals('A message from an application validator.', self.message_for_key('foo')) def test_validator_can_define_custom_parameters_for_translation_mechanism(self): assert_equals('pycerberus', self.domain_for_key('empty')) assert_equals('fnord', self.domain_for_key('foo')) def test_parameters_for_translation_are_inherited_from_super_class(self): assert_equals('fnord', self.domain_for_key('foo')) self.init_validator(SimpleDerivedValidator()) assert_equals('fnord', self.domain_for_key('foo')) def test_use_parameters_for_translation_from_class_where_key_is_defined(self): self.init_validator(SimpleDerivedValidator()) assert_equals('framework', self.domain_for_key('invalid_type')) assert_equals('fnord', self.domain_for_key('foo')) def test_validators_can_use_their_own_translations_for_existing_keys(self): assert_equals(u'Bitte geben Sie einen Wert ein.', self.message_for_key('empty')) self.init_validator(ValidatorRedefiningKeys()) assert_equals('fnord', self.message_for_key('empty')) def test_validators_can_use_other_translation_systems_than_gettext(self): self.init_validator(ValidatorWithNonGettextTranslation()) assert_equals('db translation', self.message_for_key('inactive', locale='en')) assert_equals(u'db Übersetzung', self.message_for_key('inactive', locale='de')) def 
test_different_translation_system_is_only_applied_to_messages_declared_in_that_class(self): self.init_validator(ValidatorWithNonGettextTranslation()) # This translation is present in the included mo files but not returned # by the custom translation method. assert_equals(u'Bitte geben Sie einen Wert ein.', self.message_for_key('empty'))
39.387387
99
0.72667
515
4,372
5.906796
0.299029
0.027613
0.027613
0.039119
0.395464
0.286654
0.190007
0.159106
0.147272
0.147272
0
0.000842
0.184812
4,372
110
100
39.745455
0.852694
0.150732
0
0.306452
0
0
0.133892
0.005951
0
0
0
0
0.241935
1
0.274194
false
0.016129
0.064516
0.112903
0.629032
0.016129
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
b5e9f2d6adf39e9daa2bb87050d1fb88a82ea568
954
py
Python
members_only/migrations/0002_auto_20190324_0319.py
TamasPalfi/FixedDB
be3e4e830b05099d33031759f4a7fc8a42f1e733
[ "BSD-2-Clause" ]
null
null
null
members_only/migrations/0002_auto_20190324_0319.py
TamasPalfi/FixedDB
be3e4e830b05099d33031759f4a7fc8a42f1e733
[ "BSD-2-Clause" ]
null
null
null
members_only/migrations/0002_auto_20190324_0319.py
TamasPalfi/FixedDB
be3e4e830b05099d33031759f4a7fc8a42f1e733
[ "BSD-2-Clause" ]
null
null
null
# Generated by Django 2.1.7 on 2019-03-24 03:19 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('members_only', '0001_initial'), ] operations = [ migrations.AlterField( model_name='post', name='photo', field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='members_only.Photo'), ), migrations.AlterField( model_name='user', name='address', field=models.TextField(default=''), ), migrations.AlterField( model_name='user', name='points_balance', field=models.IntegerField(default=0), ), migrations.AlterField( model_name='user', name='stripe_card', field=models.CharField(default='', max_length=100), ), ]
27.257143
118
0.575472
95
954
5.663158
0.536842
0.148699
0.185874
0.215613
0.20632
0.20632
0
0
0
0
0
0.034483
0.300839
954
34
119
28.058824
0.772114
0.04717
0
0.392857
1
0
0.104741
0
0
0
0
0
0
1
0
false
0
0.071429
0
0.178571
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b5ed686d58c97de92ced1d373cb6320c70ea2fb4
68,420
py
Python
autolens/pipeline/phase.py
AshKelly/PyAutoLens
043795966338a655339e61782253ad67cc3c14e6
[ "MIT" ]
null
null
null
autolens/pipeline/phase.py
AshKelly/PyAutoLens
043795966338a655339e61782253ad67cc3c14e6
[ "MIT" ]
null
null
null
autolens/pipeline/phase.py
AshKelly/PyAutoLens
043795966338a655339e61782253ad67cc3c14e6
[ "MIT" ]
null
null
null
import logging import os import warnings import numpy as np from astropy import cosmology as cosmo from autofit import conf from autofit.tools import phase from autofit.tools.phase_property import PhasePropertyCollection from autofit.optimize import non_linear from autolens import exc from autolens.data.array import mask as msk from autolens.data.plotters import ccd_plotters from autolens.lens import lens_data as li, lens_fit from autolens.lens import ray_tracing from autolens.lens import sensitivity_fit from autolens.lens.plotters import sensitivity_fit_plotters, ray_tracing_plotters, lens_fit_plotters from autolens.model.galaxy import galaxy as g, galaxy_model as gm, galaxy_fit, galaxy_data as gd from autolens.model.galaxy.plotters import galaxy_fit_plotters logger = logging.getLogger(__name__) logger.level = logging.DEBUG def default_mask_function(image): return msk.Mask.circular(shape=image.shape, pixel_scale=image.pixel_scale, radius_arcsec=3.0) def setup_phase_mask(data, mask, mask_function, inner_circular_mask_radii): if mask_function is not None: mask = mask_function(image=data.image) elif mask is None and mask_function is None: mask = default_mask_function(image=data.image) if inner_circular_mask_radii is not None: inner_mask = msk.Mask.circular(shape=mask.shape, pixel_scale=mask.pixel_scale, radius_arcsec=inner_circular_mask_radii, invert=True) mask = mask + inner_mask return mask class ResultsCollection(list): def __init__(self, results): super().__init__(results) @property def last(self): if len(self) > 0: return self[-1] return None @property def first(self): if len(self) > 0: return self[0] return None class AbstractPhase(phase.AbstractPhase): def __init__(self, phase_name, optimizer_class=non_linear.MultiNest, cosmology=cosmo.Planck15, auto_link_priors=False): """ A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper passed to it. 
Parameters ---------- optimizer_class: class The class of a non_linear optimizer phase_name: str The name of this phase """ self.optimizer = optimizer_class(name=phase_name) self.cosmology = cosmology self.phase_name = phase_name self.auto_link_priors = auto_link_priors @property def constant(self): """ Convenience method Returns ------- ModelInstance A model instance comprising all the constant objects in this lens """ return self.optimizer.constant @property def variable(self): """ Convenience method Returns ------- ModelMapper A model mapper comprising all the variable (prior) objects in this lens """ return self.optimizer.variable @property def galaxy_model_tuples(self): """ Returns ------- galaxy_model_tuples: [(String, GalaxyModel)] A list of tuples containing galaxy model names and instances. """ return [tup for tup in self.optimizer.variable.prior_model_tuples if isinstance(tup.prior_model, gm.GalaxyModel)] def match_instance_to_models(self, instance): """ Matches named galaxies associated with the instance to named galaxies associated with this phase. Parameters ---------- instance: ModelInstance An instance with named galaxy attributes. Returns ------- tuples: [(String, Galaxy, GalaxyModel)] A list of tuples associating galaxy instances from the model instance object with galaxy models in this phase. """ galaxy_dict = dict(instance.name_instance_tuples_for_class(g.Galaxy)) return [(key, galaxy_dict[key], value) for key, value in self.galaxy_model_tuples if key in galaxy_dict] def fit_priors(self, instance, fitting_function): """ Update the priors in this phase by fitting each galaxy model to a galaxy with the same name from a previous phase if such a galaxy exists. 
Parameters ---------- instance: ModelInstance An object with named galaxy attributes fitting_function: (Galaxy, GalaxyModel) -> GalaxyModel A function that takes a galaxy and a galaxy model and returns a GalaxyModel produced by combining a best fit between the original galaxy and galaxy model with prior widths given by the configuration. """ tuples = self.match_instance_to_models(instance) for t in tuples: name = t[0] galaxy = t[1] galaxy_model = t[2] new_galaxy_model = fitting_function(galaxy, galaxy_model) for phase_property_collection in self.phase_property_collections: if hasattr(phase_property_collection, name): setattr(phase_property_collection, name, new_galaxy_model) def fit_priors_with_results(self, results, fitting_function): """ Update the priors in this phase by fitting each galaxy model to a galaxy with the same name from a previous phase if such a galaxy exists. Results later in the list take precedence, with the last instance of any galaxies that share a name being kept. Parameters ---------- results: [Results] A list of results from previous phases. fitting_function: (Galaxy, GalaxyModel) -> GalaxyModel A function that takes a galaxy and a galaxy model and returns a GalaxyModel produced by combining a best fit between the original galaxy and galaxy model with prior widths given by the configuration. """ if results is not None and len(results) > 0: instances = [r.constant for r in results] instance = instances[0] for next_instance in instances[1:]: instance += next_instance self.fit_priors(instance, fitting_function) @property def phase_property_collections(self): """ Returns ------- phase_property_collections: [PhasePropertyCollection] A list of phase property collections associated with this phase. This is used in automated prior passing and should be overridden for any phase that contains its own PhasePropertyCollections. 
""" return [] @property def path(self): return self.optimizer.path @property def doc(self): if self.__doc__ is not None: return self.__doc__.replace(" ", "").replace("\n", " ") def pass_priors(self, previous_results): """ Perform any prior or constant passing. This could involve setting model attributes equal to priors or constants from a previous phase. Parameters ---------- previous_results: ResultsCollection The result of the previous phase """ pass # noinspection PyAbstractClass class Analysis(non_linear.Analysis): def __init__(self, cosmology, phase_name, previous_results=None): """ An lens object Parameters ---------- phase_name: str The name of the phase to which this analysis belongs previous_results: ResultsCollection The results of all previous phases """ self.previous_results = previous_results self.cosmology = cosmology self.phase_name = phase_name self.phase_output_path = "{}/{}".format(conf.instance.output_path, self.phase_name) log_file = conf.instance.general.get('output', 'log_file', str) if not len(log_file.replace(" ", "")) == 0: log_path = "{}/{}".format(self.phase_output_path, log_file) logger.handlers = [logging.FileHandler(log_path)] logger.propagate = False self.position_threshold = conf.instance.general.get('positions', 'position_threshold', float) self.plot_count = 0 self.output_image_path = "{}/image/".format(self.phase_output_path) make_path_if_does_not_exist(path=self.output_image_path) self.output_fits_path = "{}/image/fits/".format(self.phase_output_path) make_path_if_does_not_exist(path=self.output_fits_path) @property def last_results(self): if self.previous_results is not None: return self.previous_results.last def tracer_for_instance(self, instance): raise NotImplementedError() def padded_tracer_for_instance(self, instance): raise NotImplementedError() def fit_for_tracers(self, tracer, padded_tracer): raise NotImplementedError() def figure_of_merit_for_fit(self, tracer): raise NotImplementedError() def make_result(self, result, 
analysis): return self.__class__.Result(constant=result.constant, figure_of_merit=result.figure_of_merit, variable=result.variable, analysis=analysis, optimizer=self.optimizer) class Result(non_linear.Result): def __init__(self, constant, figure_of_merit, variable, analysis, optimizer): """ The result of a phase """ super(Phase.Result, self).__init__(constant=constant, figure_of_merit=figure_of_merit, variable=variable) self.analysis = analysis self.optimizer = optimizer @property def most_likely_tracer(self): return self.analysis.tracer_for_instance(instance=self.constant) @property def most_likely_padded_tracer(self): return self.analysis.padded_tracer_for_instance(instance=self.constant) @property def most_likely_fit(self): return self.analysis.fit_for_tracers(tracer=self.most_likely_tracer, padded_tracer=self.most_likely_padded_tracer) @property def unmasked_model_image(self): return self.most_likely_fit.unmasked_model_image @property def unmasked_model_image_of_planes(self): return self.most_likely_fit.unmasked_model_image_of_planes @property def unmasked_model_image_of_planes_and_galaxies(self): return self.most_likely_fit.unmasked_model_image_of_planes_and_galaxies class Phase(AbstractPhase): def run(self, image, previous_results=None, mask=None): raise NotImplementedError() # noinspection PyAbstractClass class Analysis(AbstractPhase.Analysis): def __init__(self, cosmology, phase_name, previous_results=None): super(Phase.Analysis, self).__init__(cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) self.should_plot_mask = \ conf.instance.general.get('output', 'plot_mask_on_images', bool) self.extract_array_from_mask = \ conf.instance.general.get('output', 'extract_images_from_mask', bool) self.zoom_around_mask = \ conf.instance.general.get('output', 'zoom_around_mask_of_images', bool) self.should_plot_positions = \ conf.instance.general.get('output', 'plot_positions_on_images', bool) self.plot_units = \ 
conf.instance.general.get('output', 'plot_units', str).strip() self.plot_ray_tracing_all_at_end_png = \ conf.instance.general.get('output', 'plot_ray_tracing_all_at_end_png', bool) self.plot_ray_tracing_all_at_end_fits = \ conf.instance.general.get('output', 'plot_ray_tracing_all_at_end_fits', bool) self.plot_ray_tracing_as_subplot = \ conf.instance.general.get('output', 'plot_ray_tracing_as_subplot', bool) self.plot_ray_tracing_image_plane_image = \ conf.instance.general.get('output', 'plot_ray_tracing_image_plane_image', bool) self.plot_ray_tracing_source_plane = \ conf.instance.general.get('output', 'plot_ray_tracing_source_plane_image', bool) self.plot_ray_tracing_surface_density = \ conf.instance.general.get('output', 'plot_ray_tracing_surface_density', bool) self.plot_ray_tracing_potential = \ conf.instance.general.get('output', 'plot_ray_tracing_potential', bool) self.plot_ray_tracing_deflections = \ conf.instance.general.get('output', 'plot_ray_tracing_deflections', bool) class PhasePositions(AbstractPhase): lens_galaxies = PhasePropertyCollection("lens_galaxies") @property def phase_property_collections(self): return [self.lens_galaxies] def __init__(self, phase_name, lens_galaxies=None, optimizer_class=non_linear.MultiNest, cosmology=cosmo.Planck15, auto_link_priors=False): super().__init__(optimizer_class=optimizer_class, cosmology=cosmology, phase_name=phase_name, auto_link_priors=auto_link_priors) self.lens_galaxies = lens_galaxies def run(self, positions, pixel_scale, previous_results=None): """ Run this phase. Parameters ---------- pixel_scale positions previous_results: ResultsCollection An object describing the results of the last phase or None if no phase has been executed Returns ------- result: AbstractPhase.Result A result object comprising the best fit model and other hyper. 
""" analysis = self.make_analysis(positions=positions, pixel_scale=pixel_scale, previous_results=previous_results) result = self.run_analysis(analysis) return self.make_result(result, analysis) def make_analysis(self, positions, pixel_scale, previous_results=None): """ Create an lens object. Also calls the prior passing and lens_data modifying functions to allow child classes to change the behaviour of the phase. Parameters ---------- pixel_scale positions previous_results: ResultsCollection The result from the previous phase Returns ------- lens: Analysis An lens object that the non-linear optimizer calls to determine the fit of a set of values """ self.pass_priors(previous_results) analysis = self.__class__.Analysis(positions=positions, pixel_scale=pixel_scale, cosmology=self.cosmology, phase_name=self.phase_name, previous_results=previous_results) return analysis # noinspection PyAbstractClass class Analysis(Phase.Analysis): def __init__(self, positions, pixel_scale, cosmology, phase_name, previous_results=None): super().__init__(cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) self.positions = list(map(lambda position_set: np.asarray(position_set), positions)) self.pixel_scale = pixel_scale def visualize(self, instance, suffix, during_analysis): pass def fit(self, instance): """ Determine the fit of a lens galaxy and source galaxy to the lens_data in this lens. 
Parameters ---------- instance A model instance with attributes Returns ------- fit: Fit A fractional value indicating how well this model fit and the model lens_data itself """ tracer = self.tracer_for_instance(instance) fit = self.fit_for_tracer(tracer) return fit.figure_of_merit def tracer_for_instance(self, instance): return ray_tracing.TracerImageSourcePlanesPositions(lens_galaxies=instance.lens_galaxies, image_plane_positions=self.positions, cosmology=self.cosmology) def fit_for_tracer(self, tracer): return lens_fit.LensPositionFit(positions=tracer.source_plane.positions, noise_map=self.pixel_scale) @classmethod def log(cls, instance): logger.debug( "\nRunning lens lens for... \n\nLens Galaxy::\n{}\n\n".format(instance.lens_galaxies)) class PhaseImaging(Phase): def __init__(self, phase_name, optimizer_class=non_linear.MultiNest, sub_grid_size=2, image_psf_shape=None, pixelization_psf_shape=None, use_positions=False, mask_function=None, inner_circular_mask_radii=None, cosmology=cosmo.Planck15, auto_link_priors=False): """ A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper passed to it. Parameters ---------- optimizer_class: class The class of a non_linear optimizer sub_grid_size: int The side length of the subgrid """ super(PhaseImaging, self).__init__(optimizer_class=optimizer_class, cosmology=cosmology, phase_name=phase_name, auto_link_priors=auto_link_priors) self.sub_grid_size = sub_grid_size self.image_psf_shape = image_psf_shape self.pixelization_psf_shape = pixelization_psf_shape self.use_positions = use_positions self.mask_function = mask_function self.inner_circular_mask_radii = inner_circular_mask_radii # noinspection PyMethodMayBeStatic,PyUnusedLocal def modify_image(self, image, previous_results): """ Customize an lens_data. e.g. removing lens light. 
Parameters ---------- image: scaled_array.ScaledSquarePixelArray An lens_data that has been masked previous_results: ResultsCollection The result of the previous lens Returns ------- lens_data: scaled_array.ScaledSquarePixelArray The modified image (not changed by default) """ return image def run(self, data, previous_results=None, mask=None, positions=None): """ Run this phase. Parameters ---------- mask: Mask The default masks passed in by the pipeline previous_results: ResultsCollection An object describing the results of the last phase or None if no phase has been executed data: scaled_array.ScaledSquarePixelArray An lens_data that has been masked Returns ------- result: AbstractPhase.Result A result object comprising the best fit model and other hyper. """ analysis = self.make_analysis(data=data, previous_results=previous_results, mask=mask, positions=positions) result = self.run_analysis(analysis) return self.make_result(result, analysis) def make_analysis(self, data, previous_results=None, mask=None, positions=None): """ Create an lens object. Also calls the prior passing and lens_data modifying functions to allow child classes to change the behaviour of the phase. 
Parameters ---------- mask: Mask The default masks passed in by the pipeline data: im.CCD An lens_data that has been masked previous_results: ResultsCollection The result from the previous phase Returns ------- lens : Analysis An lens object that the non-linear optimizer calls to determine the fit of a set of values """ mask = setup_phase_mask(data=data, mask=mask, mask_function=self.mask_function, inner_circular_mask_radii=self.inner_circular_mask_radii) if self.use_positions and positions is not None: positions = list(map(lambda position_set: np.asarray(position_set), positions)) elif not self.use_positions: positions = None elif self.use_positions and positions is None: raise exc.PhaseException('You have specified for a phase to use positions, but not input positions to the ' 'pipeline when you ran it.') lens_data = li.LensData(ccd_data=data, mask=mask, sub_grid_size=self.sub_grid_size, image_psf_shape=self.image_psf_shape, positions=positions) modified_image = self.modify_image(image=lens_data.image, previous_results=previous_results) lens_data = lens_data.new_lens_data_with_modified_image(modified_image=modified_image) self.pass_priors(previous_results) self.output_phase_info() analysis = self.__class__.Analysis(lens_data=lens_data, cosmology=self.cosmology, phase_name=self.phase_name, previous_results=previous_results) return analysis def output_phase_info(self): file_phase_info = "{}/{}/{}".format(conf.instance.output_path, self.phase_name, 'phase.info') with open(file_phase_info, 'w') as phase_info: phase_info.write('Optimizer = {} \n'.format(type(self.optimizer).__name__)) phase_info.write('Sub-grid size = {} \n'.format(self.sub_grid_size)) phase_info.write('Image PSF shape = {} \n'.format(self.image_psf_shape)) phase_info.write('Pixelization PSF shape = {} \n'.format(self.pixelization_psf_shape)) phase_info.write('Use positions = {} \n'.format(self.use_positions)) position_threshold = conf.instance.general.get('positions', 'position_threshold', float) 
phase_info.write('Positions Threshold = {} \n'.format(position_threshold)) phase_info.write('Cosmology = {} \n'.format(self.cosmology)) phase_info.write('Auto Link Priors = {} \n'.format(self.auto_link_priors)) phase_info.close() # noinspection PyAbstractClass class Analysis(Phase.Analysis): def __init__(self, lens_data, cosmology, phase_name, previous_results=None): super(PhaseImaging.Analysis, self).__init__(cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) self.lens_data = lens_data self.should_plot_image_plane_pix = \ conf.instance.general.get('output', 'plot_image_plane_adaptive_pixelization_grid', bool) self.plot_data_as_subplot =\ conf.instance.general.get('output', 'plot_data_as_subplot', bool) self.plot_data_image = \ conf.instance.general.get('output', 'plot_data_image', bool) self.plot_data_noise_map = \ conf.instance.general.get('output', 'plot_data_noise_map', bool) self.plot_data_psf = \ conf.instance.general.get('output', 'plot_data_psf', bool) self.plot_data_signal_to_noise_map = \ conf.instance.general.get('output', 'plot_data_signal_to_noise_map', bool) self.plot_lens_fit_all_at_end_png = \ conf.instance.general.get('output', 'plot_lens_fit_all_at_end_png', bool) self.plot_lens_fit_all_at_end_fits = \ conf.instance.general.get('output', 'plot_lens_fit_all_at_end_fits', bool) self.plot_lens_fit_as_subplot = \ conf.instance.general.get('output', 'plot_lens_fit_as_subplot', bool) self.plot_lens_fit_image = \ conf.instance.general.get('output', 'plot_lens_fit_image', bool) self.plot_lens_fit_noise_map = \ conf.instance.general.get('output', 'plot_lens_fit_noise_map', bool) self.plot_lens_fit_signal_to_noise_map = \ conf.instance.general.get('output', 'plot_lens_fit_signal_to_noise_map', bool) self.plot_lens_fit_lens_subtracted_image = \ conf.instance.general.get('output', 'plot_lens_fit_lens_subtracted_image', bool) self.plot_lens_fit_model_image = \ conf.instance.general.get('output', 'plot_lens_fit_model_image', bool) 
self.plot_lens_fit_lens_model_image = \ conf.instance.general.get('output', 'plot_lens_fit_lens_model_image', bool) self.plot_lens_fit_source_model_image = \ conf.instance.general.get('output', 'plot_lens_fit_source_model_image', bool) self.plot_lens_fit_source_plane_image = \ conf.instance.general.get('output', 'plot_lens_fit_source_plane_image', bool) self.plot_lens_fit_residual_map = \ conf.instance.general.get('output', 'plot_lens_fit_residual_map', bool) self.plot_lens_fit_chi_squared_map = \ conf.instance.general.get('output', 'plot_lens_fit_chi_squared_map', bool) self.plot_lens_fit_contribution_map = \ conf.instance.general.get('output', 'plot_lens_fit_contribution_map', bool) def fit(self, instance): """ Determine the fit of a lens galaxy and source galaxy to the lens_data in this lens. Parameters ---------- instance A model instance with attributes Returns ------- fit : Fit A fractional value indicating how well this model fit and the model lens_data itself """ self.check_positions_trace_within_threshold(instance) tracer = self.tracer_for_instance(instance) fit = self.fit_for_tracers(tracer=tracer, padded_tracer=None) return fit.figure_of_merit def visualize(self, instance, suffix, during_analysis): self.plot_count += 1 if self.should_plot_mask: mask = self.lens_data.mask else: mask = None if self.should_plot_positions: positions = self.lens_data.positions else: positions = None if self.plot_data_as_subplot: ccd_plotters.plot_ccd_subplot( ccd_data=self.lens_data.ccd_data, mask=mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, units=self.plot_units, output_path=self.output_image_path, output_format='png') ccd_plotters.plot_ccd_individual( ccd_data=self.lens_data.ccd_data, mask=mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image=self.plot_data_image, should_plot_noise_map=self.plot_data_noise_map, 
should_plot_psf=self.plot_data_psf, should_plot_signal_to_noise_map=self.plot_data_signal_to_noise_map, units=self.plot_units, output_path=self.output_image_path, output_format='png') tracer = self.tracer_for_instance(instance) padded_tracer = self.padded_tracer_for_instance(instance) if self.plot_ray_tracing_as_subplot: ray_tracing_plotters.plot_ray_tracing_subplot( tracer=tracer, mask=mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, units=self.plot_units, output_path=self.output_image_path, output_format='png') fit = self.fit_for_tracers(tracer=tracer, padded_tracer=padded_tracer) if self.plot_lens_fit_as_subplot: lens_fit_plotters.plot_fit_subplot( fit=fit, should_plot_mask=self.should_plot_mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image_plane_pix=self.should_plot_image_plane_pix, units=self.plot_units, output_path=self.output_image_path, output_format='png') if during_analysis: ray_tracing_plotters.plot_ray_tracing_individual( tracer=tracer, mask=mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image_plane_image=self.plot_ray_tracing_image_plane_image, should_plot_source_plane=self.plot_ray_tracing_source_plane, should_plot_surface_density=self.plot_ray_tracing_surface_density, should_plot_potential=self.plot_ray_tracing_potential, should_plot_deflections=self.plot_ray_tracing_deflections, units=self.plot_units, output_path=self.output_image_path, output_format='png') lens_fit_plotters.plot_fit_individuals( fit=fit, should_plot_mask=self.should_plot_mask, extract_array_from_mask=self.extract_array_from_mask,zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image_plane_pix=self.should_plot_image_plane_pix, should_plot_image=self.plot_lens_fit_image, 
should_plot_noise_map=self.plot_lens_fit_noise_map, should_plot_signal_to_noise_map=self.plot_lens_fit_signal_to_noise_map, should_plot_lens_subtracted_image=self.plot_lens_fit_lens_subtracted_image, should_plot_model_image=self.plot_lens_fit_model_image, should_plot_lens_model_image=self.plot_lens_fit_lens_model_image, should_plot_source_model_image=self.plot_lens_fit_source_model_image, should_plot_source_plane_image=self.plot_lens_fit_source_plane_image, should_plot_residual_map=self.plot_lens_fit_residual_map, should_plot_chi_squared_map=self.plot_lens_fit_chi_squared_map, units=self.plot_units, output_path=self.output_image_path, output_format='png') elif not during_analysis: if self.plot_ray_tracing_all_at_end_png: ray_tracing_plotters.plot_ray_tracing_individual( tracer=tracer, mask=mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image_plane_image=True, should_plot_source_plane=True, should_plot_surface_density=True, should_plot_potential=True, should_plot_deflections=True, units=self.plot_units, output_path=self.output_image_path, output_format='png') if self.plot_ray_tracing_all_at_end_fits: ray_tracing_plotters.plot_ray_tracing_individual( tracer=tracer, mask=mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image_plane_image=True, should_plot_source_plane=True, should_plot_surface_density=True, should_plot_potential=True, should_plot_deflections=True, output_path=self.output_fits_path, output_format='fits') if self.plot_lens_fit_all_at_end_png: lens_fit_plotters.plot_fit_individuals( fit=fit, should_plot_mask=self.should_plot_mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image_plane_pix=self.should_plot_image_plane_pix, should_plot_image=True, should_plot_noise_map=True, 
should_plot_signal_to_noise_map=True, should_plot_lens_subtracted_image=True, should_plot_model_image=True, should_plot_lens_model_image=True, should_plot_source_model_image=True, should_plot_source_plane_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, units=self.plot_units, output_path=self.output_image_path, output_format='png') if self.plot_lens_fit_all_at_end_fits: lens_fit_plotters.plot_fit_individuals( fit=fit, should_plot_mask=self.should_plot_mask, extract_array_from_mask=self.extract_array_from_mask, zoom_around_mask=self.zoom_around_mask, positions=positions, should_plot_image_plane_pix=self.should_plot_image_plane_pix, should_plot_image=True, should_plot_noise_map=True, should_plot_signal_to_noise_map=True, should_plot_lens_subtracted_image=True, should_plot_model_image=True, should_plot_lens_model_image=True, should_plot_source_model_image=True, should_plot_source_plane_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, output_path=self.output_fits_path, output_format='fits') return fit def fit_for_tracers(self, tracer, padded_tracer): return lens_fit.fit_lens_data_with_tracer(lens_data=self.lens_data, tracer=tracer, padded_tracer=padded_tracer) def check_positions_trace_within_threshold(self, instance): if self.lens_data.positions is not None: tracer = ray_tracing.TracerImageSourcePlanesPositions(lens_galaxies=instance.lens_galaxies, image_plane_positions=self.lens_data.positions) fit = lens_fit.LensPositionFit(positions=tracer.source_plane.positions, noise_map=self.lens_data.pixel_scale) if not fit.maximum_separation_within_threshold(self.position_threshold): return exc.RayTracingException def map_to_1d(self, data): """Convenience method""" return self.lens_data.mask.map_2d_array_to_masked_1d_array(data) class Result(Phase.Result): def __init__(self, constant, figure_of_merit, variable, analysis, optimizer): """ The result of a phase """ super(PhaseImaging.Result, self).__init__(constant=constant, 
figure_of_merit=figure_of_merit, variable=variable, analysis=analysis, optimizer=optimizer) class LensPlanePhase(PhaseImaging): """ Fit only the lens galaxy light. """ lens_galaxies = PhasePropertyCollection("lens_galaxies") @property def phase_property_collections(self): return [self.lens_galaxies] def __init__(self, phase_name, lens_galaxies=None, optimizer_class=non_linear.MultiNest, sub_grid_size=2, image_psf_shape=None, mask_function=None, inner_circular_mask_radii=None, cosmology=cosmo.Planck15, auto_link_priors=False): super(LensPlanePhase, self).__init__(optimizer_class=optimizer_class, sub_grid_size=sub_grid_size, image_psf_shape=image_psf_shape, mask_function=mask_function, inner_circular_mask_radii=inner_circular_mask_radii, cosmology=cosmology, phase_name=phase_name, auto_link_priors=auto_link_priors) self.lens_galaxies = lens_galaxies class Analysis(PhaseImaging.Analysis): def __init__(self, lens_data, cosmology, phase_name, previous_results=None): super(LensPlanePhase.Analysis, self).__init__(lens_data=lens_data, cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) def tracer_for_instance(self, instance): return ray_tracing.TracerImagePlane(lens_galaxies=instance.lens_galaxies, image_plane_grid_stack=self.lens_data.grid_stack, cosmology=self.cosmology) def padded_tracer_for_instance(self, instance): return ray_tracing.TracerImagePlane(lens_galaxies=instance.lens_galaxies, image_plane_grid_stack=self.lens_data.padded_grid_stack, cosmology=self.cosmology) @classmethod def log(cls, instance): logger.debug( "\nRunning lens lens for... 
\n\nLens Galaxy::\n{}\n\n".format(instance.lens_galaxies)) class Result(PhaseImaging.Result): def __init__(self, constant, figure_of_merit, variable, analysis, optimizer): """ The result of a phase """ super(LensPlanePhase.Result, self).__init__(constant=constant, figure_of_merit=figure_of_merit, variable=variable, analysis=analysis, optimizer=optimizer) @property def unmasked_lens_plane_model_image(self): return self.most_likely_fit.unmasked_model_image_of_planes[0] class LensSourcePlanePhase(PhaseImaging): """ Fit a simple source and lens system. """ lens_galaxies = PhasePropertyCollection("lens_galaxies") source_galaxies = PhasePropertyCollection("source_galaxies") @property def phase_property_collections(self): return [self.lens_galaxies, self.source_galaxies] def __init__(self, phase_name, lens_galaxies=None, source_galaxies=None, optimizer_class=non_linear.MultiNest, sub_grid_size=2, image_psf_shape=None, use_positions=False, mask_function=None, inner_circular_mask_radii=None, cosmology=cosmo.Planck15, auto_link_priors=False): """ A phase with a simple source/lens model Parameters ---------- lens_galaxies : [g.Galaxy] | [gm.GalaxyModel] A galaxy that acts as a gravitational lens source_galaxies: [g.Galaxy] | [gm.GalaxyModel] A galaxy that is being lensed optimizer_class: class The class of a non-linear optimizer sub_grid_size: int The side length of the subgrid """ super(LensSourcePlanePhase, self).__init__(optimizer_class=optimizer_class, sub_grid_size=sub_grid_size, image_psf_shape=image_psf_shape, use_positions=use_positions, mask_function=mask_function, inner_circular_mask_radii=inner_circular_mask_radii, cosmology=cosmology, phase_name=phase_name, auto_link_priors=auto_link_priors) self.lens_galaxies = lens_galaxies or [] self.source_galaxies = source_galaxies or [] class Analysis(PhaseImaging.Analysis): def __init__(self, lens_data, cosmology, phase_name, previous_results=None): super(LensSourcePlanePhase.Analysis, self).__init__(lens_data=lens_data, 
cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) def tracer_for_instance(self, instance): return ray_tracing.TracerImageSourcePlanes(lens_galaxies=instance.lens_galaxies, source_galaxies=instance.source_galaxies, image_plane_grid_stack=self.lens_data.grid_stack, border=self.lens_data.border, cosmology=self.cosmology) def padded_tracer_for_instance(self, instance): return ray_tracing.TracerImageSourcePlanes(lens_galaxies=instance.lens_galaxies, source_galaxies=instance.source_galaxies, image_plane_grid_stack=self.lens_data.padded_grid_stack, cosmology=self.cosmology) @classmethod def log(cls, instance): logger.debug( "\nRunning lens/source lens for... \n\nLens Galaxy:\n{}\n\nSource Galaxy:\n{}\n\n".format( instance.lens_galaxies, instance.source_galaxies)) class Result(PhaseImaging.Result): def __init__(self, constant, figure_of_merit, variable, analysis, optimizer): """ The result of a phase """ super(LensSourcePlanePhase.Result, self).__init__(constant=constant, figure_of_merit=figure_of_merit, variable=variable, analysis=analysis, optimizer=optimizer) @property def unmasked_lens_plane_model_image(self): return self.most_likely_fit.unmasked_model_image_of_planes[0] @property def unmasked_source_plane_model_image(self): return self.most_likely_fit.unmasked_model_image_of_planes[1] class MultiPlanePhase(PhaseImaging): """ Fit a simple source and lens system. 
""" galaxies = PhasePropertyCollection("galaxies") @property def phase_property_collections(self): return [self.galaxies] def __init__(self, phase_name, galaxies=None, optimizer_class=non_linear.MultiNest, sub_grid_size=2, image_psf_shape=None, use_positions=False, mask_function=None, inner_circular_mask_radii=None, cosmology=cosmo.Planck15, auto_link_priors=False): """ A phase with a simple source/lens model Parameters ---------- lens_galaxies : [g.Galaxy] | [gm.GalaxyModel] A galaxy that acts as a gravitational lens source_galaxies: [g.Galaxy] | [gm.GalaxyModel] A galaxy that is being lensed optimizer_class: class The class of a non-linear optimizer sub_grid_size: int The side length of the subgrid """ super(MultiPlanePhase, self).__init__(optimizer_class=optimizer_class, sub_grid_size=sub_grid_size, image_psf_shape=image_psf_shape, use_positions=use_positions, mask_function=mask_function, inner_circular_mask_radii=inner_circular_mask_radii, cosmology=cosmology, phase_name=phase_name, auto_link_priors=auto_link_priors) self.galaxies = galaxies class Analysis(PhaseImaging.Analysis): def __init__(self, lens_data, cosmology, phase_name, previous_results=None): self.lens_data = lens_data super(MultiPlanePhase.Analysis, self).__init__(lens_data=lens_data, cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) def tracer_for_instance(self, instance): return ray_tracing.TracerMultiPlanes(galaxies=instance.galaxies, image_plane_grid_stack=self.lens_data.grid_stack, border=self.lens_data.border, cosmology=self.cosmology) def padded_tracer_for_instance(self, instance): return ray_tracing.TracerMultiPlanes(galaxies=instance.galaxies, image_plane_grid_stack=self.lens_data.padded_grid_stack, cosmology=self.cosmology) @classmethod def log(cls, instance): logger.debug("\nRunning multi-plane for... 
\n\nGalaxies:\n{}\n\n".format(instance.galaxies)) class GalaxyFitPhase(AbstractPhase): galaxies = PhasePropertyCollection("galaxies") def __init__(self, phase_name, galaxies=None, use_intensities=False, use_surface_density=False, use_potential=False, use_deflections=False, optimizer_class=non_linear.MultiNest, sub_grid_size=2, mask_function=None, cosmology=cosmo.Planck15): """ A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper passed to it. Parameters ---------- galaxy_data_class: class<gd.GalaxyData> optimizer_class: class The class of a non_linear optimizer sub_grid_size: int The side length of the subgrid """ super(GalaxyFitPhase, self).__init__(optimizer_class=optimizer_class, cosmology=cosmology, phase_name=phase_name) self.use_intensities = use_intensities self.use_surface_density = use_surface_density self.use_potential = use_potential self.use_deflections = use_deflections self.galaxies = galaxies self.sub_grid_size = sub_grid_size self.mask_function = mask_function def run(self, galaxy_data, previous_results=None, mask=None): """ Run this phase. Parameters ---------- mask: Mask The default masks passed in by the pipeline noise_map array previous_results: ResultsCollection An object describing the results of the last phase or None if no phase has been executed Returns ------- result: AbstractPhase.Result A result object comprising the best fit model and other hyper. """ analysis = self.make_analysis(galaxy_data=galaxy_data, previous_results=previous_results, mask=mask) result = self.run_analysis(analysis) return self.make_result(result, analysis) def make_analysis(self, galaxy_data, previous_results=None, mask=None): """ Create an lens object. Also calls the prior passing and lens_data modifying functions to allow child classes to change the behaviour of the phase. 
Parameters ---------- mask: Mask The default masks passed in by the pipeline array noise_map previous_results: ResultsCollection The result from the previous phase Returns ------- lens: Analysis An lens object that the non-linear optimizer calls to determine the fit of a set of values """ mask = setup_phase_mask(data=galaxy_data[0], mask=mask, mask_function=self.mask_function, inner_circular_mask_radii=None) self.pass_priors(previous_results) if self.use_intensities or self.use_surface_density or self.use_potential: galaxy_data = gd.GalaxyFitData(galaxy_data=galaxy_data[0], mask=mask, sub_grid_size=self.sub_grid_size, use_intensities=self.use_intensities, use_surface_density=self.use_surface_density, use_potential=self.use_potential, use_deflections_y=self.use_deflections, use_deflections_x=self.use_deflections) return self.__class__.AnalysisSingle(galaxy_data=galaxy_data, phase_name=self.phase_name, cosmology=self.cosmology, previous_results=previous_results) elif self.use_deflections: galaxy_data_y = gd.GalaxyFitData(galaxy_data=galaxy_data[0], mask=mask, sub_grid_size=self.sub_grid_size, use_intensities=self.use_intensities, use_surface_density=self.use_surface_density, use_potential=self.use_potential, use_deflections_y=self.use_deflections, use_deflections_x=False) galaxy_data_x = gd.GalaxyFitData(galaxy_data=galaxy_data[1], mask=mask, sub_grid_size=self.sub_grid_size, use_intensities=self.use_intensities, use_surface_density=self.use_surface_density, use_potential=self.use_potential, use_deflections_y=False, use_deflections_x=self.use_deflections) return self.__class__.AnalysisDeflections(galaxy_data_y=galaxy_data_y, galaxy_data_x=galaxy_data_x, cosmology=self.cosmology, phase_name=self.phase_name, previous_results=previous_results) class Analysis(Phase.Analysis): def __init__(self, cosmology, phase_name, previous_results): super(GalaxyFitPhase.Analysis, self).__init__(cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) 
self.plot_galaxy_fit_all_at_end_png = \ conf.instance.general.get('output', 'plot_galaxy_fit_all_at_end_png', bool) self.plot_galaxy_fit_all_at_end_fits = \ conf.instance.general.get('output', 'plot_galaxy_fit_all_at_end_fits', bool) self.plot_galaxy_fit_as_subplot = \ conf.instance.general.get('output', 'plot_galaxy_fit_as_subplot', bool) self.plot_galaxy_fit_image = \ conf.instance.general.get('output', 'plot_galaxy_fit_image', bool) self.plot_galaxy_fit_noise_map = \ conf.instance.general.get('output', 'plot_galaxy_fit_noise_map', bool) self.plot_galaxy_fit_model_image = \ conf.instance.general.get('output', 'plot_galaxy_fit_model_image', bool) self.plot_galaxy_fit_residual_map = \ conf.instance.general.get('output', 'plot_galaxy_fit_residual_map', bool) self.plot_galaxy_fit_chi_squared_map = \ conf.instance.general.get('output', 'plot_galaxy_fit_chi_squared_map', bool) @classmethod def log(cls, instance): logger.debug( "\nRunning galaxy fit for... \n\nGalaxies::\n{}\n\n".format(instance.galaxies)) # noinspection PyAbstractClass class AnalysisSingle(Analysis): def __init__(self, galaxy_data, cosmology, phase_name, previous_results=None): super(GalaxyFitPhase.AnalysisSingle, self).__init__(cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) self.galaxy_data = galaxy_data def fit(self, instance): fit = self.fit_for_instance(instance=instance) return fit.figure_of_merit def visualize(self, instance, suffix, during_analysis): self.plot_count += 1 fit = self.fit_for_instance(instance=instance) if self.plot_galaxy_fit_as_subplot: galaxy_fit_plotters.plot_fit_subplot( fit=fit, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, units=self.plot_units, output_path=self.output_image_path, output_format='png') if during_analysis: galaxy_fit_plotters.plot_fit_individuals( fit=fit, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=self.plot_galaxy_fit_image, 
should_plot_noise_map=self.plot_galaxy_fit_noise_map, should_plot_model_image=self.plot_galaxy_fit_model_image, should_plot_residual_map=self.plot_galaxy_fit_residual_map, should_plot_chi_squared_map=self.plot_galaxy_fit_chi_squared_map, units=self.plot_units, output_path=self.output_image_path, output_format='png') elif not during_analysis: if self.plot_ray_tracing_all_at_end_png: galaxy_fit_plotters.plot_fit_individuals( fit=fit, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=True, should_plot_noise_map=True, should_plot_model_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, units=self.plot_units, output_path=self.output_image_path, output_format='png') if self.plot_ray_tracing_all_at_end_fits: galaxy_fit_plotters.plot_fit_individuals( fit=fit, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=True, should_plot_noise_map=True, should_plot_model_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, units=self.plot_units, output_path=self.output_fits_path, output_format='fits') return fit def fit_for_instance(self, instance): """ Determine the fit of a lens galaxy and source galaxy to the lens_data in this lens. 
Parameters ---------- instance A model instance with attributes Returns ------- fit: Fit A fractional value indicating how well this model fit and the model lens_data itself """ return galaxy_fit.GalaxyFit(galaxy_data=self.galaxy_data, model_galaxies=instance.galaxies) # noinspection PyAbstractClass class AnalysisDeflections(Analysis): def __init__(self, galaxy_data_y, galaxy_data_x, cosmology, phase_name, previous_results=None): super(GalaxyFitPhase.AnalysisDeflections, self).__init__(cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) self.galaxy_data_y = galaxy_data_y self.galaxy_data_x = galaxy_data_x self.output_image_y_path = "{}/image/fit_y_".format(self.phase_output_path) self.output_fits_y_path = "{}/image/fits/fit_y".format(self.phase_output_path) self.output_image_x_path = "{}/image/fit_x_".format(self.phase_output_path) self.output_fits_x_path = "{}/image/fits/fit_x".format(self.phase_output_path) def fit(self, instance): fit_y, fit_x = self.fit_for_instance(instance=instance) return fit_y.figure_of_merit + fit_x.figure_of_merit def visualize(self, instance, suffix, during_analysis): self.plot_count += 1 fit_y, fit_x = self.fit_for_instance(instance=instance) if self.plot_galaxy_fit_as_subplot: galaxy_fit_plotters.plot_fit_subplot( fit=fit_y, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, units=self.plot_units, output_path=self.output_image_y_path, output_format='png') galaxy_fit_plotters.plot_fit_subplot( fit=fit_x, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, units=self.plot_units, output_path=self.output_image_x_path, output_format='png') if during_analysis: galaxy_fit_plotters.plot_fit_individuals( fit=fit_y, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=self.plot_galaxy_fit_image, should_plot_noise_map=self.plot_galaxy_fit_noise_map, should_plot_model_image=self.plot_galaxy_fit_model_image, 
should_plot_residual_map=self.plot_galaxy_fit_residual_map, should_plot_chi_squared_map=self.plot_galaxy_fit_chi_squared_map, units=self.plot_units, output_path=self.output_image_y_path, output_format='png') galaxy_fit_plotters.plot_fit_individuals( fit=fit_x, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=self.plot_galaxy_fit_image, should_plot_noise_map=self.plot_galaxy_fit_noise_map, should_plot_model_image=self.plot_galaxy_fit_model_image, should_plot_residual_map=self.plot_galaxy_fit_residual_map, should_plot_chi_squared_map=self.plot_galaxy_fit_chi_squared_map, units=self.plot_units, output_path=self.output_image_x_path, output_format='png') elif not during_analysis: if self.plot_ray_tracing_all_at_end_png: galaxy_fit_plotters.plot_fit_individuals( fit=fit_y, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=True, should_plot_noise_map=True, should_plot_model_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, units=self.plot_units, output_path=self.output_image_y_path, output_format='png') galaxy_fit_plotters.plot_fit_individuals( fit=fit_x, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=True, should_plot_noise_map=True, should_plot_model_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, units=self.plot_units, output_path=self.output_image_x_path, output_format='png') if self.plot_ray_tracing_all_at_end_fits: galaxy_fit_plotters.plot_fit_individuals( fit=fit_y, should_plot_mask=self.should_plot_mask, zoom_around_mask=self.zoom_around_mask, should_plot_image=True, should_plot_noise_map=True, should_plot_model_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, units=self.plot_units, output_path=self.output_fits_y_path, output_format='fits') galaxy_fit_plotters.plot_fit_individuals( fit=fit_x, should_plot_mask=self.should_plot_mask, 
zoom_around_mask=self.zoom_around_mask, should_plot_image=True, should_plot_noise_map=True, should_plot_model_image=True, should_plot_residual_map=True, should_plot_chi_squared_map=True, units=self.plot_units, output_path=self.output_fits_x_path, output_format='fits') return fit_y, fit_x def fit_for_instance(self, instance): fit_y = galaxy_fit.GalaxyFit(galaxy_data=self.galaxy_data_y, model_galaxies=instance.galaxies) fit_x = galaxy_fit.GalaxyFit(galaxy_data=self.galaxy_data_x, model_galaxies=instance.galaxies) return fit_y, fit_x class Result(Phase.Result): def __init__(self, constant, figure_of_merit, variable, analysis, optimizer): """ The result of a phase """ super(GalaxyFitPhase.Result, self).__init__(constant=constant, figure_of_merit=figure_of_merit, variable=variable, analysis=analysis, optimizer=optimizer) class SensitivityPhase(PhaseImaging): lens_galaxies = PhasePropertyCollection("lens_galaxies") source_galaxies = PhasePropertyCollection("source_galaxies") sensitive_galaxies = PhasePropertyCollection("sensitive_galaxies") def __init__(self, phase_name, lens_galaxies=None, source_galaxies=None, sensitive_galaxies=None, optimizer_class=non_linear.MultiNest, sub_grid_size=2, mask_function=None, cosmology=cosmo.Planck15): """ A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper passed to it. 
Parameters ---------- optimizer_class: class The class of a non_linear optimizer sub_grid_size: int The side length of the subgrid """ super(SensitivityPhase, self).__init__(optimizer_class=optimizer_class, sub_grid_size=sub_grid_size, mask_function=mask_function, cosmology=cosmology, phase_name=phase_name) self.lens_galaxies = lens_galaxies or [] self.source_galaxies = source_galaxies or [] self.sensitive_galaxies = sensitive_galaxies or [] # noinspection PyAbstractClass class Analysis(PhaseImaging.Analysis): def __init__(self, lens_data, cosmology, phase_name, previous_results=None): self.lens_data = lens_data super(PhaseImaging.Analysis, self).__init__(cosmology=cosmology, phase_name=phase_name, previous_results=previous_results) def fit(self, instance): """ Determine the fit of a lens galaxy and source galaxy to the lens_data in this lens. Parameters ---------- instance A model instance with attributes Returns ------- fit: Fit A fractional value indicating how well this model fit and the model lens_data itself """ tracer_normal = self.tracer_normal_for_instance(instance) tracer_sensitive = self.tracer_sensitive_for_instance(instance) fit = self.fit_for_tracers(tracer_normal=tracer_normal, tracer_sensitive=tracer_sensitive) return fit.figure_of_merit def visualize(self, instance, suffix, during_analysis): self.plot_count += 1 tracer_normal = self.tracer_normal_for_instance(instance) tracer_sensitive = self.tracer_sensitive_for_instance(instance) fit = self.fit_for_tracers(tracer_normal=tracer_normal, tracer_sensitive=tracer_sensitive) ccd_plotters.plot_ccd_subplot(ccd_data=self.lens_data.ccd_data, mask=self.lens_data.mask, positions=self.lens_data.positions, output_path=self.output_image_path, output_format='png') ccd_plotters.plot_ccd_individual(ccd_data=self.lens_data.ccd_data, mask=self.lens_data.mask, positions=self.lens_data.positions, output_path=self.output_image_path, output_format='png') 
ray_tracing_plotters.plot_ray_tracing_subplot(tracer=tracer_normal, output_path=self.output_image_path, output_format='png', output_filename='tracer_normal') ray_tracing_plotters.plot_ray_tracing_subplot(tracer=tracer_sensitive, output_path=self.output_image_path, output_format='png', output_filename='tracer_sensitive') sensitivity_fit_plotters.plot_fit_subplot(fit=fit, output_path=self.output_image_path, output_format='png') return fit def tracer_normal_for_instance(self, instance): return ray_tracing.TracerImageSourcePlanes(lens_galaxies=instance.lens_galaxies, source_galaxies=instance.source_galaxies, image_plane_grid_stack=self.lens_data.grid_stack, border=self.lens_data.border) def tracer_sensitive_for_instance(self, instance): return ray_tracing.TracerImageSourcePlanes( lens_galaxies=instance.lens_galaxies + instance.sensitive_galaxies, source_galaxies=instance.source_galaxies, image_plane_grid_stack=self.lens_data.grid_stack, border=self.lens_data.border) def fit_for_tracers(self, tracer_normal, tracer_sensitive): return sensitivity_fit.fit_lens_data_with_sensitivity_tracers(lens_data=self.lens_data, tracer_normal=tracer_normal, tracer_sensitive=tracer_sensitive) @classmethod def log(cls, instance): logger.debug( "\nRunning lens/source lens for... \n\nLens Galaxy:\n{}\n\nSource Galaxy:\n{}\n\n Sensitive " "Galaxy\n{}\n\n " "".format(instance.lens_galaxies, instance.source_galaxies, instance.sensitive_galaxies)) class HyperAnalysis(object): def __init__(self): pass def make_path_if_does_not_exist(path): if not os.path.exists(path): os.makedirs(path)
45.552597
120
0.617027
7,632
68,420
5.157495
0.049528
0.035313
0.017784
0.024592
0.797393
0.758066
0.72194
0.689396
0.642676
0.603552
0
0.001065
0.313899
68,420
1,501
121
45.582945
0.837441
0.124905
0
0.523645
0
0.002307
0.042811
0.018738
0
0
0
0
0
1
0.116494
false
0.008074
0.020761
0.032295
0.249135
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b5efcf9589973cda7ad5bade65c671818f195949
237
py
Python
robot_control/robot_control/ignition/vehicle.py
blakermchale/robot-control
b2d0109950d5abb4b12055ce7a3645ca9cc07b20
[ "MIT" ]
null
null
null
robot_control/robot_control/ignition/vehicle.py
blakermchale/robot-control
b2d0109950d5abb4b12055ce7a3645ca9cc07b20
[ "MIT" ]
4
2021-06-29T05:14:13.000Z
2021-08-11T05:00:44.000Z
robot_control/robot_control/ignition/vehicle.py
blakermchale/robot-control
b2d0109950d5abb4b12055ce7a3645ca9cc07b20
[ "MIT" ]
null
null
null
#!/usr/bin/env python from robot_control.abstract_vehicle import AVehicle class Vehicle(AVehicle): def __init__(self, instance=0): super().__init__(instance=instance) self.get_logger().debug("Vehicle initialized")
23.7
54
0.725738
29
237
5.551724
0.758621
0
0
0
0
0
0
0
0
0
0
0.005
0.156118
237
9
55
26.333333
0.8
0.084388
0
0
0
0
0.087963
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
b5f6ccd17210b5d97832f68468ea2ab28de09ecd
23
py
Python
logic/__init__.py
tingxin/DevIoT_IndoorLocation_Starter_Kit
df03b664f87eba77bb7ad6ed7ed53a97b6365e48
[ "Apache-2.0" ]
null
null
null
logic/__init__.py
tingxin/DevIoT_IndoorLocation_Starter_Kit
df03b664f87eba77bb7ad6ed7ed53a97b6365e48
[ "Apache-2.0" ]
1
2016-11-05T20:18:36.000Z
2016-11-08T10:40:17.000Z
sensors/__init__.py
tingxin/DevIoT_RaspberryPi_Starter_Kit
2ee0b4027e366fc9715e35c08eaac7a9bcc49bc7
[ "Apache-2.0" ]
null
null
null
__author__ = 'tingxxu'
11.5
22
0.73913
2
23
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.130435
23
1
23
23
0.65
0
0
0
0
0
0.304348
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bd0435cf428dd88d2e05b070bb8e822e50ab28f2
412
py
Python
django_c/frm_b/mdl_frm/admin.py
mohammad716e/python_training
0654623c603c775ed2cbdc3919dc815891c8fdeb
[ "MIT" ]
null
null
null
django_c/frm_b/mdl_frm/admin.py
mohammad716e/python_training
0654623c603c775ed2cbdc3919dc815891c8fdeb
[ "MIT" ]
null
null
null
django_c/frm_b/mdl_frm/admin.py
mohammad716e/python_training
0654623c603c775ed2cbdc3919dc815891c8fdeb
[ "MIT" ]
null
null
null
from django.contrib import admin from mdl_frm.models import subscriber # Register your models here. admin.site.register(subscriber) '''(django_env_1) A:\ARC\PRJ\PY\python_training\django_c\frm_b>python manage.py createsuperuser Username (leave blank to use 'adminone'): developer Email address: mmohammadianxdev@gmail.com Password: 147741147zxc Password (again): 147741147zxc Superuser created successfully. '''
37.454545
95
0.817961
56
412
5.910714
0.767857
0
0
0
0
0
0
0
0
0
0
0.050667
0.089806
412
11
96
37.454545
0.832
0.063107
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
bd2f5dd244936acac81364d868b7393ac3abebae
186
py
Python
log_errors/models.py
erickdom/restAndroid
87ae8cfb5a4ddc207ab7b2d4e9548d316db14ad0
[ "Apache-2.0" ]
null
null
null
log_errors/models.py
erickdom/restAndroid
87ae8cfb5a4ddc207ab7b2d4e9548d316db14ad0
[ "Apache-2.0" ]
null
null
null
log_errors/models.py
erickdom/restAndroid
87ae8cfb5a4ddc207ab7b2d4e9548d316db14ad0
[ "Apache-2.0" ]
null
null
null
from django.db import models class Log_Errors(models.Model): error = models.TextField() error_human = models.TextField() date_time = models.DateTimeField(auto_now_add=True)
26.571429
55
0.752688
25
186
5.4
0.76
0.222222
0
0
0
0
0
0
0
0
0
0
0.150538
186
7
55
26.571429
0.85443
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.2
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bd4e50eafccd9f7e3f57039e69696249447a460d
570
py
Python
perftest/read_large_file.py
alinabi/pyhecdss
fb94a0cd1bbc0add873eeb1c91b016d5006424de
[ "MIT" ]
18
2019-09-04T17:42:15.000Z
2021-11-15T12:16:24.000Z
perftest/read_large_file.py
alinabi/pyhecdss
fb94a0cd1bbc0add873eeb1c91b016d5006424de
[ "MIT" ]
27
2019-09-04T14:55:36.000Z
2021-12-08T01:36:08.000Z
perftest/read_large_file.py
alinabi/pyhecdss
fb94a0cd1bbc0add873eeb1c91b016d5006424de
[ "MIT" ]
5
2019-09-04T21:49:22.000Z
2022-03-24T13:41:16.000Z
import pyhecdss import datetime if __name__=='__main__': pyhecdss.set_message_level(0) d=pyhecdss.DSSFile('./ITP_PP_out_ec.dss') s=datetime.datetime.now() catdf=d.read_catalog() print('catalog read in :', datetime.datetime.now()-s ) plist=d.get_pathnames() print('Reading ',len(plist),'...') s=datetime.datetime.now() for path in plist: si=datetime.datetime.now() df,u,p=d.read_rts(path) print('read ',path,' in ',datetime.datetime.now()-si) print('read all in ',datetime.datetime.now()-s)
33.529412
62
0.633333
79
570
4.367089
0.468354
0.278261
0.330435
0.182609
0.127536
0
0
0
0
0
0
0.002193
0.2
570
16
63
35.625
0.754386
0
0
0.125
0
0
0.137184
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.25
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bd550a4735583e2c6691187540b8e40f2fc9135f
13,794
py
Python
tests/test_user_pays_for_booking_endpoint.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
1
2019-04-04T12:27:55.000Z
2019-04-04T12:27:55.000Z
tests/test_user_pays_for_booking_endpoint.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
34
2019-03-26T11:18:17.000Z
2022-02-10T08:12:36.000Z
tests/test_user_pays_for_booking_endpoint.py
chidioguejiofor/airtech-api
45d77da0cc4230dd3cb7ab4cbb5168a9239850f5
[ "MIT" ]
null
null
null
import pytest import requests from airtech_api.utils import success_messages from mock import Mock from airtech_api.utils.constants import PAYSTACK_INITIALIZE_URL from airtech_api.utils.error_messages import serialization_errors from airtech_api.booking.models import Booking from django.utils import timezone from tests.helpers.assertion_helpers import assert_missing_header, assert_invalid_token_format from datetime import datetime from dateutil.parser import parse USER_BOOKING_URL = '/api/v1/user/bookings/{}/payment' class RequestsResponseMock: def __init__(self, status_code, raise_exception=False, exception_msg='An error occured', **kwargs): if raise_exception: raise Exception(exception_msg) self._json = kwargs self.status_code = status_code def json(self): return self._json @pytest.mark.django_db class TestPayForFlightTicketRoute: def test_user_pays_for_booking_ticket_succeeds( self, client, valid_user_one_token, saved_valid_user_one, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] booking.paid_at = None booking.save() paystack_res_mock = { 'status': True, 'data': { 'status': 'success', 'gateway_response': 'Payment was made to account', 'metadata': { 'callbackURL': 'https://test.com', 'bookingId': 'booking-UUID' }, } } paystack_res_mock = { 'status': True, 'data': { 'authorization_url': 'http://payment-link.com', } } paystack_response = RequestsResponseMock(200, False, **paystack_res_mock) requests.post = Mock(return_value=paystack_response) client_callback = 'https://test.com' response = client.post( USER_BOOKING_URL.format(booking.id), content_type='application/json', HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token), data={ 'callbackURL': client_callback, }) response_body = response.data response_data = response_body['data'] (url, ), res = requests.post.call_args data = res['data'] # Reqeust Assetions assert response.status_code == 200 assert response_body['status'] == 'success' assert 
response_body['message'] == success_messages[ 'payment_url_created'] assert response_data['paymentLink'] == paystack_res_mock['data'][ 'authorization_url'] metadata = eval(data['metadata']) # Post call assertions assert url == PAYSTACK_INITIALIZE_URL assert data['amount'] == booking.ticket_price assert data['email'] == booking.created_by.email assert metadata['callbackURL'] == client_callback assert metadata['bookingId'] == str(booking.id) assert metadata['username'] == saved_valid_user_one.username assert metadata['email'] == saved_valid_user_one.email def test_user_pays_for_ticket_that_has_already_been_bought_fails( self, client, valid_user_one_token, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] booking.paid_at = timezone.now() booking.save() paystack_res_mock = { 'status': True, 'data': { 'authorization_url': 'http://payment-link.com', } } paystack_response = RequestsResponseMock(200, False, **paystack_res_mock) requests.post = Mock(return_value=paystack_response) client_callback = 'https://test.com' response = client.post( USER_BOOKING_URL.format(booking.id), content_type='application/json', HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token), data={ 'callbackURL': client_callback, }) response_body = response.data # Reqeust Assetions assert response.status_code == 400 assert response_body['status'] == 'error' assert response_body['message'] == serialization_errors[ 'booking_already_paid'] assert requests.post.called is False def test_user_pays_for_ticket_that_is_expired_fails( self, client, valid_user_one_token, expired_booking): paystack_res_mock = { 'status': True, 'data': { 'authorization_url': 'http://payment-link.com', } } paystack_response = RequestsResponseMock(200, False, **paystack_res_mock) requests.post = Mock(return_value=paystack_response) client_callback = 'https://test.com' response = client.post( USER_BOOKING_URL.format(expired_booking.id), content_type='application/json', 
HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token), data={ 'callbackURL': client_callback, }) response_body = response.data # Reqeust Assetions assert response.status_code == 400 assert response_body['status'] == 'error' assert response_body['message'] == serialization_errors[ 'booking_expired'] assert requests.post.called is False def test_make_payment_with_invalid_callback_url_fails( self, client, valid_user_one_token, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] paystack_res_mock = { 'status': True, 'data': { 'authorization_url': 'http://payment-link.com', } } paystack_response = RequestsResponseMock(200, False, **paystack_res_mock) requests.post = Mock(return_value=paystack_response) client_callback = 'utc://test.com' response = client.post( USER_BOOKING_URL.format(booking.id), content_type='application/json', HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token), data={ 'callbackURL': client_callback, }) response_body = response.data # Reqeust Assetions assert response.status_code == 400 assert response_body['status'] == 'error' assert response_body['message'] == serialization_errors[ 'invalid_url'].format('callbackURL') assert requests.post.called is False def test_payment_fails_when_paystack_throws_an_error( self, client, valid_user_one_token, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] requests.post = Mock(return_value=Exception()) client_callback = 'https://test.com' response = client.post( USER_BOOKING_URL.format(booking.id), content_type='application/json', HTTP_AUTHORIZATION='Bearer {}'.format(valid_user_one_token), data={ 'callbackURL': client_callback, }) response_body = response.data # Reqeust Assetions assert response.status_code == 400 assert response_body['status'] == 'error' assert response_body['message'] == serialization_errors[ 'payment_link_error'] assert requests.post.called def 
test_make_payment_without_invalid_token_fails( self, client, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] response = client.post( USER_BOOKING_URL.format(booking.id), content_type='application/json', HTTP_AUTHORIZATION='{}'.format('invalid-token'), ) assert_invalid_token_format(response) def test_make_payment_with_missing_token_fails( self, client, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] response = client.post( USER_BOOKING_URL.format(booking.id), content_type='application/json', ) assert_missing_header(response) @pytest.mark.django_db class TestPaymentRedirectRoute: def test_get_call_raises_an_exception( self, client, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] requests.get = Mock(side_effect=Exception()) response = client.get( USER_BOOKING_URL.format(booking.id) + '?reference=blah') assert response.status_code == 400 assert response.data['status'] == 'error' assert response.data['message'] == \ serialization_errors['paystack_threw_error'] def test_paystack_returns_failure_when_verifying_user( self, client, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] booking.paid_at = None booking.save() payment_time = datetime.now() client_callback = 'https://test.com' paystack_res_mock = { 'status': False, 'data': { 'status': 'failure', 'gateway_response': 'Payment was made to account', 'paid_at': payment_time, 'metadata': { 'callbackURL': client_callback, 'bookingId': str(booking.id) }, } } paystack_response = RequestsResponseMock(200, False, **paystack_res_mock) requests.get = Mock(return_value=paystack_response) reference_mock = 'reference-101' response = client.get( USER_BOOKING_URL.format(booking.id) + f'?reference={reference_mock}', ) query_params = { query.split('=')[0]: query.split('=')[1] for query in response.url.split('?')[1].split('&') } assert 
response.status_code == 303 assert response.url.startswith(client_callback) assert query_params['success'] == 'false' assert query_params['bookingId'] == str(booking.id) def test_paystack_returns_insufficient_funds_error( self, client, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] booking.paid_at = None booking.save() payment_time = datetime.now() client_callback = 'https://test.com' paystack_res_mock = { 'status': True, 'data': { 'status': 'failure', 'gateway_response': 'Insufficient funds', 'paid_at': payment_time, 'metadata': { 'callbackURL': client_callback, 'bookingId': str(booking.id) }, } } paystack_response = RequestsResponseMock(200, False, **paystack_res_mock) requests.get = Mock(return_value=paystack_response) reference_mock = 'reference-101' response = client.get( USER_BOOKING_URL.format(booking.id) + f'?reference={reference_mock}', ) query_params = { query.split('=')[0]: query.split('=')[1] for query in response.url.split('?')[1].split('&') } assert response.status_code == 303 assert response.url.startswith(client_callback) assert query_params['success'] == 'false' assert query_params['bookingId'] == str(booking.id) def test_successful_payment_redirects_user_succeeds( self, client, valid_user_one_token, saved_valid_user_one, saved_bulk_inserted_bookings_for_user_one): booking = saved_bulk_inserted_bookings_for_user_one[0] booking.paid_at = None booking.save() payment_time = timezone.now() client_callback = 'https://test.com' paystack_res_mock = { 'status': True, 'data': { 'status': 'success', 'gateway_response': 'Payment was made to account', 'paid_at': payment_time, 'metadata': { 'callbackURL': client_callback, 'bookingId': str(booking.id) }, } } paystack_response = RequestsResponseMock(200, False, **paystack_res_mock) requests.get = Mock(return_value=paystack_response) reference_mock = 'sample-reference' response = client.get( USER_BOOKING_URL.format(booking.id) + f'?reference={reference_mock}', # 
content_type='application/json', ) query_params = { query.split('=')[0]: query.split('=')[1] for query in response.url.split('?')[1].split('&') } booking = Booking.objects.get(pk=booking.id) # Reqeust Assetions assert response.status_code == 303 assert query_params['success'] == 'true' assert query_params['bookingId'] == str(booking.id) assert response.url.startswith(client_callback) assert booking.paid_at == payment_time
37.281081
94
0.597941
1,388
13,794
5.600865
0.116715
0.031515
0.043736
0.064317
0.744019
0.720093
0.71379
0.681502
0.665423
0.659506
0
0.007737
0.306655
13,794
369
95
37.382114
0.805102
0.011672
0
0.654952
0
0
0.113403
0.008294
0
0
0
0
0.146965
1
0.041534
false
0
0.035144
0.003195
0.089457
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
1fd0fb4dd2db013d543f097f1f61e2ce2b6b1812
2,117
py
Python
simulation/models/diet_models.py
BenLatham/Agricultural-Simulation
23ae2c6ae2ae6d0b1ca318c6e4543ccbe40a0039
[ "MIT" ]
null
null
null
simulation/models/diet_models.py
BenLatham/Agricultural-Simulation
23ae2c6ae2ae6d0b1ca318c6e4543ccbe40a0039
[ "MIT" ]
5
2016-07-08T12:05:31.000Z
2016-07-08T17:31:23.000Z
simulation/models/diet_models.py
BenLatham/FLOSS-Agricultural-Simulation
23ae2c6ae2ae6d0b1ca318c6e4543ccbe40a0039
[ "MIT" ]
null
null
null
from django.db import models from .general_models import ScenarioSpecificBase from django.forms.models import model_to_dict class Feed(ScenarioSpecificBase): me = models.FloatField() fme = models.FloatField() erdp = models.FloatField() dup = models.FloatField() adf = models.FloatField() price = models.FloatField(null=True) avaliable = models.FloatField(null=True) feed_type = models.ForeignKey('FeedType') maxInclusion = models.FloatField(null=True) @property def qm_ruminant(self): """ Yan+agnew 2004 :return: ratio of metabolisable energy to gross energy """ return (-0.000796 * self.adf) + 0.827 aggregable = ["me", "fme", "erdp", "dup", "adf", "price", "qm_ruminant"] # fields which can be meaningfully aggregated def to_dict(self): dict = model_to_dict(self) dict["feed_type"]=self.feed_type.id dict["qm_ruminant"]=self.qm_ruminant() return dict class FeedType(ScenarioSpecificBase): minInclusion = models.FloatField(null=True) maxInclusion = models.FloatField(null=True) def to_dict(self): dict ={} dict["id"]=self.id dict["minInclusion"]=self.minInclusion dict["maxInclusion"]=self.maxInclusion return dict # class BreedDetails(models.Model): # breed = models.CharField(max_length=20, unique=True) # # # Fox et. al. 1998 # MM = models.FloatField(null=True) # maintenance multiplier non-lactating # MML = models.FloatField(null=True) # maintenance multiplier lactating # BW = models.FloatField(null=True) # Calf Birth Weight kg # peak_yield = models.FloatField(null=True) # Average peak milk yield kg # BW_adjustment_Q1 = models.FloatField(null=True) # Q1 Birth weight adjustment/age of dam yr # BW_adjustment_Q2 = models.FloatField(null=True) # Q2 Birth weight adjustment/age of dam yr # BW_adjustment_Q3 = models.FloatField(null=True) # Q3 Birth weight adjustment/age of dam yr # BW_adjustment_Q4 = models.FloatField(null=True) # Q4 Birth weight adjustment/age of dam yr
35.881356
123
0.684459
263
2,117
5.418251
0.330798
0.202105
0.182456
0.218947
0.249825
0.175439
0.112281
0.090526
0.090526
0
0
0.017272
0.206897
2,117
58
124
36.5
0.831447
0.422296
0
0.193548
0
0
0.07265
0
0
0
0
0
0
1
0.096774
false
0
0.096774
0
0.741935
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
1ff4ac19544bf86a8a7056f1e4301ad5c7792ec3
5,579
py
Python
src/utils/print_code.py
BeginnerCompilers/cool-compiler-2021
fe8c4dbbbaff9dc855322c2df2892dcfc5d449f9
[ "MIT" ]
null
null
null
src/utils/print_code.py
BeginnerCompilers/cool-compiler-2021
fe8c4dbbbaff9dc855322c2df2892dcfc5d449f9
[ "MIT" ]
null
null
null
src/utils/print_code.py
BeginnerCompilers/cool-compiler-2021
fe8c4dbbbaff9dc855322c2df2892dcfc5d449f9
[ "MIT" ]
null
null
null
import cmp.visitor as visitor from utils.ast.AST_Nodes import ast_nodes as nodes class PrintCode: @visitor.on('node') def visit(self, node, tabs = 0): pass @visitor.when(nodes.ProgramNode) def visit(self, node, tabs = 0): return '\n'.join(self.visit(dec, tabs) for dec in node.declarations) @visitor.when(nodes.ClassDeclarationNode) def visit(self, node, tabs = 0): parent = '' if node.parent is None else f'inherits {node.parent}' ans = '\t' * tabs + f'class {node.id} {parent}' features = '\n'.join(self.visit(feat, tabs + 1) for feat in node.features) return f'{ans} {{\n{features}\n}};' @visitor.when(nodes.AttrDeclarationNode) def visit(self, node, tabs = 0): expr = '' if node.expr is None else f'<- {self.visit(node.expr)}' return '\t' * tabs + f'{node.id}: {node.type} {expr};' @visitor.when(nodes.MethDeclarationNode) def visit(self, node, tabs = 0): params = ', '.join(': '.join(param) for param in node.params) ans = '\t' * tabs + f'{node.id} ({params}): {node.type}' body = self.visit(node.body, tabs + 1) return f'{ans} {{\n{body}\n' + '\t' * tabs + '};' @visitor.when(nodes.AssignNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{node.id} <- {self.visit(node.expr)}' @visitor.when(nodes.CallNode) def visit(self, node, tabs = 0): obj = '' if node.obj is None else f'{self.visit(node.obj)}' if node.type else f'{self.visit(node.obj)}.' 
args = ', '.join(self.visit(arg) for arg in node.args) typex = '' if node.type is None else f'@ {node.type}' return '\t' * tabs + f'{obj}{typex}{node.id} ({args})' @visitor.when(nodes.IfThenElseNode) def visit(self, node, tabs = 0): ifx = self.visit(node.if_expr) then = self.visit(node.then_expr, tabs + 1) elsex = self.visit(node.else_expr, tabs + 1) return '\t' * tabs + f'if {ifx}\n' + '\t' * tabs + f'then\n{then}\n' + '\t' * tabs + f'else\n{elsex}\n' + '\t' * tabs + 'fi' @visitor.when(nodes.WhileNode) def visit(self, node, tabs = 0): ans = f'while {self.visit(node.conditional_expr)} loop' body = self.visit(node.loop_expr, tabs + 1) return '\t' * tabs + f'{ans}\n{body}\n' + '\t' * tabs + 'pool' @visitor.when(nodes.BlockNode) def visit(self, node, tabs = 0): expr_list = ';\n'.join(self.visit(expr, tabs + 1) for expr in node.expr_list) return '\t' * tabs + f'{{\n{expr_list};\n' + '\t' * tabs + '}' @visitor.when(nodes.LetNode) def visit(self, node, tabs = 0): identifiers = [] for idx, typex, id_expr in node.identifiers: if id_expr: identifiers.append(f'{idx}: {typex} <- {self.visit(id_expr)}') else: identifiers.append(f'{idx}: {typex}') identifiers = (',\n' + '\t' * (tabs + 1)).join(identifiers) return '\t' * tabs + f'let {identifiers} in\n{self.visit(node.in_expr, tabs + 1)}' @visitor.when(nodes.CaseNode) def visit(self, node, tabs = 0): predicate = self.visit(node.predicate) branches = '\n'.join(f'\t' * (tabs + 1) + f'{idx}: {typex} =>\n{self.visit(expr, tabs + 2)};' for idx, typex, expr in node.branches) return '\t' * tabs + f'case {predicate} of \n{branches}\n' + '\t' * tabs + 'esac' @visitor.when(nodes.NotNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'not {self.visit(node.expr)}' @visitor.when(nodes.ConstantNumNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{node.lex}' @visitor.when(nodes.ConstantBoolNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{node.lex}' @visitor.when(nodes.ConstantStringNode) def visit(self, node, 
tabs = 0): return '\t' * tabs + f'{node.lex}' @visitor.when(nodes.VariableNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{node.lex}' @visitor.when(nodes.InstantiateNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'(new {node.lex})' @visitor.when(nodes.IsVoidNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'isvoid {self.visit(node.lex)}' @visitor.when(nodes.ComplementNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'~ {self.visit(node.lex)}' @visitor.when(nodes.PlusNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{self.visit(node.left)} + {self.visit(node.right)}' @visitor.when(nodes.MinusNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{self.visit(node.left)} - {self.visit(node.right)}' @visitor.when(nodes.StarNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{self.visit(node.left)} * {self.visit(node.right)}' @visitor.when(nodes.DivNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{self.visit(node.left)} / {self.visit(node.right)}' @visitor.when(nodes.LessThanNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{self.visit(node.left)} < {self.visit(node.right)}' @visitor.when(nodes.LessEqualNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{self.visit(node.left)} <= {self.visit(node.right)}' @visitor.when(nodes.EqualNode) def visit(self, node, tabs = 0): return '\t' * tabs + f'{self.visit(node.left)} = {self.visit(node.right)}'
36.464052
140
0.572683
781
5,579
4.072983
0.126761
0.099025
0.118516
0.135806
0.511789
0.484439
0.405847
0.30745
0.30745
0.30745
0
0.008751
0.242158
5,579
153
141
36.464052
0.743614
0
0
0.284404
0
0
0.215591
0.110036
0
0
0
0
0
1
0.247706
false
0.009174
0.018349
0.155963
0.513761
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
953077357cfd8f9828b7b4e30f3f1ede02ab6ba1
19
py
Python
main/smpeg/update.py
RoastVeg/cports
803c7f07af341eb32f791b6ec1f237edb2764bd5
[ "BSD-2-Clause" ]
46
2021-06-10T02:27:32.000Z
2022-03-27T11:33:24.000Z
main/smpeg/update.py
RoastVeg/cports
803c7f07af341eb32f791b6ec1f237edb2764bd5
[ "BSD-2-Clause" ]
58
2021-07-03T13:58:20.000Z
2022-03-13T16:45:35.000Z
main/smpeg/update.py
RoastVeg/cports
803c7f07af341eb32f791b6ec1f237edb2764bd5
[ "BSD-2-Clause" ]
6
2021-07-04T10:46:40.000Z
2022-01-09T00:03:59.000Z
pkgname = "smpeg2"
9.5
18
0.684211
2
19
6.5
1
0
0
0
0
0
0
0
0
0
0
0.0625
0.157895
19
1
19
19
0.75
0
0
0
0
0
0.315789
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
1f6d175628efd29dfa07f8931ef6ef6857546d51
183
py
Python
core/fixtures/user.py
eduardomarco/django-auth-react-tutorial
47460a9575d4a82bcb07f7703d189620bddd19b0
[ "Apache-2.0" ]
8
2021-06-28T17:59:10.000Z
2022-03-12T12:24:17.000Z
core/fixtures/user.py
eduardomarco/django-auth-react-tutorial
47460a9575d4a82bcb07f7703d189620bddd19b0
[ "Apache-2.0" ]
1
2021-08-11T17:07:37.000Z
2021-08-13T08:40:35.000Z
core/fixtures/user.py
eduardomarco/django-auth-react-tutorial
47460a9575d4a82bcb07f7703d189620bddd19b0
[ "Apache-2.0" ]
5
2021-06-29T11:53:28.000Z
2022-02-07T20:39:45.000Z
from core.user.models import User data_user = { "email": "testuser@yopmail.com", "password": "12345678", "username": "testuser" } User.objects.create_user(**data_user)
16.636364
37
0.672131
22
183
5.454545
0.681818
0.133333
0.2
0
0
0
0
0
0
0
0
0.052288
0.163934
183
10
38
18.3
0.732026
0
0
0
0
0
0.311475
0
0
0
0
0
0
1
0
false
0.142857
0.142857
0
0.142857
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
1f6ee516584929e148b400ee4e2c8ac02f2cdb98
1,397
py
Python
lung_cancer_prediction_app/views.py
MohanaRuban619/LUNG-CANCER-PREDICTION
0f70770bf7819cda38de8cabfdc9561f3b703f27
[ "MIT" ]
null
null
null
lung_cancer_prediction_app/views.py
MohanaRuban619/LUNG-CANCER-PREDICTION
0f70770bf7819cda38de8cabfdc9561f3b703f27
[ "MIT" ]
null
null
null
lung_cancer_prediction_app/views.py
MohanaRuban619/LUNG-CANCER-PREDICTION
0f70770bf7819cda38de8cabfdc9561f3b703f27
[ "MIT" ]
null
null
null
from django.shortcuts import render import joblib def frontview(request): return render(request,"fors.html",{}) def backview(request): file = joblib.load(open("data.xlsx.svc","rb")) cls = file lis = [] lis.append(request.GET["Age"]) lis.append(request.GET["Gender"]) lis.append(request.GET["Air Pollution"]) lis.append(request.GET["Alcohol use"]) lis.append(request.GET["Dust Allergy"]) lis.append(request.GET["OccuPational Hazards"]) lis.append(request.GET["Genetic Risk"]) lis.append(request.GET["chronic Lung Disease"]) lis.append(request.GET["Balanced Diet"]) lis.append(request.GET["Obesity"]) lis.append(request.GET["Smoking"]) lis.append(request.GET["Passive Smoker"]) lis.append(request.GET["Chest Pain"]) lis.append(request.GET["Coughing of Blood"]) lis.append(request.GET["Fatigue"]) lis.append(request.GET["Weight Loss"]) lis.append(request.GET["Shortness of Breath"]) lis.append(request.GET["Wheezing"]) lis.append(request.GET["Swallowing Difficulty"]) lis.append(request.GET["Clubbing of Finger Nails"]) lis.append(request.GET["Frequent Cold"]) lis.append(request.GET["Dry Cough"]) lis.append(request.GET["Snoring"]) Result = cls.predict([lis]) return render(request,"backes.html",{"Result":Result}) # Create your views here.
37.756757
59
0.663565
180
1,397
5.15
0.405556
0.223301
0.39698
0.471413
0
0
0
0
0
0
0
0
0.166786
1,397
36
60
38.805556
0.796392
0.016464
0
0
0
0
0.243263
0
0
0
0
0
0
1
0.060606
false
0.030303
0.060606
0.030303
0.181818
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
1f73846a61ec527a885d52773ba7a90cefb7ecf1
249
py
Python
setup.py
cybojenix/django-hybrid-properties
ee52f3132a60b9443cd9c3d9145163c235afac80
[ "Apache-2.0" ]
9
2018-05-01T09:02:54.000Z
2020-07-15T08:58:04.000Z
setup.py
antdking/django-hybrid
ee52f3132a60b9443cd9c3d9145163c235afac80
[ "Apache-2.0" ]
1
2018-05-08T18:45:29.000Z
2018-05-08T18:45:29.000Z
setup.py
antdking/django-hybrid
ee52f3132a60b9443cd9c3d9145163c235afac80
[ "Apache-2.0" ]
1
2018-05-08T18:39:45.000Z
2018-05-08T18:39:45.000Z
from setuptools import setup tests_require = [ "pytest", "pytest-cov", "pytest-django", "pytest-factoryboy", "pytest-mock", ] setup( tests_require=tests_require, extras_require={ 'test': tests_require, } )
13.833333
32
0.614458
25
249
5.92
0.52
0.324324
0.22973
0
0
0
0
0
0
0
0
0
0.253012
249
17
33
14.647059
0.795699
0
0
0
0
0
0.24498
0
0
0
0
0
0
1
0
false
0
0.071429
0
0.071429
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
1f759bb47f98ee5e8df11eed02ac5e5f1840f167
226
py
Python
setup.py
bergundy/android-market-api-py
55a3b0e838a37e52928b4802bea355c47a44c326
[ "MIT" ]
1
2017-04-11T15:26:01.000Z
2017-04-11T15:26:01.000Z
setup.py
bergundy/android-market-api-py
55a3b0e838a37e52928b4802bea355c47a44c326
[ "MIT" ]
null
null
null
setup.py
bergundy/android-market-api-py
55a3b0e838a37e52928b4802bea355c47a44c326
[ "MIT" ]
null
null
null
try: from setuptools import setup except ImportError: from distutils.core import setup setup(name='googleplay-api', version='0.1', packages=['googleplay_api'], install_requires=['protobuf'], )
20.545455
36
0.663717
25
226
5.92
0.76
0.148649
0
0
0
0
0
0
0
0
0
0.011299
0.216814
226
10
37
22.6
0.824859
0
0
0
0
0
0.172566
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
1f75d21ad15e62a6417f815378fe0f2ebc26bfd8
319
py
Python
app/tests/test_main.py
arthur0410/fastapi-serverless
449cdd3e807745b38cc2272f78f4236f350c9425
[ "MIT" ]
null
null
null
app/tests/test_main.py
arthur0410/fastapi-serverless
449cdd3e807745b38cc2272f78f4236f350c9425
[ "MIT" ]
null
null
null
app/tests/test_main.py
arthur0410/fastapi-serverless
449cdd3e807745b38cc2272f78f4236f350c9425
[ "MIT" ]
null
null
null
from fastapi.testclient import TestClient from main import app client = TestClient(app) def test_main_resource(): response_auth = client.get(f"/") assert response_auth.status_code == 200 def test_child_resource(): response_auth = client.get(f"/api/v1/test") assert response_auth.status_code == 200
21.266667
47
0.742947
45
319
5.044444
0.466667
0.211454
0.176211
0.229075
0.537445
0.537445
0
0
0
0
0
0.026022
0.15674
319
14
48
22.785714
0.817844
0
0
0.222222
0
0
0.040752
0
0
0
0
0
0.222222
1
0.222222
false
0
0.222222
0
0.444444
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
2f0b8cbca57b85c135feb65d89ccb04de5a9ed1c
316
py
Python
utils/mixins/base.py
TCC-CIMATEC/TCC-BACK
103dcdc9dba995221d0606f1fdd162aa9c43aa86
[ "MIT" ]
null
null
null
utils/mixins/base.py
TCC-CIMATEC/TCC-BACK
103dcdc9dba995221d0606f1fdd162aa9c43aa86
[ "MIT" ]
null
null
null
utils/mixins/base.py
TCC-CIMATEC/TCC-BACK
103dcdc9dba995221d0606f1fdd162aa9c43aa86
[ "MIT" ]
null
null
null
from django.db import models class BaseMixin(models.Model): class Meta: abstract = True active = models.BooleanField(default=True) created_at = models.DateTimeField(auto_now_add=True, verbose_name='Data de criação') updated_at = models.DateTimeField(auto_now=True, verbose_name='Data de atualização')
35.111111
86
0.775316
43
316
5.534884
0.627907
0.067227
0.176471
0.210084
0.411765
0
0
0
0
0
0
0
0.129747
316
9
87
35.111111
0.865455
0
0
0
0
0
0.107256
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.857143
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
2f13ddd04e81af444a5d0064372f5ba82be74268
1,587
py
Python
Utils/Packetizer.py
alexweav/ADAF
e46e48d4183ae024dded48feb2fba6bef59cce9f
[ "MIT" ]
2
2017-10-09T21:13:56.000Z
2017-10-16T05:55:09.000Z
Utils/Packetizer.py
alexweav/ADAF
e46e48d4183ae024dded48feb2fba6bef59cce9f
[ "MIT" ]
16
2017-10-06T20:04:41.000Z
2017-11-29T22:34:02.000Z
Utils/Packetizer.py
alexweav/ADAF
e46e48d4183ae024dded48feb2fba6bef59cce9f
[ "MIT" ]
null
null
null
import math """ Helps convert bytes into network packets """ class Packetizer(object): def __init__(self, data, packet_size=1024): self.data = data self.packet_size = packet_size self.final_packet_size = len(self.data) % self.packet_size """ Size of each packet """ def PacketSize(self): return self.packet_size """ Number of packets remaining """ def PacketsRemaining(self): return math.ceil(len(self.data) / self.packet_size) """ Length of the final offset packet """ def FinalPacketSize(self): return self.final_packet_size """ Whether or not the packetizer is ready to send the final packet """ def OnFinalPacket(self): return len(self.data) != 0 and len(self.data) < self.packet_size """ Whether or not the packetizer is done with the data stream """ def Done(self): return self.data is None or len(self.data) == 0 """ Gets the next packet from the data """ def Next(self): if self.Done(): raise ValueError('Packets have all already been consumed') if self.OnFinalPacket(): packet = self.data self.data = b"" return self.ZeroPadData(packet, self.packet_size - len(packet)) packet = self.data[:self.packet_size] self.data = self.data[self.packet_size:] return packet """ Pads a piece of data with null bytes """ def ZeroPadData(self, data, pad_amount): return data + (b"\x00" * pad_amount)
25.596774
75
0.606175
202
1,587
4.663366
0.326733
0.118896
0.118896
0.11465
0.194268
0.147558
0.078556
0.078556
0
0
0
0.007111
0.291115
1,587
61
76
26.016393
0.830222
0
0
0
0
0
0.036301
0
0
0
0
0
0
1
0.285714
false
0
0.035714
0.214286
0.642857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
2f322cf0d196249e072966488a9b7d388b988ee7
142
py
Python
Python/2_math_header.py
RealFilllykung/Sample-Programming-Code-Collection
3a1f5085867b82adc4c1216054dbd2dbc7adfee1
[ "MIT" ]
null
null
null
Python/2_math_header.py
RealFilllykung/Sample-Programming-Code-Collection
3a1f5085867b82adc4c1216054dbd2dbc7adfee1
[ "MIT" ]
null
null
null
Python/2_math_header.py
RealFilllykung/Sample-Programming-Code-Collection
3a1f5085867b82adc4c1216054dbd2dbc7adfee1
[ "MIT" ]
null
null
null
import sys
from math import *

# Example demonstrating Python's math module (floor, ceil, sqrt).
value = 3.7
print(floor(value))
print(ceil(value))
print(int(sqrt(36)))
15.777778
50
0.683099
37
142
2.864865
0.783784
0.113208
0
0
0
0
0
0
0
0
0
0.032258
0.126761
142
9
51
15.777778
0.75
0.34507
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
1
0
3
2f5fb3538e3435d5ced84291ead369c7fbc818ed
1,767
py
Python
compass/tasks/tasks.py
huawei-cloud/compass
1f09b2d270917ffcc3829e533dd90c04ef664ff6
[ "Apache-2.0" ]
2
2015-12-07T06:47:16.000Z
2016-01-18T05:27:52.000Z
compass/tasks/tasks.py
SysCompass/compass-core
b04d02ae97d6fbeea9a6c42c6d368778c371ccde
[ "Apache-2.0" ]
1
2016-03-17T17:36:56.000Z
2016-03-17T17:36:56.000Z
compass/tasks/tasks.py
SysCompass/compass-core
b04d02ae97d6fbeea9a6c42c6d368778c371ccde
[ "Apache-2.0" ]
4
2016-06-13T17:19:14.000Z
2018-09-10T03:11:51.000Z
"""Module to define celery tasks. .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com> """ from celery.signals import setup_logging from compass.actions import poll_switch from compass.actions import trigger_install from compass.actions import progress_update from compass.db import database from compass.tasks.client import celery from compass.utils import flags from compass.utils import logsetting from compass.utils import setting_wrapper as setting def tasks_setup_logging(**_): """Setup logging options from compass setting.""" flags.init() flags.OPTIONS.logfile = setting.CELERY_LOGFILE logsetting.init() setup_logging.connect(tasks_setup_logging) @celery.task(name="compass.tasks.pollswitch") def pollswitch(ip_addr, req_obj='mac', oper="SCAN"): """Query switch and return expected result. :param ip_addr: switch ip address. :type ip_addr: str :param reqObj: the object requested to query from switch. :type reqObj: str :param oper: the operation to query the switch (SCAN, GET, SET). :type oper: str """ with database.session(): poll_switch.poll_switch(ip_addr, req_obj='mac', oper="SCAN") @celery.task(name="compass.tasks.trigger_install") def triggerinstall(clusterid): """Deploy the given cluster. :param clusterid: the id of the cluster to deploy. :type clusterid: int """ with database.session(): trigger_install.trigger_install(clusterid) @celery.task(name="compass.tasks.progress_update") def progressupdate(clusterid): """Calculate the installing progress of the given cluster. :param clusterid: the id of the cluster to get the intstalling progress. :type clusterid: int """ progress_update.update_progress(clusterid)
28.967213
76
0.739672
233
1,767
5.497854
0.334764
0.077283
0.042155
0.056206
0.171741
0.110851
0.110851
0.074941
0.074941
0.074941
0
0
0.164686
1,767
60
77
29.45
0.867886
0.359932
0
0.08
0
0
0.091778
0.078394
0
0
0
0
0
1
0.16
false
0.44
0.36
0
0.52
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
2f6948b0645143c3cf093b2750b66a6352a3945b
407
py
Python
tools/generate_method_table.py
meghanto/tiny_vm
da19e9f4bff3b48349bf13f8dc725d387d01e1f5
[ "CC-BY-3.0" ]
2
2022-01-12T03:16:07.000Z
2022-01-15T07:52:48.000Z
tools/generate_method_table.py
meghanto/tiny_vm
da19e9f4bff3b48349bf13f8dc725d387d01e1f5
[ "CC-BY-3.0" ]
2
2022-01-25T20:52:54.000Z
2022-01-31T02:46:45.000Z
tools/generate_method_table.py
meghanto/tiny_vm
da19e9f4bff3b48349bf13f8dc725d387d01e1f5
[ "CC-BY-3.0" ]
10
2022-01-04T04:36:07.000Z
2022-01-15T00:55:27.000Z
"""Generate a method table file, initializing with the built-in methods. The method table is used in class loading to load method addresses into vtables. It is not used during vm program execution. """ import datetime PROLOG = f"""/* GENERATED ON {datetime.datetime.now()} by {__name__} DO NOT EDIT BY HAND. REGENERATE INSTEAD. */ struct method_address_table_struct { char *method_name; } """
21.421053
68
0.734644
59
407
4.932203
0.728814
0.075601
0
0
0
0
0
0
0
0
0
0
0.176904
407
19
69
21.421053
0.868657
0
0
0
1
0
0.825243
0.252427
0
0
0
0
0
0
null
null
0
0.125
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
2f7c7940e8c42e31b4e593427ecb7a7095d301ef
266
py
Python
lf3py/wsgi/types.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
null
null
null
lf3py/wsgi/types.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
48
2020-12-19T13:47:26.000Z
2021-01-07T22:27:56.000Z
lf3py/wsgi/types.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
null
null
null
from typing_extensions import Protocol
from typing import Any, Callable, List, Optional, Tuple


class StartResponse(Protocol):
    """Structural type for the WSGI ``start_response`` callable."""

    def __call__(
        self,
        status: str,
        headers: List[Tuple[str, str]],
        exc_info: Optional[Any] = ...,
    ) -> Callable[[bytes], Any]:
        ...
33.25
125
0.699248
33
266
5.454545
0.636364
0.111111
0
0
0
0
0
0
0
0
0
0
0.161654
266
7
126
38
0.807175
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
2f807b03df0634b6e77718f34fe1faea99c1b6b7
2,358
py
Python
linalgebra/vector2.py
matthewflegg/LinearAlgebra
9de606f16d0730b8cbaa38b64fb9707c09bb1066
[ "MIT" ]
1
2022-01-03T22:36:01.000Z
2022-01-03T22:36:01.000Z
linalgebra/vector2.py
matthewflegg/linearalgebra
9de606f16d0730b8cbaa38b64fb9707c09bb1066
[ "MIT" ]
null
null
null
linalgebra/vector2.py
matthewflegg/linearalgebra
9de606f16d0730b8cbaa38b64fb9707c09bb1066
[ "MIT" ]
null
null
null
import math


class Vector2D:
    """A two-dimensional vector with cartesian co-ordinates."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __str__(self):
        """Human-readable form, e.g. '3i + 4j'."""
        return '{:g}i + {:g}j'.format(self.x, self.y)

    def __repr__(self):
        """Unambiguous form: the co-ordinate tuple."""
        return repr((self.x, self.y))

    def dot(self, other):
        """Dot product of two vectors; rejects non-vector operands."""
        if not isinstance(other, Vector2D):
            raise TypeError('Can only take dot product of two vectors')
        # Sum of the component-wise products.
        return self.x * other.x + self.y * other.y

    # Alias so `a @ b` works as well as `a.dot(b)`.
    __matmul__ = dot

    def __sub__(self, other):
        """Component-wise subtraction."""
        return Vector2D(self.x - other.x, self.y - other.y)

    def __add__(self, other):
        """Component-wise addition."""
        return Vector2D(self.x + other.x, self.y + other.y)

    def __mul__(self, scalar):
        """Multiply by a scalar; anything else raises."""
        if isinstance(scalar, (int, float)):
            return Vector2D(self.x * scalar, self.y * scalar)
        raise NotImplementedError('Can only multiply Vector2D by a scalar')

    def __rmul__(self, scalar):
        """Reflected multiplication: scalar * vector."""
        return self.__mul__(scalar)

    def __neg__(self):
        """Inversion through the origin."""
        return Vector2D(-self.x, -self.y)

    def __truediv__(self, scalar):
        """Component-wise true division by a scalar."""
        return Vector2D(self.x / scalar, self.y / scalar)

    def __mod__(self, scalar):
        """Component-wise modulus by a scalar."""
        return Vector2D(self.x % scalar, self.y % scalar)

    def __abs__(self):
        """Euclidean magnitude of the vector."""
        return math.sqrt(self.x**2 + self.y**2)

    def distance_to(self, other):
        """Euclidean distance between two vectors."""
        return abs(self - other)

    def to_polar(self):
        """Return (magnitude, angle) polar co-ordinates."""
        return abs(self), math.atan2(self.y, self.x)
31.026316
75
0.638253
324
2,358
4.441358
0.339506
0.04517
0.029187
0.079222
0.249479
0.165393
0.165393
0.165393
0.125087
0.070883
0
0.008088
0.265903
2,358
75
76
31.44
0.823224
0.301527
0
0
0
0
0.059711
0
0
0
0
0
0
1
0.4
false
0
0.028571
0.314286
0.857143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
2f9e315f82db404eb4417e941ea743f79f9d2022
216
py
Python
scripts/npc/roro_Check3.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
54
2019-04-16T23:24:48.000Z
2021-12-18T11:41:50.000Z
scripts/npc/roro_Check3.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
3
2019-05-19T15:19:41.000Z
2020-04-27T16:29:16.000Z
scripts/npc/roro_Check3.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
49
2020-11-25T23:29:16.000Z
2022-03-26T16:20:24.000Z
# Created by MechAviv
# [Magic Library Checker] | [1032220]
# Ellinia : Magic Library
# Record the search in quest-ex 25566/"c3" the first time the player looks.
progress = sm.getQuestEx(25566, "c3")
if "1" not in progress:
    sm.setQuestEx(25566, "c3", "1")
    sm.chatScript("You search the Magic Library.")
36
50
0.671296
30
216
4.833333
0.7
0.248276
0
0
0
0
0
0
0
0
0
0.117978
0.175926
216
6
50
36
0.696629
0.375
0
0
0
0
0.265152
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
85cf7c417e149027d7493e02a9c70323e5a207b6
572
py
Python
kiacopy/__init__.py
ganyariya/kiacopy
2379119a7360fec32211d31c12e37fd9daefd002
[ "MIT" ]
null
null
null
kiacopy/__init__.py
ganyariya/kiacopy
2379119a7360fec32211d31c12e37fd9daefd002
[ "MIT" ]
69
2021-06-23T05:15:49.000Z
2022-03-31T18:07:22.000Z
kiacopy/__init__.py
ganyariya/kiacopy
2379119a7360fec32211d31c12e37fd9daefd002
[ "MIT" ]
null
null
null
"""Public package interface for kiacopy: re-export the main classes."""
from logging import getLogger, NullHandler

# FIX: `Colony` was imported twice in the original; deduplicated.
from kiacopy.colony import Colony
from kiacopy.solver import Solver
from kiacopy.solverplugin import SolverPlugin
from kiacopy.solution import Solution
from kiacopy.ants.ant import Ant
from kiacopy.ants.randomant import RandomAnt
from kiacopy.ants.sensitiveant import SensitiveAnt
from kiacopy import plugins
from kiacopy import utils
from kiacopy import ants

__author__ = """ganariya"""
__email__ = 'ganariya2525@gmail.com'
__version__ = '0.1.0'

# Library best practice: attach a NullHandler so applications that import
# kiacopy without configuring logging see no "No handler found" warnings.
getLogger(__name__).addHandler(NullHandler())
30.105263
50
0.83042
74
572
6.202703
0.364865
0.263617
0.098039
0.100218
0.150327
0.150327
0.150327
0
0
0
0
0.013699
0.106643
572
18
51
31.777778
0.88454
0
0
0.125
0
0
0.061189
0.038462
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
c823dbe85d23b5116847536714a915899c74862f
324
py
Python
Nivek maths/kanu/__init__.py
ThiccTT/nivek-maths
37f222b3ddc72455313eb619a79bc6045b89b506
[ "MIT" ]
6
2020-10-18T08:15:44.000Z
2022-01-19T16:14:40.000Z
Nivek maths/kanu/__init__.py
ThiccTT/nivek-maths
37f222b3ddc72455313eb619a79bc6045b89b506
[ "MIT" ]
null
null
null
Nivek maths/kanu/__init__.py
ThiccTT/nivek-maths
37f222b3ddc72455313eb619a79bc6045b89b506
[ "MIT" ]
3
2019-06-17T15:44:58.000Z
2021-08-04T01:37:25.000Z
# Public API of the kanu package: re-export the element, expression and
# equation helpers so callers can import them from the package root.
from .element import get_matching_paren, InvalidVariableError, Element, Variable, InvalidElementError
from .expression import OperatorList, format_parens, parse_expression, to_op_list, to_rpn, all_together_now
from .equation import find_variables, solve_single_linear_equation, NonLinearEquationError

__version__ = '0.1.3'
54
107
0.858025
40
324
6.525
0.8
0
0
0
0
0
0
0
0
0
0
0.010101
0.083333
324
5
108
64.8
0.868687
0
0
0
0
0
0.015432
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
c83157f749d7fe31683e613d0d7cb0bab865f887
247
py
Python
possum/exc.py
brysontyrrell/Possum
0ae7c90d7890f16d8405e346b0778bf878e29d5f
[ "MIT" ]
24
2018-03-14T15:25:52.000Z
2019-03-13T18:36:46.000Z
possum/exc.py
brysontyrrell/Possum
0ae7c90d7890f16d8405e346b0778bf878e29d5f
[ "MIT" ]
20
2018-04-11T04:36:40.000Z
2019-01-17T11:44:30.000Z
possum/exc.py
brysontyrrell/Possum
0ae7c90d7890f16d8405e346b0778bf878e29d5f
[ "MIT" ]
5
2018-01-04T21:10:15.000Z
2019-01-16T20:10:12.000Z
class PossumException(Exception):
    """Root of the Possum exception hierarchy."""


class PipenvPathNotFound(PossumException):
    """Raised when the pipenv executable cannot be located."""


class SAMTemplateError(PossumException):
    """Raised when the SAM template file cannot be read."""
22.454545
54
0.736842
25
247
7.28
0.8
0
0
0
0
0
0
0
0
0
0
0
0.153846
247
10
55
24.7
0.870813
0.380567
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
3
c845b405dfb89bfc6afeaccf4fa73dab1dee293a
262
py
Python
scieio/disposition/views.py
arnelimperial/scieio
279a25766f20d074a3df824c0fbc8b2d8e35f272
[ "MIT" ]
null
null
null
scieio/disposition/views.py
arnelimperial/scieio
279a25766f20d074a3df824c0fbc8b2d8e35f272
[ "MIT" ]
8
2021-03-19T01:56:44.000Z
2022-03-12T00:24:21.000Z
scieio/disposition/views.py
arnelimperial/scieio
279a25766f20d074a3df824c0fbc8b2d8e35f272
[ "MIT" ]
null
null
null
from django.shortcuts import render
from rest_framework import viewsets

from . import models, serializers


class DispositionViewSet(viewsets.ModelViewSet):
    """REST endpoint exposing CRUD operations over all Disposition objects."""
    queryset = models.Disposition.objects.all()
    serializer_class = serializers.DispositionSerializer
29.111111
56
0.824427
27
262
7.925926
0.703704
0
0
0
0
0
0
0
0
0
0
0
0.118321
262
8
57
32.75
0.926407
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
c847ba632fcc836780c365bf6e3ce89816b9948d
262
py
Python
Labs/Lab04/QuestionEuler01Solution.py
tariqueameer7/course-python
f1252e82471f7e34d66beb30d9236850df1bd8d4
[ "Apache-2.0" ]
1
2021-02-04T16:59:11.000Z
2021-02-04T16:59:11.000Z
Labs/Lab04/QuestionEuler01Solution.py
tariqueameer7/course-python
f1252e82471f7e34d66beb30d9236850df1bd8d4
[ "Apache-2.0" ]
null
null
null
Labs/Lab04/QuestionEuler01Solution.py
tariqueameer7/course-python
f1252e82471f7e34d66beb30d9236850df1bd8d4
[ "Apache-2.0" ]
1
2019-10-30T14:37:48.000Z
2019-10-30T14:37:48.000Z
# Project Euler #1: print and sum every multiple of 3 or 5 below `end`.
end = 1000
total = 0
for x in range(1, end):
    # The original tested 15, 5 and 3 in separate elif branches whose bodies
    # were identical; a multiple of 15 is already a multiple of 3 or 5, so a
    # single combined condition is exactly equivalent.
    if x % 3 == 0 or x % 5 == 0:
        total = total + x
        print(x)
print(f"total = {total}")
17.466667
25
0.438931
41
262
2.804878
0.390244
0.347826
0.286957
0.313043
0.556522
0.556522
0.4
0.4
0
0
0
0.085526
0.419847
262
15
26
17.466667
0.671053
0
0
0.461538
0
0
0.057252
0
0
0
0
0
0
1
0
false
0
0
0
0
0.307692
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c85497d6792fe4c6a6dc394dfbf2b80f0877b324
234
py
Python
output/models/nist_data/atomic/long/schema_instance/nistschema_sv_iv_atomic_long_pattern_4_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
1
2021-08-14T17:59:21.000Z
2021-08-14T17:59:21.000Z
output/models/nist_data/atomic/long/schema_instance/nistschema_sv_iv_atomic_long_pattern_4_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
4
2020-02-12T21:30:44.000Z
2020-04-15T20:06:46.000Z
output/models/nist_data/atomic/long/schema_instance/nistschema_sv_iv_atomic_long_pattern_4_xsd/__init__.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
null
null
null
# Re-export the generated model class at the package root for convenience.
from output.models.nist_data.atomic.long.schema_instance.nistschema_sv_iv_atomic_long_pattern_4_xsd.nistschema_sv_iv_atomic_long_pattern_4 import NistschemaSvIvAtomicLongPattern4

# Explicit public API of this package.
__all__ = [
    "NistschemaSvIvAtomicLongPattern4",
]
39
178
0.884615
28
234
6.714286
0.642857
0.159574
0.148936
0.212766
0.340426
0.340426
0.340426
0
0
0
0
0.018182
0.059829
234
5
179
46.8
0.836364
0
0
0
0
0
0.136752
0.136752
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c07b9919a990aa591ba51626c9f048e22c056d38
550
py
Python
tests/test_routes.py
jonaskaz/Noodle-API
bcfb20c58a92ddea1aa870b6f717cbf4b9aa2488
[ "MIT" ]
1
2021-11-04T16:33:34.000Z
2021-11-04T16:33:34.000Z
tests/test_routes.py
jonaskaz/Noodle-API
bcfb20c58a92ddea1aa870b6f717cbf4b9aa2488
[ "MIT" ]
null
null
null
tests/test_routes.py
jonaskaz/Noodle-API
bcfb20c58a92ddea1aa870b6f717cbf4b9aa2488
[ "MIT" ]
null
null
null
def test_post_order(client, order_payload):
    """POSTing an order payload returns HTTP 200."""
    response = client.post("/order", json=order_payload)
    assert response.status_code == 200


def test_post_game(client, game_payload):
    """POSTing a game payload to /order returns HTTP 200."""
    response = client.post("/order", json=game_payload)
    assert response.status_code == 200


def test_get_order(client):
    """Successive GETs walk through the queued orders, ending empty."""
    first = client.get("/order")
    assert first.json()["mode"] == 0
    assert first.json()["toppings"] == ["onions", "spice"]
    second = client.get("/order")
    assert second.json()["mode"] == 1
    third = client.get("/order")
    assert third.json() == {}
26.190476
57
0.634545
74
550
4.554054
0.310811
0.094955
0.124629
0.178042
0.385757
0.385757
0.21365
0.21365
0
0
0
0.033632
0.189091
550
20
58
27.5
0.721973
0
0
0.142857
0
0
0.104015
0
0
0
0
0
0.428571
1
0.214286
false
0
0
0
0.214286
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
3
c0962c0011012f40b124a247ae3c0cc696c96587
477
py
Python
census_similarity/metrics.py
18F/census-similarity
1af03a7fc9fc865b037abcb460386fb9b73bc103
[ "CC0-1.0" ]
1
2016-09-28T16:02:58.000Z
2016-09-28T16:02:58.000Z
census_similarity/metrics.py
18F/census-similarity
1af03a7fc9fc865b037abcb460386fb9b73bc103
[ "CC0-1.0" ]
11
2016-09-26T18:44:18.000Z
2016-10-15T23:30:41.000Z
census_similarity/metrics.py
18F/census-similarity
1af03a7fc9fc865b037abcb460386fb9b73bc103
[ "CC0-1.0" ]
3
2016-09-28T10:02:17.000Z
2021-02-15T09:58:33.000Z
"""Distance metrics""" import distance from scipy import spatial def cosine(left, right): elements = set(left) | set(right) elements = list(sorted(elements)) left = [int(el in left) for el in elements] right = [int(el in right) for el in elements] return spatial.distance.cosine(left, right) def levenshtein(left, right): return distance.levenshtein(left, right, normalized=True) def jaccard(left, right): return distance.jaccard(left, right)
23.85
61
0.704403
65
477
5.169231
0.353846
0.160714
0.089286
0.089286
0
0
0
0
0
0
0
0
0.18239
477
19
62
25.105263
0.861538
0.033543
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.166667
0.166667
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
c09718359923b744f7ba947170a3adc6e69f2cb7
162
py
Python
lib/pylsdj/bread/__init__.py
iLambda/lsdj-wave-cruncher
b49f2f3943d396ee01263f25c8d7a38a65f63ff4
[ "MIT" ]
15
2016-10-11T16:48:58.000Z
2022-02-19T01:39:00.000Z
lib/pylsdj/bread/__init__.py
urbster1/lsdj-wave-cruncher
8da1f384f83c79a87cbd311c5dbbf3fe93fb5054
[ "MIT" ]
2
2016-10-11T11:55:30.000Z
2021-01-31T12:11:16.000Z
lib/pylsdj/bread/__init__.py
iLambda/lsdj-wave-cruncher
b49f2f3943d396ee01263f25c8d7a38a65f63ff4
[ "MIT" ]
5
2016-10-11T11:42:47.000Z
2021-07-10T12:23:25.000Z
# Package metadata for the vendored `bread` binary-format library.
__title__ = 'bread'
__version__ = '2.2.0'
__author__ = 'Alex Rasmussen'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Alex Rasmussen'

# Re-export the implementation at the package root.
from .bread import *
20.25
47
0.734568
19
162
5.210526
0.736842
0.262626
0
0
0
0
0
0
0
0
0
0.050725
0.148148
162
7
48
23.142857
0.666667
0
0
0
0
0
0.345679
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c0be744cb13aff37e64849df94dc5c6c0e8f9cfc
89
py
Python
okex/v5/cttype.py
quantmew/okex-py
3e96413cd4e6dd5779ff2c47b8c76be53448783d
[ "MIT" ]
32
2021-05-06T01:46:17.000Z
2022-03-09T03:34:24.000Z
okex/v5/cttype.py
PattonChen/okex-py
cced7f1419da0940a2200ce66d62b4f9539949f2
[ "MIT" ]
3
2021-05-09T14:40:54.000Z
2021-11-24T13:29:30.000Z
okex/v5/cttype.py
PattonChen/okex-py
cced7f1419da0940a2200ce66d62b4f9539949f2
[ "MIT" ]
14
2021-05-11T23:36:18.000Z
2021-12-28T09:47:35.000Z
from enum import Enum


class CtType(Enum):
    """Contract type values — presumably the OKEx v5 API `ctType`
    parameter (per file path); verify against the API docs."""
    LINEAR = "linear"
    INVERSE = "inverse"
14.833333
23
0.662921
11
89
5.363636
0.636364
0
0
0
0
0
0
0
0
0
0
0
0.235955
89
6
23
14.833333
0.867647
0
0
0
0
0
0.146067
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
c0c5910e021a0d83ff30289ae62e1159af2f9c6d
7,315
py
Python
backend/externalise/tests.py
StichtingIAPC/swipe
d1ea35a40813d2d5e9cf9edde33148c0a825efc4
[ "BSD-3-Clause-Clear" ]
null
null
null
backend/externalise/tests.py
StichtingIAPC/swipe
d1ea35a40813d2d5e9cf9edde33148c0a825efc4
[ "BSD-3-Clause-Clear" ]
null
null
null
backend/externalise/tests.py
StichtingIAPC/swipe
d1ea35a40813d2d5e9cf9edde33148c0a825efc4
[ "BSD-3-Clause-Clear" ]
null
null
null
from django.test import TestCase
from tools.testing import TestData
from decimal import Decimal

from money.models import Cost
from externalise.models import ExternaliseDocument, IncorrectPriceError, IncorrectCountError, ExternaliseLine
from stock.models import Stock


class ExternaliseTests(TestCase, TestData):
    """Tests for ExternaliseDocument creation and its effects on Stock
    and ExternaliseLine records."""

    def setUp(self):
        # TestData mixin provides users, currencies and article types.
        self.setup_base_data()

    def test_illegal_cost(self):
        # A negative cost must be rejected with IncorrectPriceError.
        local_cost = Cost(amount=Decimal(-2), currency=self.currency_eur)
        count = 2
        with self.assertRaises(IncorrectPriceError):
            ExternaliseDocument.create_external_products_document(
                user=self.user_1,
                article_information_list=[(self.articletype_1, count, local_cost)],
                memo="Foo")

    def test_zero_count(self):
        # A zero count must be rejected with IncorrectCountError.
        local_cost = Cost(amount=Decimal(2), currency=self.currency_eur)
        count = 0
        with self.assertRaises(IncorrectCountError):
            ExternaliseDocument.create_external_products_document(
                user=self.user_1,
                article_information_list=[(self.articletype_1, count, local_cost)],
                memo="Foo")

    def test_single_article(self):
        # One article line creates one Stock row and one ExternaliseLine.
        local_cost = Cost(amount=Decimal(2), currency=self.currency_eur)
        count = 2
        ExternaliseDocument.create_external_products_document(
            user=self.user_1,
            article_information_list=[[self.articletype_1, count, local_cost]],
            memo="Foo")
        st = Stock.objects.get()
        self.assertEqual(st.book_value, local_cost)
        self.assertEqual(st.count, count)
        self.assertEqual(st.article, self.articletype_1)
        self.assertEqual(st.labeltype, None)
        els = ExternaliseLine.objects.all()
        self.assertEqual(len(els), 1)
        el = els[0]
        self.assertEqual(el.count, count)
        self.assertEqual(el.article_type, self.articletype_1)
        self.assertEqual(el.cost, local_cost)

    def test_article_combination_merging(self):
        # Two identical lines in ONE document merge: stock count doubles
        # but only a single ExternaliseLine is stored.
        local_cost = Cost(amount=Decimal(2), currency=self.currency_eur)
        count = 2
        ExternaliseDocument.create_external_products_document(
            user=self.user_1,
            article_information_list=[[self.articletype_1, count, local_cost],
                                      [self.articletype_1, count, local_cost]],
            memo="Foo")
        st = Stock.objects.get()
        self.assertEqual(st.book_value, local_cost)
        self.assertEqual(st.count, 2*count)
        self.assertEqual(st.article, self.articletype_1)
        self.assertEqual(st.labeltype, None)
        els = ExternaliseLine.objects.all()
        self.assertEqual(len(els), 1)

    def test_separate_addition_of_article(self):
        # The same article added via TWO documents: stock merges (count
        # doubles) but each document keeps its own ExternaliseLine.
        local_cost = Cost(amount=Decimal(2), currency=self.currency_eur)
        count = 2
        ExternaliseDocument.create_external_products_document(
            user=self.user_1,
            article_information_list=[[self.articletype_1, count, local_cost], ],
            memo="Foo")
        ExternaliseDocument.create_external_products_document(
            user=self.user_1,
            article_information_list=[[self.articletype_1, count, local_cost], ],
            memo="Foo")
        st = Stock.objects.get()
        self.assertEqual(st.book_value, local_cost)
        self.assertEqual(st.count, 2 * count)
        self.assertEqual(st.article, self.articletype_1)
        self.assertEqual(st.labeltype, None)
        els = ExternaliseLine.objects.all()
        self.assertEqual(len(els), 2)

    def test_two_articles(self):
        # Two different articles produce two independent Stock rows.
        local_cost_1 = Cost(amount=Decimal(4), currency=self.currency_eur)
        local_cost_2 = Cost(amount=Decimal(5), currency=self.currency_eur)
        count_1 = 2
        count_2 = 3
        ExternaliseDocument.create_external_products_document(
            user=self.user_1,
            article_information_list=[[self.articletype_1, count_1, local_cost_1],
                                      [self.articletype_2, count_2, local_cost_2]],
            memo="Foo")
        self.assertEqual(len(Stock.objects.all()), 2)
        st_1 = Stock.objects.get(article=self.articletype_1)
        st_2 = Stock.objects.get(article=self.articletype_2)
        self.assertEqual(st_1.book_value, local_cost_1)
        self.assertEqual(st_1.count, count_1)
        self.assertEqual(st_1.article, self.articletype_1)
        self.assertEqual(st_1.labeltype, None)
        self.assertEqual(st_2.book_value, local_cost_2)
        self.assertEqual(st_2.count, count_2)
        self.assertEqual(st_2.article, self.articletype_2)
        self.assertEqual(st_2.labeltype, None)
        els = ExternaliseLine.objects.all()
        self.assertEqual(len(els), 2)

    def test_price_merging(self):
        # Same article at two costs: the stored book value is the
        # count-weighted average — (2*2 + 1*5) / 3 = 3.
        local_cost_1 = Cost(amount=Decimal(2), currency=self.currency_eur)
        local_cost_2 = Cost(amount=Decimal(5), currency=self.currency_eur)
        count_1 = 2
        count_2 = 1
        ExternaliseDocument.create_external_products_document(
            user=self.user_1,
            article_information_list=[[self.articletype_1, count_1, local_cost_1]],
            memo="Foo")
        ExternaliseDocument.create_external_products_document(
            user=self.user_1,
            article_information_list=[[self.articletype_1, count_2, local_cost_2]],
            memo="Foo2")
        desired_cost = Cost(amount=Decimal(3), currency=self.currency_eur)
        st = Stock.objects.get()
        self.assertEqual(st.book_value, desired_cost)
        self.assertEqual(st.count, count_1+count_2)
        self.assertEqual(st.article, self.articletype_1)
        self.assertEqual(st.labeltype, None)
        els = ExternaliseLine.objects.all()
        self.assertEqual(len(els), 2)
54.185185
111
0.520437
684
7,315
5.317251
0.109649
0.136101
0.11218
0.063239
0.786637
0.753643
0.721199
0.675832
0.670883
0.65906
0
0.020562
0.40164
7,315
134
112
54.589552
0.810372
0
0
0.606557
0
0
0.003828
0
0
0
0
0
0.286885
1
0.065574
false
0
0.04918
0
0.122951
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c0c82419820fc2654d3313e9ba40f79e72c45b38
403
py
Python
wcraas_discovery/cli.py
panagiks/wcraas_discovery
77f9a9ec8a2b00555e8176fc84294da953b3b1fa
[ "MIT" ]
null
null
null
wcraas_discovery/cli.py
panagiks/wcraas_discovery
77f9a9ec8a2b00555e8176fc84294da953b3b1fa
[ "MIT" ]
1
2021-11-15T17:49:49.000Z
2021-11-15T17:49:49.000Z
wcraas_discovery/cli.py
panagiks/wcraas_discovery
77f9a9ec8a2b00555e8176fc84294da953b3b1fa
[ "MIT" ]
1
2019-10-04T05:07:28.000Z
2019-10-04T05:07:28.000Z
# -*- coding: utf-8 -*-

"""Console script for wcraas_discovery."""
import sys

import click

from wcraas_discovery import DiscoveryWorker
from wcraas_discovery.config import Config


@click.command()
def main(args=None):
    """Console script for wcraas_discovery."""
    # Build the worker from environment-derived config and run it.
    DiscoveryWorker(*Config.fromenv()).run()


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
20.15
47
0.707196
49
403
5.571429
0.571429
0.21978
0.117216
0.161172
0.227106
0
0
0
0
0
0
0.00295
0.158809
403
19
48
21.210526
0.80236
0.280397
0
0
0
0
0.028777
0
0
0
0
0
0
1
0.1
false
0
0.4
0
0.5
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
c0e8f92a75016fbbf76bf2a6fb6b6f279be5b5e1
1,092
py
Python
source/detection/OCRDetector.py
FrancescoSaverioZuppichini/Smash-Bros-Melee
4ca6f67240bfe60e86a82409c4056aaa25a5a860
[ "MIT" ]
null
null
null
source/detection/OCRDetector.py
FrancescoSaverioZuppichini/Smash-Bros-Melee
4ca6f67240bfe60e86a82409c4056aaa25a5a860
[ "MIT" ]
null
null
null
source/detection/OCRDetector.py
FrancescoSaverioZuppichini/Smash-Bros-Melee
4ca6f67240bfe60e86a82409c4056aaa25a5a860
[ "MIT" ]
null
null
null
import pytesseract
import cv2
from detection import Detector
from PIL import Image
from dataclasses import dataclass
from utils import imshow
import numpy as np
from detection import Detector


@dataclass
class OCRDetector(Detector):
    """
    Easy peasy ocr with tesseract, to improve the prediction you can pass
    `text_color` to cutoff all values that are not text.

    TODO: probably there is a cv2 function in which we can define a color and a range.
    """
    # Threshold: pixels below this value are treated as text (see detect()).
    text_color: int = 165
    smooth: bool = True  # if True apply Gaussian Blur to remove noise
    show_img: bool = False  # if True display the preprocessed image
    transform: callable = None  # optional extra transform applied before OCR
    config: str = ''  # extra tesseract config string

    def detect(self, img):
        # Preprocess: optional blur, threshold to a boolean text mask,
        # optional caller-supplied transform, then OCR with tesseract.
        # NOTE(review): assumes `img` is a grayscale numpy array — the `<`
        # comparison and `* 255` scaling only make sense then; confirm.
        if self.smooth:
            img = cv2.GaussianBlur(img, (3, 3), 0)
        if self.text_color:
            # Boolean mask: dark pixels (below the cutoff) become True.
            img = img < self.text_color
        if self.transform is not None:
            img = self.transform(img)
        img = img.astype('uint8')
        if self.show_img:
            imshow(img)
        # Scale the 0/1 mask to 0/255 so PIL/tesseract see black-on-white.
        x = Image.fromarray(img * 255)
        text = pytesseract.image_to_string(x, lang='eng', config=self.config)
        return text
30.333333
86
0.666667
156
1,092
4.615385
0.532051
0.05
0.052778
0.075
0
0
0
0
0
0
0
0.016069
0.259158
1,092
36
87
30.333333
0.873918
0.231685
0
0.076923
0
0
0.009804
0
0
0
0
0.027778
0
1
0.038462
false
0
0.307692
0
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
1
0
0
3
23a0a03a2bb614c031ced5ebee63ba58401296d4
268
py
Python
Application.py
l0athe/lecture2
8505f1dc6edb9e04aeb4f6f6f9dc5a7e64b87b69
[ "MIT" ]
1
2018-07-29T18:40:47.000Z
2018-07-29T18:40:47.000Z
Application.py
l0athe/lecture2
8505f1dc6edb9e04aeb4f6f6f9dc5a7e64b87b69
[ "MIT" ]
null
null
null
Application.py
l0athe/lecture2
8505f1dc6edb9e04aeb4f6f6f9dc5a7e64b87b69
[ "MIT" ]
null
null
null
from flask import Flask

app = Flask(__name__)


@app.route("/")
def index():
    """Landing page."""
    return "MAKE IT RAINGURL, MAKE IT RAIN!"


@app.route("/<string:name>")
def hello(name):
    """Greet *name*, capitalized, in an h1 heading."""
    capitalized = name.capitalize()
    return f"<h1>Oh yeah hey {capitalized} hey wassup?</h1>"
17.866667
54
0.608209
38
268
4.184211
0.578947
0.100629
0
0
0
0
0
0
0
0
0
0.009662
0.227612
268
14
55
19.142857
0.758454
0
0
0
0
0
0.334646
0
0
0
0
0
0
1
0.222222
false
0
0.111111
0.111111
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
23a0eb8783bdb2147bd53bedb25189d718eab953
835
py
Python
mnelab/dialogs/__init__.py
dmedine/mnelab
d1942da5fd274de6aa97fd9ba050a9c12a8da058
[ "BSD-3-Clause" ]
null
null
null
mnelab/dialogs/__init__.py
dmedine/mnelab
d1942da5fd274de6aa97fd9ba050a9c12a8da058
[ "BSD-3-Clause" ]
null
null
null
mnelab/dialogs/__init__.py
dmedine/mnelab
d1942da5fd274de6aa97fd9ba050a9c12a8da058
[ "BSD-3-Clause" ]
null
null
null
# Authors: Clemens Brunner <clemens.brunner@gmail.com> # # License: BSD (3-clause) from .annotationsdialog import AnnotationsDialog from .calcdialog import CalcDialog from .channelpropertiesdialog import ChannelPropertiesDialog from .cropdialog import CropDialog from .epochdialog import EpochDialog from .errormessagebox import ErrorMessageBox from .eventsdialog import EventsDialog from .filterdialog import FilterDialog from .findeventsdialog import FindEventsDialog from .historydialog import HistoryDialog from .interpolatebadsdialog import InterpolateBadsDialog from .metainfodialog import MetaInfoDialog from .montagedialog import MontageDialog from .pickchannelsdialog import PickChannelsDialog from .referencedialog import ReferenceDialog from .runicadialog import RunICADialog from .xdfstreamsdialog import XDFStreamsDialog
37.954545
60
0.869461
79
835
9.189873
0.35443
0.038567
0
0
0
0
0
0
0
0
0
0.001323
0.094611
835
21
61
39.761905
0.958995
0.091018
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
23c42b3616a337da92d375cf92a907b1db17fc19
123
py
Python
data/studio21_generated/interview/1954/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
data/studio21_generated/interview/1954/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
data/studio21_generated/interview/1954/starter_code.py
vijaykumawat256/Prompt-Summarization
614f5911e2acd2933440d909de2b4f86653dc214
[ "Apache-2.0" ]
null
null
null
class Solution: def smallestSufficientTeam(self, req_skills: List[str], people: List[List[str]]) -> List[int]:
41
98
0.674797
15
123
5.466667
0.733333
0.170732
0
0
0
0
0
0
0
0
0
0
0.178862
123
3
99
41
0.811881
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3