hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9126af8fc4854391b11011eb26d9bf08576a47ea
| 100
|
py
|
Python
|
hooks/hook-palisades.py
|
natcap/opal
|
7b960d51344483bae30d14ccfa6004bd550f3737
|
[
"BSD-3-Clause"
] | 1
|
2020-04-15T23:23:27.000Z
|
2020-04-15T23:23:27.000Z
|
hooks/hook-palisades.py
|
natcap/opal
|
7b960d51344483bae30d14ccfa6004bd550f3737
|
[
"BSD-3-Clause"
] | null | null | null |
hooks/hook-palisades.py
|
natcap/opal
|
7b960d51344483bae30d14ccfa6004bd550f3737
|
[
"BSD-3-Clause"
] | null | null | null |
from PyInstaller.hooks.hookutils import collect_data_files
datas = collect_data_files('palisades')
| 25
| 58
| 0.85
| 13
| 100
| 6.230769
| 0.769231
| 0.271605
| 0.395062
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 100
| 3
| 59
| 33.333333
| 0.880435
| 0
| 0
| 0
| 0
| 0
| 0.09
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
913f12e6428573e4e895df5ad73f9791d08397ee
| 84
|
py
|
Python
|
core/storage/__init__.py
|
lehduong/Policy-gradient-credit-assignment
|
1d4c102964b985212874c1fe8710a8aa6ff9f328
|
[
"MIT"
] | 8
|
2020-06-29T03:45:14.000Z
|
2022-02-21T03:41:34.000Z
|
core/storage/__init__.py
|
lehduong/Policy-gradient-credit-assignment
|
1d4c102964b985212874c1fe8710a8aa6ff9f328
|
[
"MIT"
] | null | null | null |
core/storage/__init__.py
|
lehduong/Policy-gradient-credit-assignment
|
1d4c102964b985212874c1fe8710a8aa6ff9f328
|
[
"MIT"
] | null | null | null |
from .base_storage import RolloutStorage
from .cpc_storage import CPCRolloutStorage
| 28
| 42
| 0.880952
| 10
| 84
| 7.2
| 0.7
| 0.361111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 84
| 2
| 43
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9140a4a86d1bead147dba92b72dcc1df0c9d09f0
| 143
|
py
|
Python
|
teacher/admin.py
|
aashutoshrathi/Student-Lifecycle-Management
|
c8ad67db3bde989de6d612454ace57084da272c0
|
[
"MIT"
] | 9
|
2018-03-01T16:01:08.000Z
|
2021-02-21T16:01:29.000Z
|
teacher/admin.py
|
aashutoshrathi/Student-Lifecycle-Management
|
c8ad67db3bde989de6d612454ace57084da272c0
|
[
"MIT"
] | 2
|
2018-03-01T18:16:11.000Z
|
2018-03-03T16:12:24.000Z
|
teacher/admin.py
|
aashutoshrathi/Student-Lifecycle-Management
|
c8ad67db3bde989de6d612454ace57084da272c0
|
[
"MIT"
] | 1
|
2019-12-04T18:20:48.000Z
|
2019-12-04T18:20:48.000Z
|
from django.contrib import admin
from .models import Teacher, AssignedCourse
admin.site.register(Teacher)
admin.site.register(AssignedCourse)
| 23.833333
| 43
| 0.839161
| 18
| 143
| 6.666667
| 0.555556
| 0.15
| 0.283333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083916
| 143
| 6
| 44
| 23.833333
| 0.916031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
914612665bb7def562e2189f2bf65bc7489b8862
| 105
|
py
|
Python
|
pydistort/__init__.py
|
barsikus007/pydistort
|
408c28f01bc5f9240a80df6ebbece1dd60cdb087
|
[
"MIT"
] | null | null | null |
pydistort/__init__.py
|
barsikus007/pydistort
|
408c28f01bc5f9240a80df6ebbece1dd60cdb087
|
[
"MIT"
] | null | null | null |
pydistort/__init__.py
|
barsikus007/pydistort
|
408c28f01bc5f9240a80df6ebbece1dd60cdb087
|
[
"MIT"
] | null | null | null |
__version__ = '0.0.1'
from .image import *
from .video import *
from pydistort.utils.queue import Queue
| 17.5
| 39
| 0.742857
| 16
| 105
| 4.625
| 0.625
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0.152381
| 105
| 5
| 40
| 21
| 0.797753
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e6f3427c6126577da5bee550ee7622939ac20fc5
| 168
|
py
|
Python
|
chemml/wrapper/notebook/__init__.py
|
iamchetry/DataChallenge-Fall2021
|
fa7748c9ea2f3c0f6bde8d0b094fc75463e28f33
|
[
"BSD-3-Clause"
] | 108
|
2018-03-23T20:06:03.000Z
|
2022-01-06T19:32:46.000Z
|
chemml/wrapper/notebook/__init__.py
|
hachmannlab/ChemML
|
42b152579872a57c834884596f700c76b9320280
|
[
"BSD-3-Clause"
] | 18
|
2019-08-09T21:16:14.000Z
|
2022-02-14T21:52:06.000Z
|
chemml/wrapper/notebook/__init__.py
|
hachmannlab/ChemML
|
42b152579872a57c834884596f700c76b9320280
|
[
"BSD-3-Clause"
] | 28
|
2018-04-28T17:07:33.000Z
|
2022-02-28T07:22:56.000Z
|
"""
The 'chemml.wrapper.notebook' module contains the ipywidgets implementatoin of the GUI.
"""
from chemml.wrapper.notebook.main import ChemMLNotebook
__all__ = []
| 18.666667
| 87
| 0.767857
| 20
| 168
| 6.25
| 0.75
| 0.208
| 0.336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 168
| 8
| 88
| 21
| 0.856164
| 0.517857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fc14970e6081bec3f2763dd0f85da75dfb047701
| 37
|
py
|
Python
|
swig/main.py
|
gmatesunny/watchcat
|
765ad6a22bf9c0364b8efebafd2751b91ae6f96d
|
[
"MIT"
] | 7
|
2021-08-31T13:31:47.000Z
|
2022-03-11T21:40:27.000Z
|
swig/main.py
|
gmatesunny/watchcat
|
765ad6a22bf9c0364b8efebafd2751b91ae6f96d
|
[
"MIT"
] | null | null | null |
swig/main.py
|
gmatesunny/watchcat
|
765ad6a22bf9c0364b8efebafd2751b91ae6f96d
|
[
"MIT"
] | null | null | null |
import watchcat
watchcat.TimeEvent()
| 12.333333
| 20
| 0.837838
| 4
| 37
| 7.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 3
| 20
| 12.333333
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fc207ee91d8e49d0c5987dcd17e9eb47729d5e63
| 815
|
py
|
Python
|
cifar10-classification/utils/make_20cls_label.py
|
elgong/semi-supervised-DL-using-pseduo-label
|
2259c6ef93939f247f71a9d7f6766224120f2548
|
[
"MIT"
] | 1
|
2019-11-26T12:40:05.000Z
|
2019-11-26T12:40:05.000Z
|
cifar10-classification/utils/make_20cls_label.py
|
elgong/semi-supervised-DL-using-pseduo-label
|
2259c6ef93939f247f71a9d7f6766224120f2548
|
[
"MIT"
] | null | null | null |
cifar10-classification/utils/make_20cls_label.py
|
elgong/semi-supervised-DL-using-pseduo-label
|
2259c6ef93939f247f71a9d7f6766224120f2548
|
[
"MIT"
] | null | null | null |
import os
train_path = "/home/elgong/GEL/one_shot/torch/pytorch-cifar-master/data/train"
val_path = "/home/elgong/GEL/one_shot/torch/pytorch-cifar-master/data/val"
train_txt = "./train.txt"
val_txt = "./val.txt"
class_name = []
with open(train_txt, "w") as f:
for root, dic, fList in os.walk(train_path):
for img in fList:
cls = img.split("_")[0]
if cls not in class_name:
class_name.append(cls)
if cls in class_name:
f.write(img + "," + str(class_name.index(cls)) + "\n")
with open(val_txt, "w") as f:
for root, dic, fList in os.walk(val_path):
for img in fList:
cls = img.split("_")[0]
if cls in class_name:
f.write(img + "," + str(class_name.index(cls)) + "\n")
| 26.290323
| 78
| 0.565644
| 124
| 815
| 3.564516
| 0.322581
| 0.142534
| 0.074661
| 0.076923
| 0.717195
| 0.717195
| 0.717195
| 0.717195
| 0.717195
| 0.717195
| 0
| 0.003425
| 0.283436
| 815
| 30
| 79
| 27.166667
| 0.753425
| 0
| 0
| 0.4
| 0
| 0
| 0.188957
| 0.152147
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc308ffc9daef9541c0a1a00e3e998321d3b3104
| 155
|
py
|
Python
|
pdover2t/dnvgl_st_f101/__init__.py
|
qwilka/PDover2t
|
4387d153228f1af20a8f5f3f368aa49c42cda2cd
|
[
"MIT"
] | null | null | null |
pdover2t/dnvgl_st_f101/__init__.py
|
qwilka/PDover2t
|
4387d153228f1af20a8f5f3f368aa49c42cda2cd
|
[
"MIT"
] | null | null | null |
pdover2t/dnvgl_st_f101/__init__.py
|
qwilka/PDover2t
|
4387d153228f1af20a8f5f3f368aa49c42cda2cd
|
[
"MIT"
] | 1
|
2019-11-24T09:32:12.000Z
|
2019-11-24T09:32:12.000Z
|
from .material import *
from .pressure_containment import *
#from .pipe_collapse import *
#from .propagation_buckling import *
#from .stability import *
| 19.375
| 36
| 0.774194
| 18
| 155
| 6.5
| 0.555556
| 0.34188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141935
| 155
| 7
| 37
| 22.142857
| 0.879699
| 0.56129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fc4126ec0318afafd5c0347893d85db2f967bcd9
| 1,427
|
py
|
Python
|
oscar/lib/python2.7/site-packages/phonenumbers/data/region_MV.py
|
bhav11esh/Oscar-Bookshelf
|
b48f088e2ed908b3603f2ecc63d602f81392eac4
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/phonenumbers/data/region_MV.py
|
bhav11esh/Oscar-Bookshelf
|
b48f088e2ed908b3603f2ecc63d602f81392eac4
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/phonenumbers/data/region_MV.py
|
bhav11esh/Oscar-Bookshelf
|
b48f088e2ed908b3603f2ecc63d602f81392eac4
|
[
"BSD-3-Clause"
] | null | null | null |
"""Auto-generated file, do not edit by hand. MV metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MV = PhoneMetadata(id='MV', country_code=960, international_prefix='0(?:0|19)',
general_desc=PhoneNumberDesc(national_number_pattern='[346-8]\\d{6,9}|9(?:00\\d{7}|\\d{6})', possible_length=(7, 10)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:3(?:0[0-3]|3[0-59])|6(?:[57][02468]|6[024568]|8[024689]|90))\\d{4}', example_number='6701234', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:46[46]|7[3-9]\\d|9[15-9]\\d)\\d{4}', example_number='7712345', possible_length=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{7}', example_number='8001234567', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{7}', example_number='9001234567', possible_length=(10,)),
pager=PhoneNumberDesc(national_number_pattern='781\\d{4}', example_number='7812345', possible_length=(7,)),
uan=PhoneNumberDesc(national_number_pattern='4[05]0\\d{4}', example_number='4001234', possible_length=(7,)),
preferred_international_prefix='00',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1-\\2', leading_digits_pattern=['[3467]|9(?:[1-9]|0[1-9])']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[89]00'])])
| 95.133333
| 176
| 0.701472
| 205
| 1,427
| 4.678049
| 0.37561
| 0.167883
| 0.211679
| 0.262774
| 0.066736
| 0.025026
| 0.025026
| 0
| 0
| 0
| 0
| 0.123874
| 0.066573
| 1,427
| 14
| 177
| 101.928571
| 0.596096
| 0.037141
| 0
| 0
| 1
| 0.25
| 0.241228
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5da03be42c47c00c68b7861f1ff8737ca9307f95
| 8,573
|
py
|
Python
|
readthedocs/rtd_tests/tests/test_subprojects.py
|
nijel/readthedocs.org
|
4869667a3f0b56d440142187583f4bf79b9bce07
|
[
"MIT"
] | 1
|
2020-08-10T01:14:08.000Z
|
2020-08-10T01:14:08.000Z
|
readthedocs/rtd_tests/tests/test_subprojects.py
|
nijel/readthedocs.org
|
4869667a3f0b56d440142187583f4bf79b9bce07
|
[
"MIT"
] | 11
|
2020-07-24T23:17:24.000Z
|
2022-03-12T00:43:42.000Z
|
readthedocs/rtd_tests/tests/test_subprojects.py
|
nijel/readthedocs.org
|
4869667a3f0b56d440142187583f4bf79b9bce07
|
[
"MIT"
] | 1
|
2021-07-09T14:02:39.000Z
|
2021-07-09T14:02:39.000Z
|
# -*- coding: utf-8 -*-
import django_dynamic_fixture as fixture
from unittest import mock
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from readthedocs.projects.forms import ProjectRelationshipForm
from readthedocs.projects.models import Project, ProjectRelationship
from readthedocs.rtd_tests.utils import create_user
class SubprojectFormTests(TestCase):
def test_empty_child(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
form = ProjectRelationshipForm(
{},
project=project,
user=user,
)
form.full_clean()
self.assertEqual(len(form.errors['child']), 1)
self.assertRegex(
form.errors['child'][0],
r'This field is required.',
)
def test_nonexistent_child(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
self.assertFalse(Project.objects.filter(pk=9999).exists())
form = ProjectRelationshipForm(
{'child': 9999},
project=project,
user=user,
)
form.full_clean()
self.assertEqual(len(form.errors['child']), 1)
self.assertRegex(
form.errors['child'][0],
r'Select a valid choice.',
)
def test_adding_subproject_fails_when_user_is_not_admin(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
project.users.add(user)
subproject = fixture.get(Project, slug='subproject')
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=project,
user=user,
)
form.full_clean()
self.assertEqual(len(form.errors['child']), 1)
self.assertRegex(
form.errors['child'][0],
r'Select a valid choice.',
)
self.assertEqual(
[proj_id for (proj_id, __) in form.fields['child'].choices],
[''],
)
def test_adding_subproject_passes_when_user_is_admin(self):
user = fixture.get(User)
project = fixture.get(Project, slug='mainproject')
project.users.add(user)
subproject = fixture.get(Project, slug='subproject')
subproject.users.add(user)
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project, subproject],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=project,
user=user,
)
form.full_clean()
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(
[r.child for r in project.subprojects.all()],
[subproject],
)
def test_subproject_form_cant_create_sub_sub_project(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
subsubproject = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject,
)
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project, subproject, subsubproject],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subsubproject.pk},
project=subproject,
user=user,
)
# The subsubproject is valid here, as far as the child check is
# concerned, but the parent check should fail.
self.assertEqual(
[proj_id for (proj_id, __) in form.fields['child'].choices],
['', subsubproject.pk],
)
form.full_clean()
self.assertEqual(len(form.errors['parent']), 1)
self.assertRegex(
form.errors['parent'][0],
r'Subproject nesting is not supported',
)
def test_excludes_existing_subprojects(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject,
)
self.assertQuerysetEqual(
Project.objects.for_admin_user(user),
[project, subproject],
transform=lambda n: n,
ordered=False,
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=project,
user=user,
)
self.assertEqual(
[proj_id for (proj_id, __) in form.fields['child'].choices],
[''],
)
def test_subproject_cant_be_subproject(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
another_project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject,
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=project,
user=user,
)
self.assertFalse(form.is_valid())
self.assertRegex(
form.errors['child'][0],
'Select a valid choice',
)
form = ProjectRelationshipForm(
{'child': subproject.pk},
project=another_project,
user=user,
)
self.assertFalse(form.is_valid())
self.assertRegex(
form.errors['child'][0],
'Select a valid choice',
)
def test_superproject_cant_be_subproject(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
another_project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject,
)
form = ProjectRelationshipForm(
{'child': project.pk},
project=another_project,
user=user,
)
self.assertFalse(form.is_valid())
self.assertRegex(
form.errors['child'][0],
'Select a valid choice',
)
def test_exclude_self_project_as_subproject(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
form = ProjectRelationshipForm(
{'child': project.pk},
project=project,
user=user,
)
self.assertFalse(form.is_valid())
self.assertNotIn(
project.id,
[proj_id for (proj_id, __) in form.fields['child'].choices],
)
def test_alias_already_exists_for_a_project(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
subproject_2 = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject,
alias='subproject'
)
form = ProjectRelationshipForm(
{
'child': subproject_2.id,
'alias': 'subproject'
},
project=project,
user=user,
)
self.assertFalse(form.is_valid())
error_msg = 'A subproject with this alias already exists'
self.assertDictEqual(form.errors, {'alias': [error_msg]})
def test_edit_only_lists_instance_project_in_child_choices(self):
user = fixture.get(User)
project = fixture.get(Project, users=[user])
subproject = fixture.get(Project, users=[user])
relation = fixture.get(
ProjectRelationship, parent=project, child=subproject,
alias='subproject'
)
form = ProjectRelationshipForm(
instance=relation,
project=project,
user=user,
)
self.assertEqual(
[proj_id for (proj_id, __) in form.fields['child'].choices],
['', relation.child.id],
)
| 33.357977
| 72
| 0.576694
| 837
| 8,573
| 5.777778
| 0.144564
| 0.082713
| 0.080852
| 0.077337
| 0.740902
| 0.732423
| 0.710711
| 0.710711
| 0.702233
| 0.679694
| 0
| 0.003742
| 0.314126
| 8,573
| 256
| 73
| 33.488281
| 0.818707
| 0.014931
| 0
| 0.642553
| 0
| 0
| 0.0526
| 0
| 0
| 0
| 0
| 0
| 0.123404
| 1
| 0.046809
| false
| 0.004255
| 0.034043
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5dc5c0ac7551f74b055dec49829aaf268fb5a26d
| 98
|
py
|
Python
|
code/log_msg/admin.py
|
AlanJui/DjangoApp-DevTemplate
|
da39db79439a3e94ced5e853af4aa8b6ebf52191
|
[
"PostgreSQL"
] | null | null | null |
code/log_msg/admin.py
|
AlanJui/DjangoApp-DevTemplate
|
da39db79439a3e94ced5e853af4aa8b6ebf52191
|
[
"PostgreSQL"
] | 2
|
2021-03-30T13:48:40.000Z
|
2021-04-08T20:43:31.000Z
|
code/log_msg/admin.py
|
AlanJui/DjangoApp-DevTemplate
|
da39db79439a3e94ced5e853af4aa8b6ebf52191
|
[
"PostgreSQL"
] | null | null | null |
from django.contrib import admin
from .models import LogMessage
admin.site.register(LogMessage)
| 16.333333
| 32
| 0.826531
| 13
| 98
| 6.230769
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112245
| 98
| 5
| 33
| 19.6
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5dd92a0882a2359802f3fefefb22b726676d3d3f
| 68
|
py
|
Python
|
test/pithy/url.py
|
gwk/glossy
|
6976ca4fd1efc09d9cd670b1fe37817c05b4b529
|
[
"CC0-1.0"
] | 7
|
2019-05-04T00:51:38.000Z
|
2021-12-10T15:36:31.000Z
|
test/pithy/url.py
|
gwk/glossy
|
6976ca4fd1efc09d9cd670b1fe37817c05b4b529
|
[
"CC0-1.0"
] | null | null | null |
test/pithy/url.py
|
gwk/glossy
|
6976ca4fd1efc09d9cd670b1fe37817c05b4b529
|
[
"CC0-1.0"
] | 1
|
2016-07-30T22:38:08.000Z
|
2016-07-30T22:38:08.000Z
|
#!/usr/bin/env python3
from utest import *
from pithy.url import *
| 13.6
| 23
| 0.720588
| 11
| 68
| 4.454545
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.161765
| 68
| 4
| 24
| 17
| 0.842105
| 0.308824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5ddd62ec616c5b4a3f11a8a243faf889882ce42a
| 30
|
py
|
Python
|
tests/test_functions.py
|
landegt/pylabel
|
9d0079a1f61eb84ec9cd10fb202a9246a08576fa
|
[
"MIT"
] | 1
|
2021-11-30T04:33:13.000Z
|
2021-11-30T04:33:13.000Z
|
tests/test_functions.py
|
landegt/pylabel
|
9d0079a1f61eb84ec9cd10fb202a9246a08576fa
|
[
"MIT"
] | null | null | null |
tests/test_functions.py
|
landegt/pylabel
|
9d0079a1f61eb84ec9cd10fb202a9246a08576fa
|
[
"MIT"
] | 1
|
2021-12-04T13:57:45.000Z
|
2021-12-04T13:57:45.000Z
|
from pylabel import functions
| 15
| 29
| 0.866667
| 4
| 30
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5dfefde74eb5d9f244299e439524fa29401a3794
| 83
|
py
|
Python
|
backend/day/admin.py
|
sysopmatt/py-schedule
|
c087b6e5ca162481394de0d5e8c7b41a74092f99
|
[
"MIT"
] | null | null | null |
backend/day/admin.py
|
sysopmatt/py-schedule
|
c087b6e5ca162481394de0d5e8c7b41a74092f99
|
[
"MIT"
] | null | null | null |
backend/day/admin.py
|
sysopmatt/py-schedule
|
c087b6e5ca162481394de0d5e8c7b41a74092f99
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Day
admin.site.register(Day)
| 16.6
| 32
| 0.807229
| 13
| 83
| 5.153846
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120482
| 83
| 5
| 33
| 16.6
| 0.917808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b902956b0138ce6c2fa92f9214a024de6277995d
| 269
|
py
|
Python
|
core/context_processors.py
|
losolio/website
|
5b983e9dfaf604212aab87c51d8904ffc29527a3
|
[
"MIT"
] | 10
|
2015-12-18T16:41:33.000Z
|
2018-11-11T08:36:46.000Z
|
core/context_processors.py
|
losolio/website
|
5b983e9dfaf604212aab87c51d8904ffc29527a3
|
[
"MIT"
] | 96
|
2015-07-14T22:45:56.000Z
|
2017-07-25T19:59:48.000Z
|
core/context_processors.py
|
losolio/website
|
5b983e9dfaf604212aab87c51d8904ffc29527a3
|
[
"MIT"
] | 9
|
2015-07-28T14:38:43.000Z
|
2019-01-04T17:38:42.000Z
|
from django.conf import settings
def settings_context(request):
return {'GOOGLE_ANALYTICS_PROPERTY_ID': settings.GOOGLE_ANALYTICS_PROPERTY_ID,
'IS_PRODUCTION': settings.IS_PRODUCTION,
'ADMIN_ENABLED': settings.ADMIN_ENABLED,
}
| 29.888889
| 82
| 0.713755
| 29
| 269
| 6.241379
| 0.586207
| 0.165746
| 0.254144
| 0.276243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208178
| 269
| 8
| 83
| 33.625
| 0.849765
| 0
| 0
| 0
| 0
| 0
| 0.200743
| 0.104089
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
b90850da4ee1cdfc04ab5cbecf6b6b2cce6aa913
| 72
|
py
|
Python
|
keras_contrib/losses/__init__.py
|
rgreenblatt/keras-contrib
|
46fcdb9384b3bc9399c651b2b43640aa54098e64
|
[
"MIT"
] | 1
|
2019-01-24T13:09:51.000Z
|
2019-01-24T13:09:51.000Z
|
keras_contrib/losses/__init__.py
|
rgreenblatt/keras-contrib
|
46fcdb9384b3bc9399c651b2b43640aa54098e64
|
[
"MIT"
] | null | null | null |
keras_contrib/losses/__init__.py
|
rgreenblatt/keras-contrib
|
46fcdb9384b3bc9399c651b2b43640aa54098e64
|
[
"MIT"
] | 1
|
2018-09-03T17:53:44.000Z
|
2018-09-03T17:53:44.000Z
|
from .dssim import DSSIMObjective
from .jaccard import jaccard_distance
| 24
| 37
| 0.861111
| 9
| 72
| 6.777778
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 2
| 38
| 36
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d141568edd1ec6c32fa03107ab21653d802f647
| 98
|
py
|
Python
|
_resource/python/platform/default/_parameterized.py
|
amlyj/tensorflowStudy
|
1e3a4b15a57d53e746fd730af540da4be471c70b
|
[
"MIT"
] | 4
|
2021-06-11T09:43:32.000Z
|
2021-11-17T11:15:52.000Z
|
_resource/python/platform/default/_parameterized.py
|
amlyj/tensorflowStudy
|
1e3a4b15a57d53e746fd730af540da4be471c70b
|
[
"MIT"
] | null | null | null |
_resource/python/platform/default/_parameterized.py
|
amlyj/tensorflowStudy
|
1e3a4b15a57d53e746fd730af540da4be471c70b
|
[
"MIT"
] | 2
|
2015-11-13T21:11:49.000Z
|
2015-11-29T04:13:49.000Z
|
"""Extension to unittest to run parameterized tests."""
raise ImportError("Not implemented yet.")
| 32.666667
| 55
| 0.765306
| 12
| 98
| 6.25
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112245
| 98
| 2
| 56
| 49
| 0.862069
| 0.5
| 0
| 0
| 0
| 0
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d25f57d1c37aed68c49cf1baffb2187071b7ac4
| 12,130
|
py
|
Python
|
harmonica/tests/test_eql_harmonic.py
|
RichardScottOZ/harmonica
|
ccb0437ea0ed528cfd144844edab98141c8d08da
|
[
"BSD-3-Clause"
] | null | null | null |
harmonica/tests/test_eql_harmonic.py
|
RichardScottOZ/harmonica
|
ccb0437ea0ed528cfd144844edab98141c8d08da
|
[
"BSD-3-Clause"
] | 1
|
2022-01-19T03:02:22.000Z
|
2022-01-19T20:47:19.000Z
|
harmonica/tests/test_eql_harmonic.py
|
RichardScottOZ/harmonica
|
ccb0437ea0ed528cfd144844edab98141c8d08da
|
[
"BSD-3-Clause"
] | 1
|
2022-01-17T23:15:18.000Z
|
2022-01-17T23:15:18.000Z
|
"""
Test the EQLHarmonic gridder
"""
import warnings
import pytest
import numpy as np
import numpy.testing as npt
import verde as vd
import verde.base as vdb
from .. import EQLHarmonic, EQLHarmonicSpherical, point_mass_gravity
from ..equivalent_layer.harmonic import greens_func_cartesian
from ..equivalent_layer.utils import (
jacobian_numba,
pop_extra_coords,
)
from .utils import require_numba
def test_pop_extra_coords():
    """
    Test the pop_extra_coords private helper function.
    """
    # When extra_coords is present it must be removed and a warning issued
    arguments = {"bla": 1, "blabla": 2, "extra_coords": 1400.0}
    with warnings.catch_warnings(record=True) as caught:
        pop_extra_coords(arguments)
        assert len(caught) == 1
        assert issubclass(caught[0].category, UserWarning)
    assert "extra_coords" not in arguments
    # A kwargs dict without extra_coords must pass through untouched
    arguments = {"bla": 1, "blabla": 2}
    pop_extra_coords(arguments)
    assert arguments == {"bla": 1, "blabla": 2}
@require_numba
def test_eql_harmonic_cartesian():
    """
    Check that predictions are reasonable when interpolating from one grid to
    a denser grid. Use Cartesian coordinates.
    """
    region = (-3e3, -1e3, 5e3, 7e3)
    # Build synthetic point masses (checkerboard pattern, 1 km below surface)
    points = vd.grid_coordinates(region=region, shape=(6, 6), extra_coords=-1e3)
    masses = vd.datasets.CheckerBoard(amplitude=1e13, region=region).predict(points)
    # Define a set of observation points at zero height
    coordinates = vd.grid_coordinates(region=region, shape=(40, 40), extra_coords=0)
    # Get synthetic data
    data = point_mass_gravity(coordinates, points, masses, field="g_z")
    # The interpolation should be perfect on the data points
    eql = EQLHarmonic()
    eql.fit(coordinates, data)
    npt.assert_allclose(data, eql.predict(coordinates), rtol=1e-5)
    # Gridding onto a denser grid should be reasonably accurate when compared
    # to synthetic values
    upward = 0
    shape = (60, 60)
    grid = vd.grid_coordinates(region=region, shape=shape, extra_coords=upward)
    true = point_mass_gravity(grid, points, masses, field="g_z")
    npt.assert_allclose(true, eql.predict(grid), rtol=1e-3)
    # Test grid method against the same synthetic values
    grid = eql.grid(upward, shape=shape, region=region)
    npt.assert_allclose(true, grid.scalars, rtol=1e-3)
    # Test profile method along the western edge of the region
    point1 = (region[0], region[2])
    point2 = (region[0], region[3])
    profile = eql.profile(point1, point2, upward, shape[0])
    true = point_mass_gravity(
        (profile.easting, profile.northing, profile.upward), points, masses, field="g_z"
    )
    npt.assert_allclose(true, profile.scalars, rtol=1e-3)
def test_eql_harmonic_small_data_cartesian():
    """
    Check predictions against synthetic data using few data points for speed
    Use Cartesian coordinates.
    """
    region = (-3e3, -1e3, 5e3, 7e3)
    # Build synthetic point masses (checkerboard pattern, 1 km below surface)
    points = vd.grid_coordinates(region=region, shape=(6, 6), extra_coords=-1e3)
    masses = vd.datasets.CheckerBoard(amplitude=1e13, region=region).predict(points)
    # Define a small set of observation points to keep the test fast
    coordinates = vd.grid_coordinates(region=region, shape=(8, 8), extra_coords=0)
    # Get synthetic data
    data = point_mass_gravity(coordinates, points, masses, field="g_z")
    # The interpolation should be perfect on the data points
    eql = EQLHarmonic(relative_depth=500)
    eql.fit(coordinates, data)
    npt.assert_allclose(data, eql.predict(coordinates), rtol=1e-5)
    # Check that the proper source locations were set:
    # one source below each data point, 500 units deeper (relative_depth)
    tmp = [i.ravel() for i in coordinates]
    npt.assert_allclose(tmp[:2], eql.points_[:2], rtol=1e-5)
    npt.assert_allclose(tmp[2] - 500, eql.points_[2], rtol=1e-5)
    # Gridding at higher altitude should be reasonably accurate when compared
    # to synthetic values (looser rtol: few data points, upward continuation)
    upward = 20
    shape = (8, 8)
    grid = vd.grid_coordinates(region=region, shape=shape, extra_coords=upward)
    true = point_mass_gravity(grid, points, masses, field="g_z")
    npt.assert_allclose(true, eql.predict(grid), rtol=0.08)
    # Test grid method
    grid = eql.grid(upward, shape=shape, region=region)
    npt.assert_allclose(true, grid.scalars, rtol=0.08)
    # Test profile method along the western edge of the region
    point1 = (region[0], region[2])
    point2 = (region[0], region[3])
    profile = eql.profile(point1, point2, upward, 10)
    true = point_mass_gravity(
        (profile.easting, profile.northing, profile.upward), points, masses, field="g_z"
    )
    npt.assert_allclose(true, profile.scalars, rtol=0.05)
def test_eql_harmonic_custom_points_cartesian():
    """
    Check that passing in custom points works and actually uses the points
    Use Cartesian coordinates.
    """
    region = (-3e3, -1e3, 5e3, 7e3)
    # Synthetic sources and the signal they generate on a small survey grid
    source_coords = vd.grid_coordinates(region=region, shape=(6, 6), extra_coords=-1e3)
    source_masses = vd.datasets.CheckerBoard(amplitude=1e13, region=region).predict(
        source_coords
    )
    survey = vd.grid_coordinates(region=region, shape=(5, 5), extra_coords=0)
    survey_data = point_mass_gravity(survey, source_coords, source_masses, field="g_z")
    # Build a custom set of equivalent sources and fit with them
    custom_grid = vd.grid_coordinates(region=region, shape=(3, 3), extra_coords=-550)
    points_custom = tuple(coord.ravel() for coord in custom_grid)
    eql = EQLHarmonic(points=points_custom)
    eql.fit(survey, survey_data)
    # The gridder must keep exactly the sources it was given
    npt.assert_allclose(points_custom, eql.points_, rtol=1e-5)
def test_eql_harmonic_scatter_not_implemented():
    """
    Check if the scatter method raises a NotImplementedError
    """
    # Scattered output makes no sense for this gridder, so it must refuse
    with pytest.raises(NotImplementedError):
        EQLHarmonic().scatter()
@pytest.mark.use_numba
def test_eql_harmonic_jacobian_cartesian():
    """
    Test Jacobian matrix under symmetric system of point sources.
    Use Cartesian coordinates.
    """
    # 2x2 grid of observation points with one source 100 units below each:
    # the geometry is fully symmetric, so the Jacobian must be too.
    easting, northing, upward = vd.grid_coordinates(
        region=[-100, 100, -100, 100], shape=(2, 2), extra_coords=0
    )
    points = vdb.n_1d_arrays((easting, northing, upward + 100), n=3)
    coordinates = vdb.n_1d_arrays((easting, northing, upward), n=3)
    n_points = points[0].size
    jacobian = np.zeros((n_points, n_points), dtype=points[0].dtype)
    jacobian_numba(coordinates, points, jacobian, greens_func_cartesian)
    # All diagonal elements must be equal (each point vs. its own source)
    diagonal = np.diag_indices(4)
    npt.assert_allclose(jacobian[diagonal][0], jacobian[diagonal])
    # All anti-diagonal elements must be equal (elements between distant
    # points)
    anti_diagonal = (diagonal[0], diagonal[1][::-1])
    npt.assert_allclose(jacobian[anti_diagonal][0], jacobian[anti_diagonal])
    # All elements corresponding to nearest neighbors must be equal
    # (everything that is neither diagonal nor anti-diagonal)
    nearest_neighbours = np.ones((4, 4), dtype=bool)
    nearest_neighbours[diagonal] = False
    nearest_neighbours[anti_diagonal] = False
    npt.assert_allclose(jacobian[nearest_neighbours][0], jacobian[nearest_neighbours])
@require_numba
def test_eql_harmonic_spherical():
    """
    Check that predictions are reasonable when interpolating from one grid to
    a denser grid. Use spherical coordinates.
    """
    region = (-70, -60, -40, -30)
    radius = 6400e3
    # Build synthetic point masses 500 km below the reference radius
    points = vd.grid_coordinates(
        region=region, shape=(6, 6), extra_coords=radius - 500e3
    )
    masses = vd.datasets.CheckerBoard(amplitude=1e13, region=region).predict(points)
    # Define a set of observation points at the reference radius
    coordinates = vd.grid_coordinates(
        region=region, shape=(40, 40), extra_coords=radius
    )
    # Get synthetic data
    data = point_mass_gravity(
        coordinates, points, masses, field="g_z", coordinate_system="spherical"
    )
    # The interpolation should be perfect on the data points
    eql = EQLHarmonicSpherical(relative_depth=500e3)
    eql.fit(coordinates, data)
    npt.assert_allclose(data, eql.predict(coordinates), rtol=1e-5)
    # Gridding onto a denser grid should be reasonably accurate when compared
    # to synthetic values
    upward = radius
    shape = (60, 60)
    grid = vd.grid_coordinates(region=region, shape=shape, extra_coords=upward)
    true = point_mass_gravity(
        grid, points, masses, field="g_z", coordinate_system="spherical"
    )
    npt.assert_allclose(true, eql.predict(grid), rtol=1e-3)
    # Test grid method against the same synthetic values
    grid = eql.grid(upward, shape=shape, region=region)
    npt.assert_allclose(true, grid.scalars, rtol=1e-3)
def test_eql_harmonic_small_data_spherical():
    """
    Check predictions against synthetic data using few data points for speed
    Use spherical coordinates.
    """
    region = (-70, -60, -40, -30)
    radius = 6400e3
    # Build synthetic point masses 500 km below the reference radius
    points = vd.grid_coordinates(
        region=region, shape=(6, 6), extra_coords=radius - 500e3
    )
    masses = vd.datasets.CheckerBoard(amplitude=1e13, region=region).predict(points)
    # Define a small set of observation points to keep the test fast
    coordinates = vd.grid_coordinates(region=region, shape=(8, 8), extra_coords=radius)
    # Get synthetic data
    data = point_mass_gravity(
        coordinates, points, masses, field="g_z", coordinate_system="spherical"
    )
    # The interpolation should be perfect on the data points
    eql = EQLHarmonicSpherical(relative_depth=500e3)
    eql.fit(coordinates, data)
    npt.assert_allclose(data, eql.predict(coordinates), rtol=1e-5)
    # Check that the proper source locations were set:
    # one source below each data point, 500 km deeper (relative_depth)
    tmp = [i.ravel() for i in coordinates]
    npt.assert_allclose(tmp[:2], eql.points_[:2], rtol=1e-5)
    npt.assert_allclose(tmp[2] - 500e3, eql.points_[2], rtol=1e-5)
    # Gridding at higher altitude should be reasonably accurate when compared
    # to synthetic values (looser rtol: few data points, upward continuation)
    upward = radius + 2e3
    shape = (8, 8)
    grid = vd.grid_coordinates(region=region, shape=shape, extra_coords=upward)
    true = point_mass_gravity(
        grid, points, masses, field="g_z", coordinate_system="spherical"
    )
    npt.assert_allclose(true, eql.predict(grid), rtol=0.05)
    # Test grid method
    grid = eql.grid(upward, shape=shape, region=region)
    npt.assert_allclose(true, grid.scalars, rtol=0.05)
def test_eql_harmonic_custom_points_spherical():
    """
    Check that passing in custom points works and actually uses the points
    Use spherical coordinates.
    """
    region = (-70, -60, -40, -30)
    radius = 6400e3
    # Synthetic sources and the signal they generate on a small survey grid
    source_coords = vd.grid_coordinates(
        region=region, shape=(6, 6), extra_coords=radius - 500e3
    )
    source_masses = vd.datasets.CheckerBoard(amplitude=1e13, region=region).predict(
        source_coords
    )
    survey = vd.grid_coordinates(region=region, shape=(5, 5), extra_coords=radius)
    survey_data = point_mass_gravity(
        survey, source_coords, source_masses, field="g_z", coordinate_system="spherical"
    )
    # Build a custom set of equivalent sources and fit with them
    custom_grid = vd.grid_coordinates(
        region=region, shape=(3, 3), extra_coords=radius - 500e3
    )
    points_custom = tuple(coord.ravel() for coord in custom_grid)
    eql = EQLHarmonicSpherical(points=points_custom)
    eql.fit(survey, survey_data)
    # The gridder must keep exactly the sources it was given
    npt.assert_allclose(points_custom, eql.points_, rtol=1e-5)
def test_eql_harmonic_spherical_scatter_not_implemented():
    """
    Check if the scatter method raises a NotImplementedError
    """
    # Scattered output makes no sense for this gridder, so it must refuse
    with pytest.raises(NotImplementedError):
        EQLHarmonicSpherical().scatter()
def test_eql_harmonic_spherical_profile_not_implemented():
    """
    Check if profile method raises a NotImplementedError
    """
    # NOTE: docstring previously said "scatter" — this test exercises profile
    eql = EQLHarmonicSpherical()
    with pytest.raises(NotImplementedError):
        eql.profile(point1=(1, 1), point2=(2, 2), size=3)
def test_eql_harmonic_spherical_no_projection():
    """
    Check if projection is not a valid argument of grid method
    """
    # Spherical gridding has no sensible Cartesian projection hook
    eql = EQLHarmonicSpherical()
    projection = lambda east, north: (east * 2, north * 2)
    with pytest.raises(TypeError):
        eql.grid(upward=10, projection=projection)
| 36.426426
| 88
| 0.69934
| 1,608
| 12,130
| 5.140547
| 0.131841
| 0.037261
| 0.047302
| 0.052867
| 0.798572
| 0.763247
| 0.749698
| 0.741713
| 0.741713
| 0.741471
| 0
| 0.031224
| 0.194724
| 12,130
| 332
| 89
| 36.536145
| 0.815008
| 0.225886
| 0
| 0.515625
| 0
| 0
| 0.014463
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 1
| 0.0625
| false
| 0
| 0.052083
| 0
| 0.114583
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d2fdc396b7ceb1581627f0f2dfc92f29a013bed
| 185
|
py
|
Python
|
CInterface/SWIG/setup.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2021-12-17T11:03:13.000Z
|
2021-12-17T11:03:13.000Z
|
CInterface/SWIG/setup.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2020-02-05T00:14:43.000Z
|
2020-02-06T09:22:49.000Z
|
CInterface/SWIG/setup.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup, Extension

# Build the SWIG-wrapped cosine module: the .i interface file is run through
# SWIG and the generated wrapper is compiled together with cos_module.c into
# the _cos_module extension.
cos_extension = Extension("_cos_module", sources=["cos_module.c", "cos_module.i"])
setup(ext_modules=[cos_extension])
| 26.428571
| 49
| 0.686486
| 26
| 185
| 4.692308
| 0.769231
| 0.221311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006135
| 0.118919
| 185
| 6
| 50
| 30.833333
| 0.742331
| 0.227027
| 0
| 0
| 0
| 0
| 0.248227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5d6653d9d52a2d0ed35e8b2f233c45991218c14d
| 204
|
py
|
Python
|
models/__init__.py
|
Hhhhhhhhhhao/image-cartoonization
|
073b51656b96b069496917d212119caad7bf4728
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Hhhhhhhhhhao/image-cartoonization
|
073b51656b96b069496917d212119caad7bf4728
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Hhhhhhhhhhao/image-cartoonization
|
073b51656b96b069496917d212119caad7bf4728
|
[
"MIT"
] | null | null | null |
from .generator import *
from .discriminator import *
from .inception import InceptionV3
from .lenet import LeNet5
from .utils import StyleEncoder, MappingNetwork, PatchSampleF
from .resnet import ResNet
| 29.142857
| 61
| 0.823529
| 24
| 204
| 7
| 0.541667
| 0.119048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011236
| 0.127451
| 204
| 6
| 62
| 34
| 0.932584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d6e20982d04c288ee0394c6f65608ebbb5233f2
| 8,021
|
py
|
Python
|
Emoji_generator.py
|
mryesiller/Emoji_Generator
|
dba2db070e478b9ff7a16babfc381a584c27975f
|
[
"MIT"
] | 2
|
2022-01-28T21:59:08.000Z
|
2022-03-14T00:00:37.000Z
|
Emoji_generator.py
|
mryesiller/Emoji_Generator
|
dba2db070e478b9ff7a16babfc381a584c27975f
|
[
"MIT"
] | null | null | null |
Emoji_generator.py
|
mryesiller/Emoji_Generator
|
dba2db070e478b9ff7a16babfc381a584c27975f
|
[
"MIT"
] | null | null | null |
from PIL import Image
import numpy as np
import os
from random import randint

# Pixel-art emoji generator: draws 24x24 RGB grids with a randomly chosen
# rarity palette, mouth, and eye style, upscales them to 480x480 with
# nearest-neighbour resampling, and saves them as PNG files.
# This is a table-driven rewrite of the original copy-paste version: the
# pixel coordinates and palettes are identical, and the three randint calls
# happen in the same order (rarity, mouth, eyes), so output is unchanged
# for a given random seed.

BLACK = (0, 0, 0)  # face outline / feature colour (same for every rarity)

# Rarity palettes, scanned in order: the first row whose threshold the roll
# exceeds wins.  Columns: (threshold, (borders-inside, background,
# borders-outside)).  The -1 row is the catch-all (legendary).
PALETTES = (
    (400, ((255, 255, 255), (255, 255, 255), (0, 0, 0))),      # common
    (47, ((255, 255, 255), (255, 255, 204), (31, 57, 186))),   # rare
    (7, ((255, 255, 255), (255, 255, 102), (186, 31, 160))),   # epic
    (-1, ((255, 255, 255), (255, 51, 255), (226, 144, 21))),   # legendary
)

# Feature tables: (threshold, [(row, col), ...]) pixels to paint black,
# scanned in order just like PALETTES.
MOUTHS = (
    (600, [(16, c) for c in range(9, 15)]),  # straight mouth
    (247, [(15, 10), (15, 11), (15, 12), (15, 13), (16, 9), (17, 8),
           (16, 14), (17, 15)]),  # upset
    (107, [(17, 10), (17, 11), (17, 12), (17, 13), (16, 9), (15, 8),
           (16, 14), (15, 15)]),  # smile
    (17, [(18, 10), (18, 11), (18, 12), (18, 13), (17, 9), (16, 9),
          (17, 14), (16, 14), (15, 9), (14, 10), (14, 11), (14, 12),
          (14, 13), (15, 14)]),  # circle mouth
    (-1, [(18, 10), (18, 11), (18, 12), (18, 13), (17, 9), (16, 8),
          (17, 14), (16, 15), (15, 7), (15, 16), (14, 7), (14, 16),
          (14, 8), (14, 9), (14, 10), (14, 11), (14, 12), (14, 13),
          (14, 14), (14, 15), (15, 10), (15, 13)]),  # grin
)
EYES = (
    (600, [(9, c) for c in (7, 8, 9, 10, 16, 15, 14, 13)]),  # straight eyes
    (247, [(10, 7), (11, 7), (11, 8), (11, 9), (11, 10), (10, 10),
           (10, 13), (11, 13), (11, 14), (11, 15), (11, 16),
           (10, 16)]),  # down eyes
    (107, [(11, 7), (10, 7), (9, 8), (9, 9), (9, 10), (10, 11), (11, 11),
           (11, 16), (10, 16), (9, 15), (9, 14), (9, 13), (10, 12),
           (11, 12)]),  # single eyebrow eyes
    (-1, [(8, 8), (8, 9), (8, 10), (8, 7), (9, 7), (10, 7), (11, 7),
          (11, 8), (11, 9), (11, 10), (10, 10), (9, 10), (8, 13), (8, 14),
          (8, 15), (8, 16), (9, 13), (10, 13), (11, 13), (11, 14),
          (11, 15), (11, 16), (10, 16), (9, 16)]),  # circle eyes
)


def _pick(roll, table):
    """Return the value of the first table row whose threshold ``roll`` exceeds."""
    for threshold, value in table:
        if roll > threshold:
            return value


def _base_face(bw, bg, bc, eb):
    """Build the 24x24 grid: outer border (bc), inner border (bw), background
    (bg), and the octagonal face outline (eb)."""
    grid = [[bg] * 24 for _ in range(24)]
    for col in range(24):
        grid[0][col] = grid[23][col] = bc
    for row in range(1, 23):
        grid[row][0] = grid[row][23] = bc
        grid[row][1] = grid[row][22] = bw
    for col in range(2, 22):
        grid[1][col] = grid[22][col] = bw
    # Face outline: flat top/bottom rows, symmetric diagonals, straight sides
    for col in range(8, 16):
        grid[3][col] = grid[21][col] = eb
    for step in range(1, 6):
        for row in (3 + step, 21 - step):
            grid[row][8 - step] = grid[row][15 + step] = eb
    for row in range(9, 16):
        grid[row][2] = grid[row][21] = eb
    return grid


dirname = os.path.dirname(__file__)
dimensions = 480, 480  # resize 24x24 to 480x480
for x in range(0, 30):  # number of generated pictures
    # Rarity roll (Common-Rare-Epic-Legendary), then face construction
    bw, bg, bc = _pick(randint(0, 1000), PALETTES)
    emoji = _base_face(bw, bg, bc, BLACK)
    # Mouth settings
    for row, col in _pick(randint(0, 1000), MOUTHS):
        emoji[row][col] = BLACK
    # Eye settings
    for row, col in _pick(randint(0, 1000), EYES):
        emoji[row][col] = BLACK
    array = np.array(emoji, dtype=np.uint8)
    new_image = Image.fromarray(array)
    # resample=0 is nearest neighbour, preserving hard pixel edges
    new_image = new_image.resize(dimensions, resample=0)
    imgname = dirname + '/emoji_images/' + str(x) + '.png'
    new_image.save(imgname)
| 36.459091
| 106
| 0.412043
| 1,488
| 8,021
| 2.215054
| 0.067204
| 0.373786
| 0.491505
| 0.572816
| 0.773058
| 0.698119
| 0.669296
| 0.647148
| 0.638653
| 0.624697
| 0
| 0.181051
| 0.349956
| 8,021
| 220
| 107
| 36.459091
| 0.451093
| 0.039646
| 0
| 0.588235
| 0
| 0
| 0.002411
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02139
| 0
| 0.02139
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
537acd993ab0fefca1d2e3cf07c851893da83d25
| 3,142
|
py
|
Python
|
kakaodecrypt.test.py
|
voidedWarranties/kakaodecrypt
|
7c4ffac41d3ff5b773b4c6025cd8f9f738969fae
|
[
"WTFPL"
] | 1
|
2020-02-27T05:28:08.000Z
|
2020-02-27T05:28:08.000Z
|
kakaodecrypt.test.py
|
voidedWarranties/kakaodecrypt
|
7c4ffac41d3ff5b773b4c6025cd8f9f738969fae
|
[
"WTFPL"
] | null | null | null |
kakaodecrypt.test.py
|
voidedWarranties/kakaodecrypt
|
7c4ffac41d3ff5b773b4c6025cd8f9f738969fae
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/python3
import unittest
from kakaodecrypt import KakaoDecrypt


class KakaoDecryptTest(unittest.TestCase):
    """Golden-value tests for KakaoDecrypt salt generation and decryption."""

    def testGenSalt(self):
        # genSalt(user_id, enc) appears to prepend an enc-type-specific prefix
        # to the decimal user id and zero-pad the result to 16 bytes —
        # TODO confirm against kakaodecrypt's implementation.
        zero = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'
        # Non-positive user ids yield the all-zero salt
        self.assertEqual(KakaoDecrypt.genSalt(-1, 5), zero)
        self.assertEqual(KakaoDecrypt.genSalt(0, 5), zero)
        # enc types 0-14 use numeric prefixes (types 0 and 1 use none)
        self.assertEqual(KakaoDecrypt.genSalt(1234, 0), b"1234\0\0\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 1), b"1234\0\0\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 2), b"121234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 3), b"241234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 4), b"181234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 5), b"301234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 6), b"361234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 7), b"121234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 8), b"481234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 9), b"71234\0\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 10), b"351234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 11), b"401234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 12), b"171234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 13), b"231234\0\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 14), b"291234\0\0\0\0\0\0\0\0\0\0")
        # enc types 15-21 use word prefixes
        self.assertEqual(KakaoDecrypt.genSalt(1234, 15), b"isabel1234\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 16), b"kale1234\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 17), b"sulli1234\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 18), b"van1234\0\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 19), b"merry1234\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 20), b"kyle1234\0\0\0\0\0\0\0\0")
        self.assertEqual(KakaoDecrypt.genSalt(1234, 21), b"james1234\0\0\0\0\0\0\0")
        # A longer user id leaves less room for padding
        self.assertEqual(KakaoDecrypt.genSalt(216658451, 17), b"sulli216658451\0\0")
        # Unknown enc types are rejected
        self.assertRaises(ValueError, KakaoDecrypt.genSalt, 1234, 42)

    def testDecryptMessage(self):
        # Golden ciphertext/plaintext pairs captured from real data;
        # decrypt(user_id, enc, ciphertext_b64) -> plaintext
        self.assertEqual(KakaoDecrypt.decrypt(216658451, 17, 'UHVw8VBhUhdbIFTlvdBXdA=='), 'Hey friends!')
        self.assertEqual(KakaoDecrypt.decrypt(240440409, 22, 'pBO6rG5DQmOOfRwyoV6nqw=='), 'ㄱㅇㄷ')
        self.assertEqual(KakaoDecrypt.decrypt(195847548, 24, 'IICZJO/83CXZWZhNmiWmHg=='), "It's ok")
        # Degenerate input passes through unchanged
        self.assertEqual(KakaoDecrypt.decrypt(1234, 1, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'), '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
        # Non-UTF8 plaintext comes back as bytes
        self.assertEqual(KakaoDecrypt.decrypt(712919372478, 22, 'Ah06VZFMkDYZTdUrbrBb77fLJjvbAuv1xjrAkaLOdkQ='), b'r\x1db\x93\x9c\xd7\xe5\xe4.A')
        self.assertEqual(KakaoDecrypt.decrypt(283456151, 26, 'gYKexDBLvO7OwDqjD58LlQ=='), 'i have lasers')


if __name__ == '__main__':
    unittest.main()
| 66.851064
| 251
| 0.706875
| 564
| 3,142
| 3.923759
| 0.177305
| 0.181654
| 0.239946
| 0.278355
| 0.605061
| 0.603254
| 0.568007
| 0.568007
| 0.568007
| 0.568007
| 0
| 0.233716
| 0.086251
| 3,142
| 46
| 252
| 68.304348
| 0.537095
| 0.005411
| 0
| 0
| 0
| 0.45
| 0.323944
| 0.304417
| 0
| 0
| 0
| 0
| 0.8
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5380e9d5be56cc48b2d63ae0c8266cf55beefd30
| 215
|
py
|
Python
|
Datasets/__init__.py
|
whkwls2653/Pytorch_Face_Recognition-
|
60f3849def589957d9080457a1a9833112a71f6c
|
[
"MIT"
] | 62
|
2020-08-26T05:42:39.000Z
|
2022-03-31T04:25:50.000Z
|
Datasets/__init__.py
|
whkwls2653/Pytorch_Face_Recognition-
|
60f3849def589957d9080457a1a9833112a71f6c
|
[
"MIT"
] | 10
|
2020-08-27T06:46:10.000Z
|
2021-09-29T03:36:07.000Z
|
Datasets/__init__.py
|
whkwls2653/Pytorch_Face_Recognition-
|
60f3849def589957d9080457a1a9833112a71f6c
|
[
"MIT"
] | 13
|
2020-08-30T00:27:37.000Z
|
2021-12-09T02:56:07.000Z
|
from Datasets.webface import CASIA_WebFace
from Datasets.lfw import LFW
from Datasets.cfp import CFP_FP
from Datasets.agedb import AgeDB30
from Datasets.megaface import MegaFace
from Datasets.ms1m import MS_Celeb_1M
| 35.833333
| 42
| 0.865116
| 34
| 215
| 5.352941
| 0.441176
| 0.395604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.106977
| 215
| 6
| 43
| 35.833333
| 0.927083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
538c86e8e58ade9bcc1328effced905d90ff7d91
| 125
|
py
|
Python
|
rateLimit.py
|
lpmi-13/GitHubReadmeCorpus
|
2f5ecef52d1a35dcd7aa41f790a2a9222be8f215
|
[
"MIT"
] | 1
|
2017-06-18T09:39:19.000Z
|
2017-06-18T09:39:19.000Z
|
rateLimit.py
|
lpmi-13/GitHubReadmeCorpus
|
2f5ecef52d1a35dcd7aa41f790a2a9222be8f215
|
[
"MIT"
] | 1
|
2017-06-21T20:12:59.000Z
|
2017-07-02T14:44:26.000Z
|
rateLimit.py
|
lpmi-13/GitHubReadmeCorpus
|
2f5ecef52d1a35dcd7aa41f790a2a9222be8f215
|
[
"MIT"
] | null | null | null |
def return_rate_limit(github):
    """Return how many GitHub API requests remain for the given client.

    ``github`` is an object exposing ``get_rate_limit()`` (presumably a
    PyGithub ``Github`` instance — confirm against the caller).
    """
    # Drill down: rate-limit container -> "rate" bucket -> remaining count
    return github.get_rate_limit().rate.remaining
| 25
| 40
| 0.744
| 18
| 125
| 4.833333
| 0.388889
| 0.413793
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176
| 125
| 4
| 41
| 31.25
| 0.84466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
538dad057242140284e3e08bf453fa16d5bde06e
| 18,224
|
py
|
Python
|
pyshacl/constraints/core/property_pair_constraints.py
|
Martijn-Y-ai/pySHACL
|
ddbc11e13cc741d6ffa334089b0d18fd346f36c7
|
[
"Apache-2.0"
] | null | null | null |
pyshacl/constraints/core/property_pair_constraints.py
|
Martijn-Y-ai/pySHACL
|
ddbc11e13cc741d6ffa334089b0d18fd346f36c7
|
[
"Apache-2.0"
] | null | null | null |
pyshacl/constraints/core/property_pair_constraints.py
|
Martijn-Y-ai/pySHACL
|
ddbc11e13cc741d6ffa334089b0d18fd346f36c7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
https://www.w3.org/TR/shacl/#core-components-property-pairs
"""
from typing import Dict, List
import rdflib
from pyshacl.constraints.constraint_component import ConstraintComponent
from pyshacl.consts import SH
from pyshacl.errors import ConstraintLoadError, ReportableRuntimeError
from pyshacl.pytypes import GraphLike
from pyshacl.rdfutil import stringify_node
# URIRef terms for the four property-pair constraint parameters and the
# matching SHACL constraint-component identifiers they belong to.
SH_equals = SH.term('equals')
SH_disjoint = SH.term('disjoint')
SH_lessThan = SH.term('lessThan')
SH_lessThanOrEquals = SH.term('lessThanOrEquals')
SH_EqualsConstraintComponent = SH.term('EqualsConstraintComponent')
SH_DisjointConstraintComponent = SH.term('DisjointConstraintComponent')
SH_LessThanConstraintComponent = SH.term('LessThanConstraintComponent')
SH_LessThanOrEqualsConstraintComponent = SH.term('LessThanOrEqualsConstraintComponent')
class EqualsConstraintComponent(ConstraintComponent):
    """
    sh:equals specifies the condition that the set of all value nodes is equal to the set of objects of the triples that have the focus node as subject and the value of sh:equals as predicate.
    Link:
    https://www.w3.org/TR/shacl/#EqualsConstraintComponent
    Textual Definition:
    For each value node that does not exist as a value of the property $equals at the focus node, there is a validation result with the value node as sh:value. For each value of the property $equals at the focus node that is not one of the value nodes, there is a validation result with the value as sh:value.
    """

    shacl_constraint_component = SH_EqualsConstraintComponent

    def __init__(self, shape):
        """Collect the sh:equals parameter values declared on the shape.

        :raises ConstraintLoadError: if the shape has no sh:equals predicate.
        """
        super(EqualsConstraintComponent, self).__init__(shape)
        property_compare_set = set(self.shape.objects(SH_equals))
        if len(property_compare_set) < 1:
            raise ConstraintLoadError(
                "EqualsConstraintComponent must have at least one sh:equals predicate.",
                "https://www.w3.org/TR/shacl/#EqualsConstraintComponent",
            )
        self.property_compare_set = property_compare_set

    @classmethod
    def constraint_parameters(cls):
        # The SHACL parameters this component is triggered by
        return [SH_equals]

    @classmethod
    def constraint_name(cls):
        return "EqualsConstraintComponent"

    def make_generic_messages(self, datagraph: GraphLike, focus_node, value_node) -> List[rdflib.Literal]:
        """Build the human-readable message for a failed equals comparison."""
        if len(self.property_compare_set) < 2:
            # Single comparison property: name it directly in the message
            m = "Value of {}->{} != {}".format(
                stringify_node(datagraph, focus_node),
                stringify_node(self.shape.sg.graph, next(iter(self.property_compare_set))),
                stringify_node(datagraph, value_node),
            )
        else:
            # Several comparison properties: list them all, comma-separated
            rules = ", ".join(stringify_node(self.shape.sg.graph, p) for p in self.property_compare_set)
            m = "Value of {}->{} != {}".format(
                stringify_node(datagraph, focus_node), rules, stringify_node(datagraph, value_node)
            )
        return [rdflib.Literal(m)]

    def evaluate(self, target_graph: GraphLike, focus_value_nodes: Dict, _evaluation_path: List):
        """
        Evaluate every sh:equals property independently; returns
        (conforms, reports) where any single failure makes conforms False.

        :type target_graph: rdflib.Graph
        :type focus_value_nodes: dict
        :type _evaluation_path: list
        """
        reports = []
        non_conformant = False
        for eq in iter(self.property_compare_set):
            _nc, _r = self._evaluate_property_equals(eq, target_graph, focus_value_nodes)
            non_conformant = non_conformant or _nc
            reports.extend(_r)
        return (not non_conformant), reports

    def _evaluate_property_equals(self, eq, target_graph, f_v_dict):
        """Per focus node, compare the value-node set against the objects of
        ``eq`` and report every element present on only one side."""
        reports = []
        non_conformant = False
        for f, value_nodes in f_v_dict.items():
            value_node_set = set(value_nodes)
            compare_values = set(target_graph.objects(f, eq))
            # Both directions of the set difference are violations: a value
            # node missing from $equals, and an $equals value not among the
            # value nodes.  Each is reported with the node as sh:value.
            value_nodes_missing = value_node_set.difference(compare_values)
            compare_values_missing = compare_values.difference(value_node_set)
            if len(value_nodes_missing) > 0 or len(compare_values_missing) > 0:
                non_conformant = True
            else:
                continue
            for value_node in value_nodes_missing:
                rept = self.make_v_result(target_graph, f, value_node=value_node)
                reports.append(rept)
            for compare_value in compare_values_missing:
                rept = self.make_v_result(target_graph, f, value_node=compare_value)
                reports.append(rept)
        return non_conformant, reports
class DisjointConstraintComponent(ConstraintComponent):
    """
    sh:disjoint specifies the condition that the set of value nodes is disjoint with the set of objects of the triples that have the focus node as subject and the value of sh:disjoint as predicate.
    Link:
    https://www.w3.org/TR/shacl/#DisjointConstraintComponent
    Textual Definition:
    For each value node that also exists as a value of the property $disjoint at the focus node, there is a validation result with the value node as sh:value.
    """
    shacl_constraint_component = SH_DisjointConstraintComponent
    def __init__(self, shape):
        """Collect the sh:disjoint predicates declared on the shape; at least one is required."""
        super(DisjointConstraintComponent, self).__init__(shape)
        disjoint_predicates = set(self.shape.objects(SH_disjoint))
        if not disjoint_predicates:
            raise ConstraintLoadError(
                "DisjointConstraintComponent must have at least one sh:disjoint predicate.",
                "https://www.w3.org/TR/shacl/#DisjointConstraintComponent",
            )
        self.property_compare_set = disjoint_predicates
    @classmethod
    def constraint_parameters(cls):
        return [SH_disjoint]
    @classmethod
    def constraint_name(cls):
        return "DisjointConstraintComponent"
    def make_generic_messages(self, datagraph: GraphLike, focus_node, value_node) -> List[rdflib.Literal]:
        """Build the default violation message; list every predicate when more than one is declared."""
        if len(self.property_compare_set) > 1:
            rules = ", ".join(stringify_node(self.shape.sg.graph, p) for p in self.property_compare_set)
        else:
            rules = stringify_node(self.shape.sg.graph, next(iter(self.property_compare_set)))
        m = "Value of {}->{} == {}".format(
            stringify_node(datagraph, focus_node), rules, stringify_node(datagraph, value_node)
        )
        return [rdflib.Literal(m)]
    def evaluate(self, target_graph: GraphLike, focus_value_nodes: Dict, _evaluation_path: List):
        """
        :type target_graph: rdflib.Graph
        :type focus_value_nodes: dict
        :type _evaluation_path: list
        """
        all_reports: List = []
        any_failed = False
        for disjoint_predicate in self.property_compare_set:
            failed, new_reports = self._evaluate_property_disjoint(
                disjoint_predicate, target_graph, focus_value_nodes
            )
            any_failed = any_failed or failed
            all_reports.extend(new_reports)
        return (not any_failed), all_reports
    def _evaluate_property_disjoint(self, dj, target_graph, f_v_dict):
        """Report every value node that also appears as an object of *dj* on the focus node."""
        collected = []
        found_overlap = False
        for focus, v_nodes in f_v_dict.items():
            overlap = set(v_nodes) & set(target_graph.objects(focus, dj))
            if not overlap:
                continue
            found_overlap = True
            for shared_node in overlap:
                collected.append(self.make_v_result(target_graph, focus, value_node=shared_node))
        return found_overlap, collected
class LessThanConstraintComponent(ConstraintComponent):
    """
    sh:lessThan specifies the condition that each value node is smaller than all the objects of the triples that have the focus node as subject and the value of sh:lessThan as predicate.
    Link:
    https://www.w3.org/TR/shacl/#LessThanConstraintComponent
    Textual Definition:
    For each pair of value nodes and the values of the property $lessThan at the given focus node where the first value is not less than the second value (based on SPARQL's < operator) or where the two values cannot be compared, there is a validation result with the value node as sh:value.
    """
    shacl_constraint_component = SH_LessThanConstraintComponent
    def __init__(self, shape):
        # Gather all sh:lessThan predicates; the spec requires at least one,
        # and restricts this constraint to property shapes.
        super(LessThanConstraintComponent, self).__init__(shape)
        property_compare_set = set(self.shape.objects(SH_lessThan))
        if len(property_compare_set) < 1:
            raise ConstraintLoadError(
                "LessThanConstraintComponent must have at least one sh:lessThan predicate.",
                "https://www.w3.org/TR/shacl/#LessThanConstraintComponent",
            )
        if not shape.is_property_shape:
            raise ConstraintLoadError(
                "LessThanConstraintComponent can only be present on a PropertyShape, not a NodeShape.",
                "https://www.w3.org/TR/shacl/#LessThanConstraintComponent",
            )
        self.property_compare_set = property_compare_set
    @classmethod
    def constraint_parameters(cls):
        return [SH_lessThan]
    @classmethod
    def constraint_name(cls):
        return "LessThanConstraintComponent"
    def make_generic_messages(self, datagraph: GraphLike, focus_node, value_node) -> List[rdflib.Literal]:
        # Build the default violation message; when several predicates are
        # declared they are joined into one comma-separated list.
        # NOTE(review): the "<=" in the message text reads as the violated
        # condition (value was not strictly less) — confirm intended phrasing.
        if len(self.property_compare_set) < 2:
            m = "Value of {}->{} <= {}".format(
                stringify_node(datagraph, focus_node),
                stringify_node(self.shape.sg.graph, next(iter(self.property_compare_set))),
                stringify_node(datagraph, value_node),
            )
        else:
            rules = ", ".join(stringify_node(self.shape.sg.graph, p) for p in self.property_compare_set)
            m = "Value of {}->{} <= {}".format(
                stringify_node(datagraph, focus_node), rules, stringify_node(datagraph, value_node)
            )
        return [rdflib.Literal(m)]
    def evaluate(self, target_graph: GraphLike, focus_value_nodes: Dict, _evaluation_path: List):
        """
        :type target_graph: rdflib.Graph
        :type focus_value_nodes: dict
        :type _evaluation_path: list
        """
        reports = []
        non_conformant = False
        for lt in iter(self.property_compare_set):
            # sh:lessThan must name a property IRI; literals and blank nodes are invalid here.
            if isinstance(lt, rdflib.Literal) or isinstance(lt, rdflib.BNode):
                raise ReportableRuntimeError("Value of sh:lessThan MUST be a URI Identifier.")
            _nc, _r = self._evaluate_less_than(lt, target_graph, focus_value_nodes)
            non_conformant = non_conformant or _nc
            reports.extend(_r)
        return (not non_conformant), reports
    def _evaluate_less_than(self, lt, target_graph, f_v_dict):
        # Compare each value node with every object of *lt* on the same focus
        # node; a pair violates when the sides are not comparable (one string,
        # one non-string) or when value is not strictly less than compare.
        reports = []
        non_conformant = False
        for f, value_nodes in f_v_dict.items():
            value_node_set = set(value_nodes)
            compare_values = set(target_graph.objects(f, lt))
            for value_node in iter(value_node_set):
                if isinstance(value_node, rdflib.BNode):
                    raise ReportableRuntimeError("Cannot use sh:lessThan to compare a BlankNode.")
                value_is_string = False
                # Keep the original node for reporting; comparisons below may
                # replace value_node with a plain-str normalization.
                orig_value_node = value_node
                if isinstance(value_node, rdflib.URIRef):
                    value_node = str(value_node)
                    value_is_string = True
                elif isinstance(value_node, rdflib.Literal) and isinstance(value_node.value, str):
                    value_node = value_node.value
                    value_is_string = True
                for compare_value in compare_values:
                    if isinstance(compare_value, rdflib.BNode):
                        raise ReportableRuntimeError("Cannot use sh:lessThan to compare a BlankNode.")
                    compare_is_string = False
                    if isinstance(compare_value, rdflib.URIRef):
                        compare_value = str(compare_value)
                        compare_is_string = True
                    elif isinstance(compare_value, rdflib.Literal) and isinstance(compare_value.value, str):
                        compare_value = compare_value.value
                        compare_is_string = True
                    # Mixed string/non-string pairs cannot be compared -> violation.
                    if (value_is_string and not compare_is_string) or (compare_is_string and not value_is_string):
                        non_conformant = True
                    elif not value_node < compare_value:
                        non_conformant = True
                    else:
                        # Conformant pair: skip the report lines below.
                        continue
                    # Reached only on violation: report the un-normalized node.
                    rept = self.make_v_result(target_graph, f, value_node=orig_value_node)
                    reports.append(rept)
        return non_conformant, reports
class LessThanOrEqualsConstraintComponent(ConstraintComponent):
    """
    sh:lessThanOrEquals specifies the condition that each value node is smaller than or equal to all the objects of the triples that have the focus node as subject and the value of sh:lessThanOrEquals as predicate.
    Link:
    https://www.w3.org/TR/shacl/#LessThanOrEqualsConstraintComponent
    Textual Definition:
    For each pair of value nodes and the values of the property $lessThanOrEquals at the given focus node where the first value is not less than or equal to the second value (based on SPARQL's <= operator) or where the two values cannot be compared, there is a validation result with the value node as sh:value.
    """
    shacl_constraint_component = SH_LessThanOrEqualsConstraintComponent
    def __init__(self, shape):
        # Gather all sh:lessThanOrEquals predicates; at least one is required,
        # and the constraint is only valid on a property shape.
        super(LessThanOrEqualsConstraintComponent, self).__init__(shape)
        property_compare_set = set(self.shape.objects(SH_lessThanOrEquals))
        if len(property_compare_set) < 1:
            raise ConstraintLoadError(
                "LessThanOrEqualsConstraintComponent must have at least one sh:lessThanOrEquals predicate.",
                "https://www.w3.org/TR/shacl/#LessThanOrEqualsConstraintComponent",
            )
        if not shape.is_property_shape:
            raise ConstraintLoadError(
                "LessThanOrEqualsConstraintComponent can only be present on a PropertyShape, not a NodeShape.",
                "https://www.w3.org/TR/shacl/#LessThanOrEqualsConstraintComponent",
            )
        self.property_compare_set = property_compare_set
    @classmethod
    def constraint_parameters(cls):
        return [SH_lessThanOrEquals]
    @classmethod
    def constraint_name(cls):
        return "LessThanOrEqualsConstraintComponent"
    def make_generic_messages(self, datagraph: GraphLike, focus_node, value_node) -> List[rdflib.Literal]:
        # Default violation message; multiple predicates are comma-joined.
        # NOTE(review): the "<" in the message text reads as the violated
        # condition (value was not <= compare) — confirm intended phrasing.
        if len(self.property_compare_set) < 2:
            m = "Value of {}->{} < {}".format(
                stringify_node(datagraph, focus_node),
                stringify_node(self.shape.sg.graph, next(iter(self.property_compare_set))),
                stringify_node(datagraph, value_node),
            )
        else:
            rules = ", ".join(stringify_node(self.shape.sg.graph, p) for p in self.property_compare_set)
            m = "Value of {}->{} < {}".format(
                stringify_node(datagraph, focus_node), rules, stringify_node(datagraph, value_node)
            )
        return [rdflib.Literal(m)]
    def evaluate(self, target_graph: GraphLike, focus_value_nodes: Dict, _evaluation_path: List):
        """
        :type target_graph: rdflib.Graph
        :type focus_value_nodes: dict
        :type _evaluation_path: list
        """
        reports = []
        non_conformant = False
        for lt in iter(self.property_compare_set):
            # The predicate itself must be an IRI, never a literal or blank node.
            if isinstance(lt, rdflib.Literal) or isinstance(lt, rdflib.BNode):
                raise ReportableRuntimeError("Value of sh:lessThanOrEquals MUST be a URI Identifier.")
            _nc, _r = self._evaluate_ltoe(lt, target_graph, focus_value_nodes)
            non_conformant = non_conformant or _nc
            reports.extend(_r)
        return (not non_conformant), reports
    def _evaluate_ltoe(self, lt, target_graph, f_v_dict):
        # Same pairwise comparison as LessThanConstraintComponent above, but a
        # pair violates only when value is not <= compare (or incomparable).
        reports = []
        non_conformant = False
        for f, value_nodes in f_v_dict.items():
            value_node_set = set(value_nodes)
            compare_values = set(target_graph.objects(f, lt))
            for value_node in iter(value_node_set):
                if isinstance(value_node, rdflib.BNode):
                    raise ReportableRuntimeError("Cannot use sh:lessThanOrEquals to compare a BlankNode.")
                value_is_string = False
                # Keep the original node for reporting; the comparison may use
                # a plain-str normalization of URIs and string literals.
                orig_value_node = value_node
                if isinstance(value_node, rdflib.URIRef):
                    value_node = str(value_node)
                    value_is_string = True
                elif isinstance(value_node, rdflib.Literal) and isinstance(value_node.value, str):
                    value_node = value_node.value
                    value_is_string = True
                for compare_value in compare_values:
                    if isinstance(compare_value, rdflib.BNode):
                        raise ReportableRuntimeError("Cannot use sh:lessThanOrEquals to compare a BlankNode.")
                    compare_is_string = False
                    if isinstance(compare_value, rdflib.URIRef):
                        compare_value = str(compare_value)
                        compare_is_string = True
                    elif isinstance(compare_value, rdflib.Literal) and isinstance(compare_value.value, str):
                        compare_value = compare_value.value
                        compare_is_string = True
                    # Mixed string/non-string pairs are incomparable -> violation.
                    if (value_is_string and not compare_is_string) or (compare_is_string and not value_is_string):
                        non_conformant = True
                    elif not value_node <= compare_value:
                        non_conformant = True
                    else:
                        # Conformant pair: no report for this combination.
                        continue
                    rept = self.make_v_result(target_graph, f, value_node=orig_value_node)
                    reports.append(rept)
        return non_conformant, reports
| 46.608696
| 311
| 0.647827
| 2,089
| 18,224
| 5.410244
| 0.085208
| 0.049372
| 0.050964
| 0.038931
| 0.813307
| 0.803575
| 0.776854
| 0.719607
| 0.686516
| 0.657406
| 0
| 0.001741
| 0.275077
| 18,224
| 390
| 312
| 46.728205
| 0.853758
| 0.143821
| 0
| 0.69338
| 0
| 0
| 0.102514
| 0.026379
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083624
| false
| 0
| 0.02439
| 0.027875
| 0.205575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53e39d1a08020602570ee1c5bc546f6a213adb6e
| 16,401
|
py
|
Python
|
experiments/cifar_exp/plot_cifar_all_algo.py
|
jdey4/progressive-learning
|
410b3525ab63e1f7c32e9838460b2c9af7b9d256
|
[
"Apache-2.0"
] | 1
|
2022-01-03T12:36:28.000Z
|
2022-01-03T12:36:28.000Z
|
experiments/cifar_exp/plot_cifar_all_algo.py
|
jdey4/progressive-learning
|
410b3525ab63e1f7c32e9838460b2c9af7b9d256
|
[
"Apache-2.0"
] | null | null | null |
experiments/cifar_exp/plot_cifar_all_algo.py
|
jdey4/progressive-learning
|
410b3525ab63e1f7c32e9838460b2c9af7b9d256
|
[
"Apache-2.0"
] | null | null | null |
#%%
# Plotting script for CIFAR lifelong-learning experiments: aggregates
# per-shift/per-slot result pickles and renders FTE/BTE/TE figures.
import pickle
import matplotlib.pyplot as plt
from matplotlib import rcParams
# Let matplotlib auto-fit axis labels/titles inside the figure bounds.
rcParams.update({'figure.autolayout': True})
import numpy as np
import pandas as pd
from itertools import product
import seaborn as sns
import matplotlib.gridspec as gridspec
def unpickle(file):
    """Load and return one pickled object from the file at path *file*.

    Uses ``encoding='bytes'`` so pickles written by Python 2 load cleanly.
    """
    # Renamed the local from `dict` to `data`: the original shadowed the builtin.
    with open(file, 'rb') as fo:
        data = pickle.load(fo, encoding='bytes')
    return data
def get_fte_bte(err, single_err):
    """Compute transfer-efficiency curves over 10 tasks.

    fte[i] = single-task error / multitask error at task i's introduction;
    bte[i] / te[i] track how task i's error evolves as later tasks arrive.
    """
    n_tasks = 10
    bte = [
        [err[later][task] and err[task][task] / err[later][task] for later in range(task, n_tasks)]
        for task in range(n_tasks)
    ]
    te = [
        [single_err[task] / err[later][task] for later in range(task, n_tasks)]
        for task in range(n_tasks)
    ]
    fte = [single_err[task] / err[task][task] for task in range(n_tasks)]
    return fte, bte, te
def calc_mean_bte(btes,task_num=10,reps=6):
    """Average backward-transfer-efficiency curves over *reps* repetitions.

    ``btes[rep][task]`` is one curve; the result keeps the nested list-of-lists shape.
    """
    mean_bte = []
    for task in range(task_num):
        acc = 0
        for rep in range(reps):
            acc = acc + np.array(btes[rep][task])
        mean_bte.append(list(acc / reps))
    return mean_bte
def calc_mean_te(tes,task_num=10,reps=6):
    """Average transfer-efficiency curves over *reps* repetitions, task by task."""
    averaged = []
    for task in range(task_num):
        total = sum(np.array(tes[rep][task]) for rep in range(reps))
        averaged.append(list(total / reps))
    return averaged
def calc_mean_fte(ftes,task_num=10,reps=6):
    """Average forward-transfer-efficiency curves over repetitions.

    ``task_num`` and ``reps`` are kept for interface parity with
    ``calc_mean_bte``/``calc_mean_te`` but are unused: the mean is taken
    over the first axis of *ftes* directly.
    """
    # The original converted to ndarray twice; one conversion suffices.
    return list(np.mean(np.asarray(ftes), axis=0))
def get_error_matrix(filename):
    """Load a result pickle and return (single-task errors, multitask error matrix).

    ``err[base]`` holds 1 - accuracy for rows where 'base_task' == base + 1.
    """
    multitask_df, single_task_df = unpickle(filename)
    err = []
    for base_task in range(10):
        accuracies = multitask_df[multitask_df['base_task'] == base_task + 1]['accuracy']
        err.append(list(1 - np.array(accuracies)))
    single_err = 1 - np.array(single_task_df['accuracy'])
    return single_err, err
def stratified_scatter(te_dict,axis_handle,s,color):
    """Scatter per-algorithm TE points on *axis_handle*, one x-cluster per algorithm.

    Each algorithm's points are spread over a 0.7-wide band starting at its anchor.
    """
    algo_names = list(te_dict.keys())
    n_points = len(te_dict[algo_names[0]])
    x_anchors = np.arange(-.25, (len(algo_names)+1)*1, step=1)
    step = .7 / (n_points - 1)
    for idx, name in enumerate(algo_names):
        for point_no in range(len(te_dict[name])):
            axis_handle.scatter(
                x_anchors[idx] + step * point_no,
                te_dict[name][point_no],
                s=s,
                c=color[idx]
            )
#%%
### MAIN HYPERPARAMS ###
# slots/shifts: cross-validation splits per experiment; total_alg: number of
# compared algorithms; ntrees appears unused in this script.
ntrees = 10
slots = 10
task_num = 10
shifts = 6
total_alg = 9
# Display names and the per-sample-size result-file prefixes, index-aligned.
alg_name = ['L2N','L2F','L2F-','Prog-NN', 'DF-CNN','LwF','EWC','O-EWC','SI']
model_file_500 = ['dnn0','fixed_uf10','uf10','Prog_NN','DF_CNN', 'LwF','EWC', 'Online_EWC', 'SI']
model_file_5000 = ['dnn0','fixed_uf5000_40','uf5000_40','Prog_NN','DF_CNN', 'LwF','EWC', 'Online_EWC', 'SI']
# Accumulators: one list of curves per algorithm, filled by the loops below.
btes_500 = [[] for i in range(total_alg)]
ftes_500 = [[] for i in range(total_alg)]
tes_500 = [[] for i in range(total_alg)]
btes_5000 = [[] for i in range(total_alg)]
ftes_5000 = [[] for i in range(total_alg)]
tes_5000 = [[] for i in range(total_alg)]
########################
#%% code for 5000 samples
# One repetition per shift; average the TE/BTE/FTE curves over shifts.
reps = shifts
for alg in range(total_alg):
    count = 0
    te_tmp = [[] for _ in range(reps)]
    bte_tmp = [[] for _ in range(reps)]
    fte_tmp = [[] for _ in range(reps)]
    for shift in range(shifts):
        # The first three algorithms live under result/result/, the baselines elsewhere.
        if alg < 3:
            filename = 'result/result/'+model_file_5000[alg]+'_'+str(shift+1)+'_0'+'.pickle'
        else:
            filename = 'benchmarking_algorthms_result/'+model_file_5000[alg]+'_'+str(shift+1)+'.pickle'
        # NOTE(review): this unpickle result is unused — get_error_matrix
        # re-reads the same file; candidate cleanup.
        multitask_df, single_task_df = unpickle(filename)
        single_err, err = get_error_matrix(filename)
        fte, bte, te = get_fte_bte(err,single_err)
        te_tmp[count].extend(te)
        bte_tmp[count].extend(bte)
        fte_tmp[count].extend(fte)
        count+=1
    tes_5000[alg].extend(calc_mean_te(te_tmp,reps=reps))
    btes_5000[alg].extend(calc_mean_bte(bte_tmp,reps=reps))
    ftes_5000[alg].extend(calc_mean_fte(fte_tmp,reps=reps))
#%% code for 500 samples
# One repetition per (slot, shift) pair; average over all slots * shifts runs.
reps = slots*shifts
for alg in range(total_alg):
    count = 0
    te_tmp = [[] for _ in range(reps)]
    bte_tmp = [[] for _ in range(reps)]
    fte_tmp = [[] for _ in range(reps)]
    for slot in range(slots):
        for shift in range(shifts):
            # Same directory split as the 5000-sample loop above.
            if alg < 3:
                filename = 'result/result/'+model_file_500[alg]+'_'+str(shift+1)+'_'+str(slot)+'.pickle'
            else:
                filename = 'benchmarking_algorthms_result/'+model_file_500[alg]+'_'+str(shift+1)+'_'+str(slot)+'.pickle'
            # NOTE(review): unused — get_error_matrix re-reads the file.
            multitask_df, single_task_df = unpickle(filename)
            single_err, err = get_error_matrix(filename)
            fte, bte, te = get_fte_bte(err,single_err)
            te_tmp[count].extend(te)
            bte_tmp[count].extend(bte)
            fte_tmp[count].extend(fte)
            count+=1
    tes_500[alg].extend(calc_mean_te(te_tmp,reps=reps))
    btes_500[alg].extend(calc_mean_bte(bte_tmp,reps=reps))
    ftes_500[alg].extend(calc_mean_fte(fte_tmp,reps=reps))
#%%
# Final TE per task: take the last point of each task's TE curve (index 9-i).
# NOTE: 'Transfer Efficieny' is misspelled but is the actual column name used
# consistently below — do not change it in one place only.
te_500 = {'L2N':np.zeros(10,dtype=float), 'L2F':np.zeros(10,dtype=float),'L2Fc':np.zeros(10,dtype=float), 'Prog-NN':np.zeros(10,dtype=float), 'DF-CNN':np.zeros(10,dtype=float), 'LwF':np.zeros(10,dtype=float),'EWC':np.zeros(10,dtype=float), 'Online EWC':np.zeros(10,dtype=float), 'SI':np.zeros(10,dtype=float)}
for count,name in enumerate(te_500.keys()):
    for i in range(10):
        te_500[name][i] = tes_500[count][i][9-i]
df_500 = pd.DataFrame.from_dict(te_500)
df_500 = pd.melt(df_500,var_name='Algorithms', value_name='Transfer Efficieny')
'''mean_te = {'L2N':[np.mean(te['L2N'])],'L2F':[np.mean(te['L2F'])], 'L2Fc':[np.mean(te['L2Fc'])],
           'Prog-NN':[np.mean(te['Prog-NN'])],'DF-CNN':[np.mean(te['DF-CNN'])],
           'LwF':[np.mean(te['LwF'])],'EWC':[np.mean(te['EWC'])],
           'Online EWC':[np.mean(te['Online EWC'])], 'SI':[np.mean(te['SI'])]
          }
mean_df = pd.DataFrame.from_dict(mean_te)
mean_df = pd.melt(mean_df,var_name='Algorithms', value_name='Transfer Efficieny')'''
#%%
# Same final-TE extraction for the 5000-sample results.
te_5000 = {'L2N':np.zeros(10,dtype=float), 'L2F':np.zeros(10,dtype=float),'L2Fc':np.zeros(10,dtype=float), 'Prog-NN':np.zeros(10,dtype=float), 'DF-CNN':np.zeros(10,dtype=float), 'LwF':np.zeros(10,dtype=float),'EWC':np.zeros(10,dtype=float), 'Online EWC':np.zeros(10,dtype=float), 'SI':np.zeros(10,dtype=float)}
for count,name in enumerate(te_5000.keys()):
    for i in range(10):
        te_5000[name][i] = tes_5000[count][i][9-i]
df_5000 = pd.DataFrame.from_dict(te_5000)
df_5000 = pd.melt(df_5000,var_name='Algorithms', value_name='Transfer Efficieny')
#%%
# Top row of the 2x2 benchmark figure: 500-sample FTE (left) and BTE (right).
clr = ["#00008B", "#e41a1c", "#e41a1c", "#a65628", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#CCCC00"]
c = sns.color_palette(clr, n_colors=len(clr))
fontsize=24
ticksize=20
fig, ax = plt.subplots(2,2, figsize=(14.5,12))
fig.tight_layout(pad=12.0)
# plt.subplots_adjust(right=0.5)
# First three algorithms get thick lines (dashed for L2F-); the rest default width.
for i, fte in enumerate(ftes_500):
    if i == 0:
        ax[0][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', markersize=12, label=alg_name[i], linewidth=3)
        continue
    if i == 1:
        ax[0][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', markersize=12, label=alg_name[i], linewidth=3)
        continue
    if i == 2:
        ax[0][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', linestyle='dashed', markersize=12, label=alg_name[i], linewidth=3)
        continue
    ax[0][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', markersize=12, label=alg_name[i])
ax[0][0].set_xticks(np.arange(1,11))
ax[0][0].set_yticks([0.9, 1, 1.1, 1.2, 1.3,1.4])
ax[0][0].set_ylim(0.85, 1.41)
ax[0][0].tick_params(labelsize=ticksize)
# ax[0].legend(algos, loc='upper left', fontsize=14)
# ax[0].legend(algos, bbox_to_anchor=(1.2, -.2), loc=2, borderaxespad=0)
ax[0][0].set_ylabel('Forward Transfer Efficiency', fontsize=fontsize)
ax[0][0].set_xlabel('Number of tasks seen', fontsize=fontsize)
#ax[0][0].grid(axis='x')
# BTE panel: one line segment per base task, restarting at each task index;
# labels are attached only on the first segment (i == 0) to avoid duplicates.
for i in range(task_num - 1):
    et = np.zeros((total_alg,task_num-i))
    for j in range(0,total_alg):
        et[j,:] = np.asarray(btes_500[j][i])
    ns = np.arange(i + 1, task_num + 1)
    for j in range(0,total_alg):
        if j == 0:
            if i == 0:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j], linewidth = 3)
            else:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j], linewidth = 3)
        elif j == 1:
            if i == 0:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j], linewidth = 3)
            else:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j], linewidth = 3)
        elif j==2:
            if i == 0:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j], linestyle='dashed', linewidth = 3)
            else:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j], linestyle='dashed', linewidth = 3)
        else:
            if i == 0:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j])
            else:
                ax[0][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j])
# ax[1].set_title(ttle, fontsize=20)
ax[0][1].set_xlabel('Number of tasks seen', fontsize=fontsize)
ax[0][1].set_ylabel('Backward Transfer Efficiency', fontsize=fontsize)
# ax.set_ylim(0.05 - 0.01, 0.5 + 0.01)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# ax[1].legend(loc='upper left', fontsize=12)
#ax[0][1].legend(loc='center left', bbox_to_anchor=(1,0.5), fontsize=22)
ax[0][1].set_yticks([.4,.6,.8,.9,1, 1.1,1.2])
ax[0][1].set_xticks(np.arange(1,11))
ax[0][1].set_ylim(0.85, 1.19)
ax[0][1].tick_params(labelsize=ticksize)
#ax[0][1].grid(axis='x')
# Hide top/right spines and draw the TE = 1 reference line on both panels.
right_side = ax[0][0].spines["right"]
right_side.set_visible(False)
top_side = ax[0][0].spines["top"]
top_side.set_visible(False)
right_side = ax[0][1].spines["right"]
right_side.set_visible(False)
top_side = ax[0][1].spines["top"]
top_side.set_visible(False)
ax[0][0].hlines(1, 1,10, colors='grey', linestyles='dashed',linewidth=1.5)
ax[0][1].hlines(1, 1,10, colors='grey', linestyles='dashed',linewidth=1.5)
######################################
# Bottom row of the 2x2 figure: 5000-sample FTE (left) and BTE (right).
# Mirrors the 500-sample panels above.
for i, fte in enumerate(ftes_5000):
    if i == 0:
        ax[1][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', markersize=12, label=alg_name[i], linewidth=3)
        continue
    if i == 1:
        ax[1][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', markersize=12, label=alg_name[i], linewidth=3)
        continue
    if i == 2:
        ax[1][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', linestyle='dashed', markersize=12, label=alg_name[i], linewidth=3)
        continue
    ax[1][0].plot(np.arange(1,11), fte, color=clr[i], marker='.', markersize=12, label=alg_name[i])
ax[1][0].set_xticks(np.arange(1,11))
ax[1][0].set_yticks([0.9, 1, 1.1, 1.2, 1.3,1.4])
ax[1][0].set_ylim(0.85, 1.41)
ax[1][0].tick_params(labelsize=ticksize)
# ax[0].legend(algos, loc='upper left', fontsize=14)
# ax[0].legend(algos, bbox_to_anchor=(1.2, -.2), loc=2, borderaxespad=0)
ax[1][0].set_ylabel('Forward Transfer Efficiency', fontsize=fontsize)
ax[1][0].set_xlabel('Number of tasks seen', fontsize=fontsize)
#ax[0][0].grid(axis='x')
for i in range(task_num - 1):
    et = np.zeros((total_alg,task_num-i))
    for j in range(0,total_alg):
        et[j,:] = np.asarray(btes_5000[j][i])
    ns = np.arange(i + 1, task_num + 1)
    for j in range(0,total_alg):
        if j == 0:
            if i == 0:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j], linewidth = 3)
            else:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j], linewidth = 3)
        elif j == 1:
            if i == 0:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j], linewidth = 3)
            else:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j], linewidth = 3)
        elif j==2:
            if i == 0:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j], linestyle='dashed', linewidth = 3)
            else:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j], linestyle='dashed', linewidth = 3)
        else:
            if i == 0:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, label = alg_name[j], color=clr[j])
            else:
                ax[1][1].plot(ns, et[j,:], marker='.', markersize=8, color=clr[j])
# ax[1].set_title(ttle, fontsize=20)
ax[1][1].set_xlabel('Number of tasks seen', fontsize=fontsize)
ax[1][1].set_ylabel('Backward Transfer Efficiency', fontsize=fontsize)
# ax.set_ylim(0.05 - 0.01, 0.5 + 0.01)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# ax[1].legend(loc='upper left', fontsize=12)
#ax[0][1].legend(loc='center left', bbox_to_anchor=(1,0.5), fontsize=22)
ax[1][1].set_yticks([.4,.6,.8,.9,1, 1.1,1.2])
ax[1][1].set_xticks(np.arange(1,11))
ax[1][1].set_ylim(0.85, 1.19)
ax[1][1].tick_params(labelsize=ticksize)
#ax[0][1].grid(axis='x')
right_side = ax[1][0].spines["right"]
right_side.set_visible(False)
top_side = ax[1][0].spines["top"]
top_side.set_visible(False)
right_side = ax[1][1].spines["right"]
right_side.set_visible(False)
top_side = ax[1][1].spines["top"]
top_side.set_visible(False)
ax[1][0].hlines(1, 1,10, colors='grey', linestyles='dashed',linewidth=1.5)
ax[1][1].hlines(1, 1,10, colors='grey', linestyles='dashed',linewidth=1.5)
#plt.tight_layout()
#ax[0][1].legend(loc='upper center', bbox_to_anchor=(0.5, -0.3),
#          fancybox=True, shadow=True, ncol=3,fontsize=15)
# Single shared legend anchored to the top-right panel, then save the figure.
ax[0][1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=18)
# lgd = fig.legend(algos, bbox_to_anchor=(1, 0.45), loc='center left', fontsize=18)
plt.savefig('result/figs/benchmark.pdf', dpi=500)
#%%
# Second figure: final transfer efficiency per algorithm — seaborn point plot
# (mean +/- sd) overlaid with the per-task scatter from stratified_scatter.
fig, ax = plt.subplots(1,2, figsize=(12,6))
ax[0].tick_params(labelsize=22)
#ax_ = sns.stripplot(x="Algorithms", y="Transfer Efficieny", data=df, palette=c, size=6, ax=ax[1][1])
ax[0].hlines(1, -1,8, colors='grey', linestyles='dashed',linewidth=1.5)
#sns.boxplot(x="Algorithms", y="Transfer Efficieny", data=mean_df, palette=c, linewidth=3, ax=ax[1][1])
ax_=sns.pointplot(x="Algorithms", y="Transfer Efficieny", data=df_500, join=False, color='grey', linewidth=1.5, ci='sd',ax=ax[0])
#ax_.set_yticks([.4,.6,.8,1, 1.2,1.4])
ax_.set_xlabel('', fontsize=fontsize)
ax[0].set_ylabel('Final Transfer Efficiency', fontsize=fontsize)
ax_.set_xticklabels(
    ['L2N','L2F','L2F-','Prog-NN','DF-CNN','LwF','EWC','O-EWC','SI'],
    fontsize=16,rotation=45,ha="right",rotation_mode='anchor'
)
stratified_scatter(te_500,ax[0],10,c)
right_side = ax[0].spines["right"]
right_side.set_visible(False)
top_side = ax[0].spines["top"]
top_side.set_visible(False)
ax[0].hlines(1, 1,9, colors='grey', linestyles='dashed',linewidth=1.5)
# Right panel: same plot for the 5000-sample results.
ax[1].tick_params(labelsize=22)
#ax_ = sns.stripplot(x="Algorithms", y="Transfer Efficieny", data=df, palette=c, size=6, ax=ax[1][1])
ax[1].hlines(1, -1,8, colors='grey', linestyles='dashed',linewidth=1.5)
#sns.boxplot(x="Algorithms", y="Transfer Efficieny", data=mean_df, palette=c, linewidth=3, ax=ax[1][1])
ax_=sns.pointplot(x="Algorithms", y="Transfer Efficieny", data=df_5000, join=False, color='grey', linewidth=1.5, ci='sd',ax=ax[1])
#ax_.set_yticks([.4,.6,.8,1, 1.2,1.4])
ax_.set_xlabel('', fontsize=fontsize)
ax[1].set_ylabel('Final Transfer Efficiency', fontsize=fontsize)
ax_.set_xticklabels(
    ['L2N','L2F','L2F-','Prog-NN','DF-CNN','LwF','EWC','O-EWC','SI'],
    fontsize=16,rotation=45,ha="right",rotation_mode='anchor'
)
stratified_scatter(te_5000,ax[1],10,c)
right_side = ax[1].spines["right"]
right_side.set_visible(False)
top_side = ax[1].spines["top"]
top_side.set_visible(False)
ax[1].hlines(1, 1,9, colors='grey', linestyles='dashed',linewidth=1.5)
plt.savefig('result/figs/final_TE.pdf', dpi=500)
# %%
| 36.446667
| 310
| 0.607646
| 2,735
| 16,401
| 3.516636
| 0.095064
| 0.015596
| 0.009565
| 0.020586
| 0.805261
| 0.778956
| 0.762321
| 0.720732
| 0.6911
| 0.659597
| 0
| 0.064793
| 0.184135
| 16,401
| 449
| 311
| 36.52784
| 0.653987
| 0.111152
| 0
| 0.407407
| 0
| 0
| 0.083423
| 0.007785
| 0.003367
| 0
| 0
| 0
| 0
| 1
| 0.023569
| false
| 0
| 0.026936
| 0
| 0.070707
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53f1606593a923659bc78d0d52a9dc7a073dfada
| 178
|
py
|
Python
|
hacmec/__init__.py
|
joernheissler/hacmec
|
1a28abd4211619dae85def7cc6b49cbc80e9c5c6
|
[
"MIT"
] | null | null | null |
hacmec/__init__.py
|
joernheissler/hacmec
|
1a28abd4211619dae85def7cc6b49cbc80e9c5c6
|
[
"MIT"
] | null | null | null |
hacmec/__init__.py
|
joernheissler/hacmec
|
1a28abd4211619dae85def7cc6b49cbc80e9c5c6
|
[
"MIT"
] | null | null | null |
# Package version string.
VERSION = '0.0.3'
# ACME v2 directory endpoint URLs for Let's Encrypt (production and staging).
ENDPOINT_LETSENCRYPT = "https://acme-v02.api.letsencrypt.org/directory"
ENDPOINT_LETSENCRYPT_STAGING = "https://acme-staging-v02.api.letsencrypt.org/directory"
| 44.5
| 87
| 0.792135
| 24
| 178
| 5.75
| 0.5
| 0.275362
| 0.246377
| 0.289855
| 0.42029
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04142
| 0.050562
| 178
| 3
| 88
| 59.333333
| 0.775148
| 0
| 0
| 0
| 0
| 0
| 0.589888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53f2447fab6825c35d1177a4a32de6e8e5da34af
| 23
|
py
|
Python
|
recoverid/cities/models/__init__.py
|
C3-Zally/api-python
|
4e64ad345d59daa32e750d5f786f2185533f3b38
|
[
"MIT"
] | null | null | null |
recoverid/cities/models/__init__.py
|
C3-Zally/api-python
|
4e64ad345d59daa32e750d5f786f2185533f3b38
|
[
"MIT"
] | 1
|
2020-08-12T01:26:08.000Z
|
2020-08-12T01:26:08.000Z
|
recoverid/cities/models/__init__.py
|
C3-Zally/api-python
|
4e64ad345d59daa32e750d5f786f2185533f3b38
|
[
"MIT"
] | null | null | null |
from .city import City
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
54ef935f94ece42860db040ce04757cdf093cd86
| 10,671
|
py
|
Python
|
data_pre/reg_preprocess_example/preprocess_brain35.py
|
norveclibalikci/easyreg-mirror
|
a16254733fe957cc4024923f8dce91412966a189
|
[
"Apache-2.0"
] | null | null | null |
data_pre/reg_preprocess_example/preprocess_brain35.py
|
norveclibalikci/easyreg-mirror
|
a16254733fe957cc4024923f8dce91412966a189
|
[
"Apache-2.0"
] | null | null | null |
data_pre/reg_preprocess_example/preprocess_brain35.py
|
norveclibalikci/easyreg-mirror
|
a16254733fe957cc4024923f8dce91412966a189
|
[
"Apache-2.0"
] | null | null | null |
"""
A demo on data augmentation and segmentation for brain dataset
"""
import os, sys
# Make the repo root, the cwd, and the easyreg package importable when this
# file is run directly as a script.
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('.'))
sys.path.insert(0,os.path.abspath('../easyreg'))
from data_pre.file_tool import get_file_list
from easyreg.reg_data_utils import read_txt_into_list, write_list_into_txt
from data_pre.seg_data_pool import BaseSegDataSet
def find_corr_label(img_path_list,label_root_path=None,label_switch=None):
    """Locate the annotation label file matching each image path.

    The subject folder name (two directory levels above each image file) keys
    the annotation lookup; when *label_root_path* is given, the label paths
    are re-rooted there. ``label_switch`` is accepted for interface
    compatibility but unused.
    """
    def _subject_name(path):
        parent = os.path.split(path)[0]
        grandparent = os.path.split(parent)[0]
        return os.path.split(grandparent)[-1]

    label_path_list = []
    for img_path in img_path_list:
        subject = _subject_name(img_path)
        matches = get_file_list('/playpen-raid1/Data/annotation', subject + "*" + ".nii.gz")
        label_path_list.append(matches[0])
    if label_root_path is not None:
        label_path_list = [
            lbl.replace(os.path.split(lbl)[0], label_root_path) for lbl in label_path_list
        ]
    return label_path_list
def get_file_name( img_path):
    """Return the name of the directory two levels above *img_path*."""
    grandparent = os.path.split(os.path.split(img_path)[0])[0]
    return os.path.split(grandparent)[-1]
dataset = BaseSegDataSet(file_type_list=["T1w_acpc_dc_restore.nii.gz"])
data_path = "/playpen-raid1/Data/Brain35"
output_path ='/playpen-raid1/zyshen/data/brain_35/corrected'
divided_ratio = (0.6,0.1,0.3)
dataset.set_data_path(data_path)
dataset.find_corr_label = find_corr_label
dataset.get_file_name = get_file_name
dataset.set_output_path(output_path)
dataset.set_divided_ratio(divided_ratio)
dataset.img_after_resize = (200,240,200)
dataset.prepare_data()
from easyreg.aug_utils import gen_post_aug_pair_list
# File/name lists written by dataset.prepare_data() above.
train_file_path = "/playpen-raid1/zyshen/data/brain_35/corrected/train/file_path_list.txt"
test_file_path = "/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt"
train_name_path = "/playpen-raid1/zyshen/data/brain_35/corrected/train/file_name_list.txt"
test_name_path = "/playpen-raid1/zyshen/data/brain_35/corrected/test/file_name_list.txt"
# Outputs: test-time augmentation pair lists (each test image paired with
# several training images).
output_file_path = "/playpen-raid1/zyshen/data/brain_35/corrected/test_aug_path_list.txt"
output_name_path = "/playpen-raid1/zyshen/data/brain_35/corrected/test_aug_name_list.txt"
train_path_list = read_txt_into_list(train_file_path)
test_path_list = read_txt_into_list(test_file_path)
train_name_list = read_txt_into_list(train_name_path)
test_name_list = read_txt_into_list(test_name_path)
# Each test entry is a [image_path, label_path] pair.
test_img_path_list = [path[0] for path in test_path_list]
test_label_path_list = [path[1] for path in test_path_list]
# Train entries may be [image, label] pairs or bare image paths, depending on
# how the list file was written; handle both layouts.
if isinstance(train_path_list[0],list):
    train_img_path_list = [path[0] for path in train_path_list]
    train_label_path_list = [path[1] for path in train_path_list]
else:
    train_img_path_list = train_path_list
    train_label_path_list = None
# pair_num_limit=-1: no cap on the total number of pairs;
# per_num_limit=5: at most 5 training partners per test image.
img_pair_list, pair_name_list = gen_post_aug_pair_list(test_img_path_list,train_img_path_list, test_fname_list=test_name_list,train_fname_list=train_name_list,
                                        test_label_path_list=test_label_path_list,train_label_path_list=train_label_path_list, pair_num_limit=-1, per_num_limit=5)
# Drop the first character of each pair name — presumably a separator prefix
# added by gen_post_aug_pair_list; TODO confirm against its implementation.
pair_name_list = [pair_name[1:] for pair_name in pair_name_list]
write_list_into_txt(output_file_path,img_pair_list)
write_list_into_txt(output_name_path,pair_name_list)
# The three training-augmentation outputs below (fluid-based "k2", random
# fluid, and b-spline) share the same layout: augmented images live under
# <aug_root>/aug as *_image.nii.gz, each with a matching *_label.nii.gz.
# For each of them we write a [image, label] train file list for the
# corresponding segmentation experiment. This replaces three copy-pasted
# sections with one data-driven loop (same order, same side effects).
_aug_to_seg_folders = [
    ("/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train",
     "/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_k2/train"),
    ("/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train_random",
     "/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_random/train"),
    ("/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train_bspline",
     "/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_bspline/train"),
]
for train_aug_output_path, output_folder in _aug_to_seg_folders:
    train_aug_output_full_path = train_aug_output_path + "/aug"
    os.makedirs(output_folder, exist_ok=True)
    output_path = os.path.join(output_folder, "file_path_list.txt")
    # Pair every augmented image with its label (same name, different suffix).
    train_aug_img_list = get_file_list(train_aug_output_full_path, "*_image.nii.gz")
    train_aug_label_list = [path.replace("_image.nii.gz", "_label.nii.gz") for path in train_aug_img_list]
    img_label_path_list = [[img_path, label_path] for img_path, label_path in zip(train_aug_img_list, train_aug_label_list)]
    write_list_into_txt(output_path, img_label_path_list)
"""
training phase augmentation
python demo_for_data_aug.py --file_txt=/playpen-raid1/zyshen/data/brain_35/corrected/train/file_path_list.txt --name_txt=/playpen-raid1/zyshen/data/brain_35/corrected/train/file_name_list.txt --txt_format=aug_by_file --setting_folder_path=/playpen-raid/zyshen/reg_clean/demo/demo_settings/data_aug/opt_lddmm_brain35 --task_output_path=/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train --gpu_id_list 2 3 2 3
testing phase augmentation
python demo_for_data_aug.py --file_txt=/playpen-raid1/zyshen/data/brain_35/corrected/test_aug_path_list.txt --name_txt=/playpen-raid1/zyshen/data/brain_35/corrected/test_aug_name_list.txt --txt_format=aug_by_line --setting_folder_path=/playpen-raid/zyshen/reg_clean/demo/demo_settings/data_aug/opt_lddmm_brain35_postaug --task_output_path=/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_test --gpu_id_list 0 1 2 3 0 1 2 3
training phase augmentation (random)
python gen_aug_samples.py -t=/playpen-raid1/zyshen/data/brain_35/corrected/train/file_path_list.txt -as=/playpen-raid/zyshen/reg_clean/demo/demo_settings/data_aug/rand_lddmm_brain35_random/data_aug_setting.json -ms=/playpen-raid/zyshen/reg_clean/demo/demo_settings/data_aug/rand_lddmm_brain35_random/mermaid_nonp_settings.json -o=/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train_random/aug -g=0
testing phase augmentation (random)
python gen_aug_samples.py -t=/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt -as=/playpen-raid/zyshen/reg_clean/demo/demo_settings/data_aug/rand_lddmm_brain35_postaug_random/data_aug_setting.json -ms=/playpen-raid/zyshen/reg_clean/demo/demo_settings/data_aug/rand_lddmm_brain35_random/mermaid_nonp_settings.json -o=/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_test_random/aug -g=1
training phase augmentation (bspline)
python gen_aug_samples.py -t=/playpen-raid1/zyshen/data/brain_35/corrected/train/file_path_list.txt -as=/playpen-raid/zyshen/reg_clean/demo/demo_settings/data_aug/rand_bspline_brain35/data_aug_setting.json --bspline -o=/playpen-raid1/zyshen/data/brain_35/corrected/data_aug_train_bspline/aug
train segmentation without aug
python demo_for_seg_train.py -o /playpen-raid1/zyshen/data/brain_35 -dtn=corrected -tn=custom_seg -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_train -g=0
train segmentation with aug
python demo_for_seg_train.py -o /playpen-raid1/zyshen/data/brain_35/corrected -dtn=seg_aug_train_k2 -tn=aug_seg -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_train -g=1
train segmentation with aug random
python demo_for_seg_train.py -o /playpen-raid1/zyshen/data/brain_35/corrected -dtn=seg_aug_train_random -tn=aug_seg -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_train -g=2
train segmentation with aug bspline
python demo_for_seg_train.py -o /playpen-raid1/zyshen/data/brain_35/corrected -dtn=seg_aug_train_bspline -tn=aug_seg -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_train -g=3
test segmentation without aug
python demo_for_seg_eval.py -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_eval -txt=/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt -m=/playpen-raid1/zyshen/data/brain_35/corrected/custom_seg/checkpoints/model_best.pth.tar -o=/playpen-raid1/zyshen/data/brain_35/corrected/custom_seg_res -g=0
test segmentation with training aug
python demo_for_seg_eval.py -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_eval -txt=/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt -m=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_k2/aug_seg/checkpoints/epoch_150_ -o=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_k2_res_epoch150 -g=1
test segmentation with training_random aug
python demo_for_seg_eval.py -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_eval -txt=/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt -m=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_random/aug_seg/checkpoints/model_best.pth.tar -o=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_random_res -g=2
test segmentation with bspline aug
python demo_for_seg_eval.py -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_eval -txt=/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt -m=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_bspline/aug_seg/checkpoints/model_best.pth.tar -o=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_bspline_res -g=3
test segmentation with training testing aug
python demo_for_seg_eval.py -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_eval_aug -txt=/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt -m=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_k2/aug_seg/checkpoints/epoch_150_ -o=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_and_test_res_trainedk2testk2 -g=2
test segmentation with training testing random_aug
python demo_for_seg_eval.py -ts=/playpen-raid/zyshen/reg_clean/debug/brain35/seg_eval_aug_random -txt=/playpen-raid1/zyshen/data/brain_35/corrected/test/file_path_list.txt -m=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_random/aug_seg/checkpoints/model_best.pth.tar -o=/playpen-raid1/zyshen/data/brain_35/corrected/seg_aug_train_and_test_random_res -g=2
"""
| 65.067073
| 428
| 0.830569
| 1,865
| 10,671
| 4.370509
| 0.080965
| 0.048092
| 0.103791
| 0.126856
| 0.79389
| 0.769722
| 0.743344
| 0.723224
| 0.707398
| 0.692553
| 0
| 0.025284
| 0.05857
| 10,671
| 163
| 429
| 65.466258
| 0.786084
| 0.00581
| 0
| 0.2625
| 0
| 0
| 0.219545
| 0.180266
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.0625
| 0
| 0.1125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54fa8aba69d224d4910f4868277e2c72af6dcc07
| 1,567
|
py
|
Python
|
Data Collection/test.py
|
YooInKeun/CAU_CSE_Capstone_3
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
[
"MIT"
] | 6
|
2019-12-07T07:30:34.000Z
|
2022-01-20T14:26:44.000Z
|
Data Collection/test.py
|
YooInKeun/CAU_CSE_Capstone_3
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
[
"MIT"
] | 9
|
2019-12-28T06:18:53.000Z
|
2022-01-13T01:54:21.000Z
|
Data Collection/test.py
|
YooInKeun/CAU_CSE_Capstone_3
|
51405c4bed2b55661aa0708c8acea17fe72aa701
|
[
"MIT"
] | 1
|
2020-05-21T15:55:45.000Z
|
2020-05-21T15:55:45.000Z
|
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import json
# Fetch the Dabang map-search page. The URL fragment encodes the search
# filters/position as URL-escaped JSON. A browser-like User-Agent is sent
# because urllib's default agent is commonly rejected by sites.
req = Request("https://www.dabangapp.com/search#/map?filters=%7B%22multi_room_type%22%3A%5B0%2C1%2C2%5D%2C%22selling_type%22%3A%5B0%2C1%2C2%5D%2C%22deposit_range%22%3A%5B0%2C999999%5D%2C%22price_range%22%3A%5B0%2C999999%5D%2C%22trade_range%22%3A%5B0%2C999999%5D%2C%22maintenance_cost_range%22%3A%5B0%2C999999%5D%2C%22include_maintenance_option1%22%3Atrue%2C%22room_size%22%3A%5B0%2C999999%5D%2C%22supply_space_range%22%3A%5B0%2C999999%5D%2C%22room_floor_multi%22%3A%5B1%2C2%2C3%2C4%2C5%2C6%2C7%2C-1%2C0%5D%2C%22division%22%3Afalse%2C%22duplex%22%3Afalse%2C%22room_type%22%3A%5B1%2C2%5D%2C%22enter_date_range%22%3A%5B0%2C999999%5D%2C%22parking_average_range%22%3A%5B0%2C999999%5D%2C%22household_num_range%22%3A%5B0%2C999999%5D%2C%22parking%22%3Afalse%2C%22animal%22%3Afalse%2C%22short_lease%22%3Afalse%2C%22full_option%22%3Afalse%2C%22built_in%22%3Afalse%2C%22elevator%22%3Afalse%2C%22balcony%22%3Afalse%2C%22loan%22%3Afalse%2C%22pano%22%3Afalse%2C%22deal_type%22%3A%5B0%2C1%5D%7D&position=%7B%22location%22%3A%5B%5B126.84998760716898%2C37.41464989903129%5D%2C%5B127.12956613898%2C37.715102046666125%5D%5D%2C%22center%22%3A%5B126.98949617689095%2C37.5649606036606%5D%2C%22zoom%22%3A9%7D&search=%7B%22id%22%3A%22%22%2C%22type%22%3A%22%22%2C%22name%22%3A%22%22%7D&tab=all", headers={'User-Agent': 'Mozilla/5.0'})
main_html = urlopen(req).read()
soup = BeautifulSoup(main_html, "html.parser")
print(soup)  # debug: dump the entire fetched document
# NOTE(review): this page appears to render its listings client-side, and the
# class names look like generated styled-components hashes; this selector may
# match nothing in the static HTML — confirm against a live fetch.
raws = soup.find_all("ul", {"class": "styled__Ul-ityzo6-5 fxRDHg"})
print(raws)
| 156.7
| 1,309
| 0.804722
| 290
| 1,567
| 4.241379
| 0.393103
| 0.061789
| 0.068293
| 0.109756
| 0.247967
| 0.220325
| 0.204878
| 0.087805
| 0
| 0
| 0
| 0.296224
| 0.019783
| 1,567
| 10
| 1,310
| 156.7
| 0.504557
| 0
| 0
| 0
| 0
| 0.111111
| 0.840561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
070091cb6032a03981c635127be648be10409990
| 396
|
py
|
Python
|
pydlm/modeler/__init__.py
|
dtolpin/pydlm
|
0016876bf6357784b161ecaa4f0798c063c54785
|
[
"BSD-3-Clause"
] | 423
|
2016-09-15T06:45:26.000Z
|
2022-03-29T08:41:11.000Z
|
pydlm/modeler/__init__.py
|
dtolpin/pydlm
|
0016876bf6357784b161ecaa4f0798c063c54785
|
[
"BSD-3-Clause"
] | 50
|
2016-09-14T19:45:49.000Z
|
2021-07-26T17:04:10.000Z
|
pydlm/modeler/__init__.py
|
dtolpin/pydlm
|
0016876bf6357784b161ecaa4f0798c063c54785
|
[
"BSD-3-Clause"
] | 99
|
2016-09-19T08:08:41.000Z
|
2022-03-07T13:47:36.000Z
|
# this module defines the tools for modeling
# __all__ = ['trends', 'seasonality', 'dynamic', 'autoReg', 'longSeason', 'builder']
# import pydlm.modeler.trends as trends
# import pydlm.modeler.seasonality as seasonality
# import pydlm.modeler.dynamic as dynamic
# import pydlm.modeler.builder as builder
# import pydlm.modeler.autoReg as autoReg
# import pydlm.modeler.longSeason as longSeason
| 36
| 84
| 0.775253
| 50
| 396
| 6.06
| 0.36
| 0.217822
| 0.356436
| 0.165017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123737
| 396
| 10
| 85
| 39.6
| 0.873199
| 0.95202
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
072a71d75db0b0ec2d3543e74e5467cc36515ff2
| 48
|
py
|
Python
|
code/arc018_1_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | 3
|
2019-08-16T16:55:48.000Z
|
2021-04-11T10:21:40.000Z
|
code/arc018_1_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
code/arc018_1_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
# Read the height H and base B, then print H^2 * B scaled by 1/10000.
tokens = input().split()
H = float(tokens[0])
B = float(tokens[1])
print(H ** 2 * B / 1e4)
| 24
| 30
| 0.645833
| 11
| 48
| 2.818182
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 0.020833
| 48
| 2
| 31
| 24
| 0.595745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
073d01b92cf7075e32fa92b1e9a3711372694558
| 262
|
py
|
Python
|
bitmovin_api_sdk/encoding/encodings/streams/burn_in_subtitles/dvbsub/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/encodings/streams/burn_in_subtitles/dvbsub/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/encodings/streams/burn_in_subtitles/dvbsub/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.encodings.streams.burn_in_subtitles.dvbsub.dvbsub_api import DvbsubApi
from bitmovin_api_sdk.encoding.encodings.streams.burn_in_subtitles.dvbsub.burn_in_subtitle_dvb_sub_list_query_params import BurnInSubtitleDvbSubListQueryParams
| 87.333333
| 159
| 0.923664
| 36
| 262
| 6.277778
| 0.555556
| 0.079646
| 0.132743
| 0.159292
| 0.557522
| 0.557522
| 0.557522
| 0.557522
| 0.557522
| 0.557522
| 0
| 0
| 0.030534
| 262
| 2
| 160
| 131
| 0.889764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
074da2fca1e6ab8fdfc1fb5622ebec670d995ad1
| 68
|
py
|
Python
|
abc/abc090/abc090a.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | 1
|
2019-08-21T00:49:34.000Z
|
2019-08-21T00:49:34.000Z
|
abc/abc090/abc090a.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
abc/abc090/abc090a.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
c = [input() for _ in range(3)]
print(c[0][0] + c[1][1] + c[2][2])
| 17
| 34
| 0.470588
| 16
| 68
| 1.9375
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 0.191176
| 68
| 3
| 35
| 22.666667
| 0.436364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
4acbd9dfce233d444086e0991dca3677d10668f6
| 229
|
py
|
Python
|
typeidea/base/funtion.py
|
birdywings/typeidea
|
d7ce276a7a823b4a9d50bf57edc07e002aa08863
|
[
"MIT"
] | 1
|
2018-08-28T06:26:18.000Z
|
2018-08-28T06:26:18.000Z
|
typeidea/base/funtion.py
|
birdywings/typeidea
|
d7ce276a7a823b4a9d50bf57edc07e002aa08863
|
[
"MIT"
] | 2
|
2020-03-10T10:21:22.000Z
|
2021-06-10T20:52:02.000Z
|
typeidea/base/funtion.py
|
birdywings/typeidea
|
d7ce276a7a823b4a9d50bf57edc07e002aa08863
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
def value_judge(request, *args):
    """Check that the client did not omit any required field.

    Returns True when every name in ``args`` is present in ``request.data``
    with a value other than '' or None; returns False otherwise.
    """
    for field in args:
        value = request.data.get(field)
        # A missing key also yields None here, so one lookup covers
        # "absent", "None", and "empty string".
        if value is None or value == '':
            return False
    return True
| 25.444444
| 93
| 0.598253
| 35
| 229
| 3.885714
| 0.628571
| 0.242647
| 0.191176
| 0.235294
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005988
| 0.270742
| 229
| 8
| 94
| 28.625
| 0.808383
| 0.148472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
4ae9786ca8ffcb225ebb57110db7cc8df8a943f5
| 29
|
py
|
Python
|
frds/data/wrds/execucomp/__init__.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 31
|
2020-06-17T13:19:12.000Z
|
2022-03-27T08:56:38.000Z
|
frds/data/wrds/execucomp/__init__.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | null | null | null |
frds/data/wrds/execucomp/__init__.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 8
|
2020-06-14T15:21:51.000Z
|
2021-09-29T06:28:53.000Z
|
from .anncomp import Anncomp
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ab613c70006f712e3afdac76d87e9315e1feec69
| 68
|
py
|
Python
|
maya/python/dl_bifrost_utils/__init__.py
|
Mikfr83/bifrost-dl-core
|
a5660076f6f76cdcd95b73c63c521f6056b05123
|
[
"MIT"
] | 55
|
2021-02-08T05:17:28.000Z
|
2022-01-28T18:04:43.000Z
|
maya/python/dl_bifrost_utils/__init__.py
|
Mikfr83/bifrost-dl-core
|
a5660076f6f76cdcd95b73c63c521f6056b05123
|
[
"MIT"
] | 27
|
2021-02-13T08:05:46.000Z
|
2021-12-07T07:32:39.000Z
|
maya/python/dl_bifrost_utils/__init__.py
|
Mikfr83/bifrost-dl-core
|
a5660076f6f76cdcd95b73c63c521f6056b05123
|
[
"MIT"
] | 6
|
2021-04-10T06:42:43.000Z
|
2022-02-15T07:17:56.000Z
|
from . import group_utils
# import the ui package
from . import ui
| 13.6
| 25
| 0.75
| 11
| 68
| 4.545455
| 0.636364
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 68
| 4
| 26
| 17
| 0.925926
| 0.308824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
db495df5f6aac6c091c0e550ebcf37eccc8aa643
| 83
|
py
|
Python
|
Lab_6/Task_4.py
|
spencerperley/CPE_101
|
9ae3c5a0042780f824de5edee275b35cdb0bbaec
|
[
"MIT"
] | 1
|
2022-01-12T21:48:23.000Z
|
2022-01-12T21:48:23.000Z
|
Lab_6/Task_4.py
|
spencerperley/CPE_101
|
9ae3c5a0042780f824de5edee275b35cdb0bbaec
|
[
"MIT"
] | null | null | null |
Lab_6/Task_4.py
|
spencerperley/CPE_101
|
9ae3c5a0042780f824de5edee275b35cdb0bbaec
|
[
"MIT"
] | null | null | null |
def groups_of_3(myList):
    """Split ``myList`` into three chunks: two of up to three elements and a
    final chunk holding everything from index 6 onward.

    Slicing never raises on short input, so lists with fewer than 9 elements
    simply yield shorter (possibly empty) chunks.
    """
    # myList[6:] is the idiomatic form of the original myList[6:len(myList)].
    return [myList[0:3], myList[3:6], myList[6:]]
| 41.5
| 58
| 0.698795
| 16
| 83
| 3.5
| 0.5625
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 0.084337
| 83
| 2
| 58
| 41.5
| 0.657895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
db5a4045cb12c42a179327b9e4f4b6d30c6d831e
| 834
|
py
|
Python
|
space-age/space_age.py
|
ederst/exercism-python
|
8791f145ff4ce1a3b78ac3566fbe428ce3a3bd7b
|
[
"Unlicense"
] | 1
|
2021-06-25T16:09:02.000Z
|
2021-06-25T16:09:02.000Z
|
space-age/space_age.py
|
ederst/exercism-python
|
8791f145ff4ce1a3b78ac3566fbe428ce3a3bd7b
|
[
"Unlicense"
] | 1
|
2021-05-17T23:45:29.000Z
|
2021-05-17T23:46:01.000Z
|
space-age/space_age.py
|
ederst/exercism-python
|
8791f145ff4ce1a3b78ac3566fbe428ce3a3bd7b
|
[
"Unlicense"
] | null | null | null |
class SpaceAge:
    """Convert a number of seconds lived into years on each planet."""

    def __init__(self, seconds: float):
        # Total seconds the subject has been alive.
        self.seconds = seconds

    def _space_age(self, ratio: float = 1.0, ndigits: int = 2) -> float:
        """Age in years on a planet whose orbital period is ``ratio``
        Earth years, rounded to ``ndigits`` decimal places.

        31557600.0 is the number of seconds in one Earth year
        (a Julian year of 365.25 days).
        """
        earth_years = self.seconds / 31557600.0
        return round(earth_years / ratio, ndigits)

    def on_mercury(self) -> float:
        return self._space_age(ratio=0.2408467)

    def on_venus(self) -> float:
        return self._space_age(ratio=0.61519726)

    def on_earth(self) -> float:
        return self._space_age()

    def on_mars(self) -> float:
        return self._space_age(ratio=1.8808158)

    def on_jupiter(self) -> float:
        return self._space_age(ratio=11.862615)

    def on_saturn(self) -> float:
        return self._space_age(ratio=29.447498)

    def on_uranus(self) -> float:
        return self._space_age(ratio=84.016846)

    def on_neptune(self) -> float:
        return self._space_age(ratio=164.79132)
| 26.0625
| 72
| 0.635492
| 116
| 834
| 4.310345
| 0.327586
| 0.144
| 0.24
| 0.304
| 0.436
| 0.436
| 0.112
| 0
| 0
| 0
| 0
| 0.109873
| 0.247002
| 834
| 31
| 73
| 26.903226
| 0.686306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.47619
| false
| 0
| 0
| 0.428571
| 0.952381
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
db96a3880d6623c7a65c679c54a2181a9c18f50a
| 11
|
py
|
Python
|
login.py
|
LiuTongred/test
|
90d9b09f2c9c29143f2ca47691ccdef28010949a
|
[
"MIT"
] | null | null | null |
login.py
|
LiuTongred/test
|
90d9b09f2c9c29143f2ca47691ccdef28010949a
|
[
"MIT"
] | null | null | null |
login.py
|
LiuTongred/test
|
90d9b09f2c9c29143f2ca47691ccdef28010949a
|
[
"MIT"
] | null | null | null |
# Placeholder value; nothing in this snippet reads it — TODO confirm intended use.
num = 2222
| 5.5
| 10
| 0.636364
| 2
| 11
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0.272727
| 11
| 1
| 11
| 11
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dbdca5e84d423d50b740bd0c4363098894318379
| 38
|
py
|
Python
|
simplpy/list_/__init__.py
|
frankhart2018/simplpy
|
a9e8781f9cc8ba9578d6ec786d58e349cba9c52a
|
[
"MIT"
] | 1
|
2021-02-15T11:36:47.000Z
|
2021-02-15T11:36:47.000Z
|
simplpy/list_/__init__.py
|
frankhart2018/simplpy
|
a9e8781f9cc8ba9578d6ec786d58e349cba9c52a
|
[
"MIT"
] | null | null | null |
simplpy/list_/__init__.py
|
frankhart2018/simplpy
|
a9e8781f9cc8ba9578d6ec786d58e349cba9c52a
|
[
"MIT"
] | null | null | null |
from simplpy.list_.list_func import *
| 19
| 37
| 0.815789
| 6
| 38
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
915e48f7f8846a80b3fd111683669a6dfc582b29
| 107
|
py
|
Python
|
starkbank/utilitypayment/__init__.py
|
thalesmello/sdk-python
|
fe897883b5c91948e812cfaa6ac176edcf0f9290
|
[
"MIT"
] | null | null | null |
starkbank/utilitypayment/__init__.py
|
thalesmello/sdk-python
|
fe897883b5c91948e812cfaa6ac176edcf0f9290
|
[
"MIT"
] | null | null | null |
starkbank/utilitypayment/__init__.py
|
thalesmello/sdk-python
|
fe897883b5c91948e812cfaa6ac176edcf0f9290
|
[
"MIT"
] | null | null | null |
from .__utilitypayment import create, get, pdf, query, delete
from .log.__log import Log
from . import log
| 26.75
| 61
| 0.775701
| 16
| 107
| 4.9375
| 0.5625
| 0.227848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149533
| 107
| 3
| 62
| 35.666667
| 0.868132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
918a523cfa56dd7f4925fcb00762460b23a1f614
| 173
|
py
|
Python
|
BDProjectsSessionsMonitor/__init__.py
|
bond-anton/SPAdminTools
|
1bc12f773b8fb7b96a64348d0b807ee4807fd5a0
|
[
"Apache-2.0"
] | null | null | null |
BDProjectsSessionsMonitor/__init__.py
|
bond-anton/SPAdminTools
|
1bc12f773b8fb7b96a64348d0b807ee4807fd5a0
|
[
"Apache-2.0"
] | null | null | null |
BDProjectsSessionsMonitor/__init__.py
|
bond-anton/SPAdminTools
|
1bc12f773b8fb7b96a64348d0b807ee4807fd5a0
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division, print_function
from BDProjectsSessionsMonitor.Application import SPSMApplication
from BDProjectsSessionsMonitor.AboutWindow import _version
| 43.25
| 65
| 0.907514
| 16
| 173
| 9.4375
| 0.6875
| 0.384106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075145
| 173
| 3
| 66
| 57.666667
| 0.94375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
91ac180c9ec1fe60d487d0f499cfe5393071236e
| 10,888
|
py
|
Python
|
swiftbrowser/tests/test_utils.py
|
OLRC/django-swiftbrowser
|
cfc6fa44f8eeda5e66db81cce39b5340fc5a898a
|
[
"Apache-2.0"
] | 1
|
2021-09-06T12:31:27.000Z
|
2021-09-06T12:31:27.000Z
|
swiftbrowser/tests/test_utils.py
|
OLRC/django-swiftbrowser
|
cfc6fa44f8eeda5e66db81cce39b5340fc5a898a
|
[
"Apache-2.0"
] | null | null | null |
swiftbrowser/tests/test_utils.py
|
OLRC/django-swiftbrowser
|
cfc6fa44f8eeda5e66db81cce39b5340fc5a898a
|
[
"Apache-2.0"
] | null | null | null |
from django.test import TestCase
import swiftbrowser.views
from swiftbrowser.utils import *
class SplitAclTest(TestCase):
def setUp(self):
# Create an empty
self.expected = {
"users": [],
"referrers": [],
"rlistings": False,
"public": False,
}
def test_empty(self):
'''When no ACL is set, the returned dictionary should have empty
lists and false rlistings and public.'''
acl = ""
split = split_acl(acl)
self.assertEqual(0, len(split["users"]))
self.assertEqual(0, len(split["referrers"]))
self.assertFalse(split["rlistings"])
self.assertFalse(split["public"])
def test_user_single(self):
'''Test when one user is on the acl.'''
acl = "tenant:user"
split = split_acl(acl)
self.assertEqual("tenant:user", split["users"][0])
self.assertEqual(1, len(split["users"]))
self.assertEqual(0, len(split["referrers"]))
self.assertFalse(split["rlistings"])
self.assertFalse(split["public"])
def test_user_multiple(self):
'''Test multiple users on the acl'''
acl = "tenant:user1,tenant:user2,tenant:user3,user4"
split = split_acl(acl)
self.assertEqual(4, len(split))
self.assertEqual("tenant:user1", split["users"][0])
self.assertEqual("tenant:user2", split["users"][1])
self.assertEqual("tenant:user3", split["users"][2])
self.assertEqual("user4", split["users"][3])
self.assertEqual(0, len(split["referrers"]))
self.assertFalse(split["rlistings"])
self.assertFalse(split["public"])
def test_referrers_single(self):
'''Test single referer on the acl'''
acl = ".r:example.com"
split = split_acl(acl)
self.assertEqual(1, len(split["referrers"]))
self.assertEqual("example.com", split["referrers"][0])
self.assertEqual(0, len(split["users"]))
self.assertFalse(split["rlistings"])
self.assertFalse(split["public"])
def test_referrers_multiple(self):
'''Test multiple referrers on acl'''
acl = ".r:example.com,.r:domain.com,.r:swiftbrowser.com,.r:abc.com"
split = split_acl(acl)
self.assertEqual(4, len(split["referrers"]))
self.assertEqual("example.com", split["referrers"][0])
self.assertEqual("domain.com", split["referrers"][1])
self.assertEqual("swiftbrowser.com", split["referrers"][2])
self.assertEqual("abc.com", split["referrers"][3])
self.assertEqual(0, len(split["users"]))
self.assertFalse(split["rlistings"])
self.assertFalse(split["public"])
def test_rlisting(self):
'''Test case where rlisting is set.'''
acl = ".rlistings"
split = split_acl(acl)
self.assertTrue(split["rlistings"])
self.assertEqual(0, len(split["users"]))
self.assertEqual(0, len(split["referrers"]))
self.assertFalse(split["public"])
def test_public(self):
'''Test when the container is set to public.'''
acl = ".r:*"
split = split_acl(acl)
self.assertTrue(split["public"])
self.assertEqual(0, len(split["users"]))
self.assertEqual(0, len(split["referrers"]))
self.assertFalse(split["rlistings"])
def test_public_rlistings(self):
'''Test when a container is set to public and has rlistings is set.'''
acl = ".r:*,.rlistings"
split = split_acl(acl)
self.assertEqual(0, len(split["users"]))
self.assertEqual(0, len(split["referrers"]))
self.assertTrue(split["rlistings"])
self.assertTrue(split["public"])
def test_public_multiple_referrers(self):
'''Test when a container is set to public and has multiple referrers.
'''
acl = ".r:*,.r:domain.com,.r:abc.com"
split = split_acl(acl)
self.assertEqual(0, len(split["users"]))
self.assertEqual(2, len(split["referrers"]))
self.assertEqual("domain.com", split["referrers"][0])
self.assertEqual("abc.com", split["referrers"][1])
self.assertFalse(split["rlistings"])
self.assertTrue(split["public"])
def test_public_multiple_users(self):
'''Test when a container is set to public and has multiple users.'''
acl = ".r:*,tenant:user,user2,user3,tenant:user4"
split = split_acl(acl)
self.assertEqual(4, len(split["users"]))
self.assertEqual("tenant:user", split["users"][0])
self.assertEqual("user2", split["users"][1])
self.assertEqual("user3", split["users"][2])
self.assertEqual("tenant:user4", split["users"][3])
self.assertEqual(0, len(split["referrers"]))
self.assertFalse(split["rlistings"])
self.assertTrue(split["public"])
def test_rlistings_multiple_referrers(self):
'''Test when a container has rlistings set and multiple referrers.'''
acl = ".rlistings,.r:domain.com,.r:abc.com,.r:example.com"
split = split_acl(acl)
self.assertEqual(0, len(split["users"]))
self.assertEqual(3, len(split["referrers"]))
self.assertEqual("domain.com", split["referrers"][0])
self.assertEqual("abc.com", split["referrers"][1])
self.assertEqual("example.com", split["referrers"][2])
self.assertTrue(split["rlistings"])
self.assertFalse(split["public"])
def test_rlistings_multiple_users(self):
'''Test when a container has rlistings set and multiple users.'''
acl = ".rlistings,user1,user2,tenant:user3"
split = split_acl(acl)
self.assertEqual(3, len(split["users"]))
self.assertEqual("user1", split["users"][0])
self.assertEqual("user2", split["users"][1])
self.assertEqual("tenant:user3", split["users"][2])
self.assertEqual(0, len(split["referrers"]))
self.assertTrue(split["rlistings"])
self.assertFalse(split["public"])
def test_multiple_referrers_multiple_users(self):
    '''Interleaved referrer and user entries: each kind is collected in
    its own list, in encounter order, with neither flag set.'''
    split = split_acl(
        ".r:domain.com,user1,user2,.r:abc.com,.r:swiftbrowser.com,user3")
    self.assertEqual(["user1", "user2", "user3"], list(split["users"]))
    self.assertEqual(["domain.com", "abc.com", "swiftbrowser.com"],
                     list(split["referrers"]))
    self.assertFalse(split["rlistings"])
    self.assertFalse(split["public"])
def test_public_rlistings_referrers_users(self):
    '''Everything at once: ``.r:*``, ``.rlistings``, three referrers and
    three users, all interleaved; every piece must land in its bucket.'''
    acl = (".r:*,user1,.r:domain.com,user2,.rlistings,"
           "user3,.r:domain2.com,.r:abc.com")
    split = split_acl(acl)
    self.assertEqual(["user1", "user2", "user3"], list(split["users"]))
    self.assertEqual(["domain.com", "domain2.com", "abc.com"],
                     list(split["referrers"]))
    self.assertTrue(split["rlistings"])
    self.assertTrue(split["public"])
class GetNonConsecutiveTest(TestCase):
    '''Exercise get_first_nonconsecutive() against assorted segment listings.'''

    @staticmethod
    def _segments(numbers):
        # Render each integer as a zero-padded, four-digit segment name.
        return ["%04d" % number for number in numbers]

    def _check(self, objects, expected):
        # Shared assertion: the first missing sequence number is `expected`.
        self.assertEqual(get_first_nonconsecutive(objects), expected)

    def test_empty(self):
        '''An empty listing starts the sequence at 1.'''
        self._check([], 1)

    def test_segment_one_missing(self):
        '''Segment 0001 absent: 1 is the first gap.'''
        self._check(self._segments(range(2, 15)), 1)

    def test_sequential_numbers(self):
        '''A gap-free run 0001..0014 continues at 15.'''
        self._check(self._segments(range(1, 15)), 15)

    def test_sequential_numbers_small(self):
        '''Two consecutive segments continue at 3.'''
        self._check(self._segments(range(1, 3)), 3)

    def test_set_size_one(self):
        '''A lone 0001 continues at 2.'''
        self._check(self._segments([1]), 2)

    def test_set_size_one_incorrect(self):
        '''A lone segment other than 0001 leaves 1 as the first gap.'''
        self._check(self._segments([2]), 1)

    def test_break_in_sequence_after_one(self):
        '''Gap immediately after the first segment.'''
        self._check(self._segments([1] + list(range(3, 15))), 2)

    def test_break_in_sequence_middle(self):
        '''A single gap in the middle of the run.'''
        self._check(self._segments(list(range(1, 6)) + list(range(7, 15))), 6)

    def test_large_break_in_sequence_middle(self):
        '''A wide gap in the middle of the run.'''
        self._check(self._segments(list(range(1, 6)) + list(range(14, 17))), 6)

    def test_break_in_sequence_near_end(self):
        '''A gap just before the final segment.'''
        self._check(self._segments(list(range(1, 13)) + [14]), 13)

    def test_duplicate(self):
        '''A duplicated segment must not mask the later gap.'''
        self._check(
            self._segments(list(range(1, 7)) + list(range(6, 13)) + [14]), 7)

    def test_duplicate_break(self):
        '''A duplicate immediately followed by a break in the sequence.'''
        self._check(
            self._segments(list(range(1, 7)) + [6] + list(range(8, 13)) + [14]),
            7)
| 36.659933
| 79
| 0.604151
| 1,276
| 10,888
| 5.069749
| 0.095611
| 0.171588
| 0.05565
| 0.046993
| 0.806771
| 0.783274
| 0.750502
| 0.690833
| 0.669501
| 0.603494
| 0
| 0.063327
| 0.229886
| 10,888
| 296
| 80
| 36.783784
| 0.708169
| 0.120775
| 0
| 0.57672
| 0
| 0.010582
| 0.194038
| 0.041693
| 0
| 0
| 0
| 0
| 0.539683
| 1
| 0.142857
| false
| 0
| 0.015873
| 0
| 0.169312
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91b722d3944117600c07825b9d44f869f27b23da
| 5,555
|
py
|
Python
|
tests/test_cli.py
|
danvalen1/waybackpy
|
4b61b6ecd6eb4ae2f607afcfd3309ef048dd4a32
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
danvalen1/waybackpy
|
4b61b6ecd6eb4ae2f607afcfd3309ef048dd4a32
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
danvalen1/waybackpy
|
4b61b6ecd6eb4ae2f607afcfd3309ef048dd4a32
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import pytest
import argparse
sys.path.append("..")
import waybackpy.cli as cli # noqa: E402
from waybackpy.wrapper import Url # noqa: E402
from waybackpy.__version__ import __version__
# True on interpreters newer than 3.7.x (the tuple comparison is strict, so
# 3.7.0 itself compares equal and stays False); gates the tests that should
# only run on the interpreter used for coverage reporting.
codecov_python = sys.version_info > (3, 7)
# Namespace(day=None, get=None, hour=None, minute=None, month=None, near=False,
# newest=False, oldest=False, save=False, total=False, url=None, user_agent=None, version=False, year=None)
if codecov_python:
    def test_save():
        """Saving a page should echo the requested URL in the reply."""
        options = dict(
            user_agent=None, url="https://pypi.org/user/akamhy/",
            total=False, version=False, oldest=False, save=True,
            newest=False, near=False, alive=False, subdomain=False,
            known_urls=False, get=None,
        )
        reply = cli.args_handler(argparse.Namespace(**options))
        assert "pypi.org/user/akamhy" in reply
def test_oldest():
    """Requesting the oldest snapshot should echo the requested URL."""
    options = dict(
        user_agent=None, url="https://pypi.org/user/akamhy/",
        total=False, version=False, oldest=True, save=False,
        newest=False, near=False, alive=False, subdomain=False,
        known_urls=False, get=None,
    )
    reply = cli.args_handler(argparse.Namespace(**options))
    assert "pypi.org/user/akamhy" in reply
def test_newest():
    """Requesting the newest snapshot should echo the requested URL.

    NOTE(review): the backslash-continued user_agent literal embeds the
    continuation line's leading whitespace into the string; harmless here
    since it is only sent as a UA header.
    """
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=False, version=False,
oldest=False, save=False, newest=True, near=False, alive=False, subdomain=False, known_urls=False, get=None)
    reply = cli.args_handler(args)
    assert "pypi.org/user/akamhy" in reply
def test_total_archives():
    """The total-archives reply must come back as an int, not a string."""
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=True, version=False,
oldest=False, save=False, newest=False, near=False, alive=False, subdomain=False, known_urls=False, get=None)
    reply = cli.args_handler(args)
    # Count of archives, so an integer is expected rather than text output.
    assert isinstance(reply, int)
def test_known_urls():
    """known_urls with alive+subdomain filtering should mention github."""
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://akamhy.github.io", total=False, version=False,
oldest=False, save=False, newest=False, near=False, alive=True, subdomain=True, known_urls=True, get=None)
    reply = cli.args_handler(args)
    assert "github" in reply
def test_near():
    """A near-query for 2020-07-15 01:01 should yield a 202007* timestamp."""
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=False, version=False,
oldest=False, save=False, newest=False, near=True, alive=False, subdomain=False, known_urls=False, get=None, year=2020, month=7, day=15, hour=1, minute=1)
    reply = cli.args_handler(args)
    # Archive URLs embed a YYYYMMDDhhmmss timestamp; only the year+month is pinned.
    assert "202007" in reply
def test_get():
    """Exercise every value of --get: url, oldest, newest, save (only on the
    coverage interpreter) and finally an invalid value, which should produce
    the usage hint mentioning the source code."""
    # --get url
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=False, version=False,
oldest=False, save=False, newest=False, near=False, alive=False, subdomain=False, known_urls=False, get="url")
    reply = cli.args_handler(args)
    assert "waybackpy" in reply
    # --get oldest
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=False, version=False,
oldest=False, save=False, newest=False, near=False, alive=False, subdomain=False, known_urls=False, get="oldest")
    reply = cli.args_handler(args)
    assert "waybackpy" in reply
    # --get newest
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=False, version=False,
oldest=False, save=False, newest=False, near=False, alive=False, subdomain=False, known_urls=False, get="newest")
    reply = cli.args_handler(args)
    assert "waybackpy" in reply
    # --get save triggers a real archive request, so it only runs on the
    # interpreter used for coverage reporting (see codecov_python above).
    if codecov_python:
        args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=False, version=False,
oldest=False, save=False, newest=False, near=False, alive=False, subdomain=False, known_urls=False, get="save")
        reply = cli.args_handler(args)
        assert "waybackpy" in reply
    # An unrecognised --get value falls through to the help/usage message.
    args = argparse.Namespace(user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 \
(KHTML, like Gecko) Version/8.0.8 Safari/600.8.9", url="https://pypi.org/user/akamhy/", total=False, version=False,
oldest=False, save=False, newest=False, near=False, alive=False, subdomain=False, known_urls=False, get="BullShit")
    reply = cli.args_handler(args)
    assert "get the source code of the" in reply
def test_args_handler():
    """--version and a missing URL both yield version-bearing replies."""
    expected = "waybackpy version %s" % (__version__)
    reply = cli.args_handler(argparse.Namespace(version=True))
    assert reply == expected
    reply = cli.args_handler(argparse.Namespace(url=None, version=False))
    assert ("waybackpy %s" % (__version__)) in reply
def test_main():
    """Smoke-test cli.main (and thereby parse_args) with the version flag."""
    argv = ['temp.py', '--version']
    cli.main(argv)
| 53.413462
| 158
| 0.711251
| 870
| 5,555
| 4.44023
| 0.109195
| 0.018638
| 0.023298
| 0.057209
| 0.786177
| 0.779705
| 0.764691
| 0.745017
| 0.735698
| 0.7124
| 0
| 0.042803
| 0.142034
| 5,555
| 103
| 159
| 53.932039
| 0.76773
| 0.049505
| 0
| 0.487805
| 0
| 0.109756
| 0.061255
| 0
| 0
| 0
| 0
| 0
| 0.158537
| 1
| 0.109756
| false
| 0
| 0.085366
| 0
| 0.195122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91ca0f2e2dac9b3a2702df7661feb474592c23db
| 426
|
py
|
Python
|
tests/mark.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 36
|
2021-07-22T08:11:31.000Z
|
2022-01-31T13:09:26.000Z
|
tests/mark.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 10
|
2021-07-21T19:39:38.000Z
|
2022-02-26T15:35:35.000Z
|
tests/mark.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 2
|
2021-07-28T09:55:13.000Z
|
2022-02-18T11:29:25.000Z
|
import sys
import typing as t
import pytest
from _pytest.mark import MarkDecorator
def override_settings(**settings: t.Any) -> MarkDecorator:
    """Parametrize the ``settings`` fixture with the given mapping."""
    cases = [settings]
    return pytest.mark.parametrize("settings", cases)
def override_panels(panels: t.List[str]) -> MarkDecorator:
    """Shorthand for overriding only the ``panels`` setting."""
    overrides = {"panels": panels}
    return override_settings(**overrides)
def skip_py(*version: int) -> MarkDecorator:
    """Skip the test when the running interpreter is older than *version*.

    ``skip_py(3, 10)`` skips on anything below Python 3.10. The reason string
    was a bare "?" placeholder; report the actual requirement so pytest's
    skip summary is informative.
    """
    return pytest.mark.skipif(
        sys.version_info < version,
        reason="requires Python >= %s" % ".".join(map(str, version)),
    )
| 23.666667
| 69
| 0.753521
| 54
| 426
| 5.833333
| 0.462963
| 0.095238
| 0.15873
| 0.184127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131455
| 426
| 17
| 70
| 25.058824
| 0.851351
| 0
| 0
| 0
| 0
| 0
| 0.021127
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0.4
| 0.3
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
91d386543140c7dc841311dd9afc4b17a964827b
| 1,998
|
py
|
Python
|
tests/test_SFJWT.py
|
Rehket/SalesForce-JWT
|
df229bb775b59fcaa9c666c73a71de8b4fe901fb
|
[
"MIT"
] | null | null | null |
tests/test_SFJWT.py
|
Rehket/SalesForce-JWT
|
df229bb775b59fcaa9c666c73a71de8b4fe901fb
|
[
"MIT"
] | 2
|
2021-05-09T17:59:51.000Z
|
2021-05-09T18:00:36.000Z
|
tests/test_SFJWT.py
|
Rehket/SalesForce-JWT
|
df229bb775b59fcaa9c666c73a71de8b4fe901fb
|
[
"MIT"
] | null | null | null |
# Standard library imports...
from unittest import mock, TestCase
import responses
# Minimal fake environment for the SFJWT module under test.
# NOTE(review): SFDC_CONSUMER_KEY is the string "false", not a boolean —
# presumably any non-empty value suffices; confirm against SFJWT's env parsing.
mock_environ = {
    "SFDC_CONSUMER_KEY": "false",
    "SFDC_USERNAME": "foo",
    "SFDC_PRIVATE_CERT": "foo",
    "SFDC_PRIVATE_CERT_PATH": "foo",
}
# This test is broken for some reason when all the tests are run together.
class TestSandboxSFDCAuth(TestCase):
    """jwt_login should hit the environment-appropriate token endpoint and
    return the (instance_url, access_token) pair from the JSON reply.

    The sandbox and production tests were byte-for-byte duplicates except for
    the environment name and token URL; the shared flow lives in a helper.
    """

    def _assert_jwt_login(self, environment, token_url):
        """Stub *token_url* and verify jwt_login(environment) parses the reply."""
        with responses.RequestsMock() as rsps:
            with mock.patch("SFJWT.SFJWT.jwt.encode") as encode:
                # jwt.encode is patched so no real private key is needed.
                encode.return_value = "my_secret_string"
                from SFJWT.SFJWT import jwt_login

                rsps.add(
                    responses.POST,
                    token_url,
                    body='{"instance_url": "salesforce.com", "access_token": "my_access_token"}',
                    status=201,
                    content_type="application/json",
                )
                instance_url, token = jwt_login(
                    "consumer_id", "username", "private_key", environment
                )
                assert instance_url == "salesforce.com"
                assert token == "my_access_token"

    def test_get_sandbox_login(self):
        # Sandbox logins go through test.salesforce.com.
        self._assert_jwt_login(
            "sandbox", "https://test.salesforce.com/services/oauth2/token"
        )

    def test_get_prod_login(self):
        # Production logins go through login.salesforce.com.
        self._assert_jwt_login(
            "production", "https://login.salesforce.com/services/oauth2/token"
        )
| 37
| 97
| 0.563063
| 205
| 1,998
| 5.258537
| 0.365854
| 0.072356
| 0.077922
| 0.089054
| 0.721707
| 0.721707
| 0.721707
| 0.721707
| 0.721707
| 0.721707
| 0
| 0.005997
| 0.332332
| 1,998
| 53
| 98
| 37.698113
| 0.802099
| 0.05005
| 0
| 0.55814
| 0
| 0
| 0.297098
| 0.034829
| 0
| 0
| 0
| 0
| 0.093023
| 1
| 0.046512
| false
| 0
| 0.093023
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
37fd2af77f101a4979815b767f91cba220a69cdb
| 271
|
py
|
Python
|
angr/procedures/libc/calloc.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 6,132
|
2015-08-06T23:24:47.000Z
|
2022-03-31T21:49:34.000Z
|
angr/procedures/libc/calloc.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 2,272
|
2015-08-10T08:40:07.000Z
|
2022-03-31T23:46:44.000Z
|
angr/procedures/libc/calloc.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 1,155
|
2015-08-06T23:37:39.000Z
|
2022-03-31T05:54:11.000Z
|
import angr
######################################
# calloc
######################################
class calloc(angr.SimProcedure):
    """SimProcedure modeling libc's ``calloc(nmemb, size)``.

    A thin shim: the actual allocation is implemented by the state's heap
    plugin (``_calloc``) — presumably including calloc's zero-initialization
    guarantee; confirm in the heap plugin itself.
    """
    #pylint:disable=arguments-differ

    def run(self, sim_nmemb, sim_size):
        # Forward both (possibly symbolic) arguments to the heap model unchanged.
        return self.state.heap._calloc(sim_nmemb, sim_size)
| 24.636364
| 59
| 0.520295
| 27
| 271
| 5.037037
| 0.666667
| 0.117647
| 0.161765
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125461
| 271
| 10
| 60
| 27.1
| 0.57384
| 0.136531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
530962108ce5a71b6bf458dcb7a328d5897148b2
| 6,179
|
py
|
Python
|
tests/pbxcli/TestPBXProjShow.py
|
yixiaoqingyuz/mod-pbxproj
|
b1ea20e0846cd8f402224a31f7ca119d2e9ff36f
|
[
"MIT"
] | 1
|
2020-01-16T08:33:38.000Z
|
2020-01-16T08:33:38.000Z
|
tests/pbxcli/TestPBXProjShow.py
|
yixiaoqingyuz/mod-pbxproj
|
b1ea20e0846cd8f402224a31f7ca119d2e9ff36f
|
[
"MIT"
] | null | null | null |
tests/pbxcli/TestPBXProjShow.py
|
yixiaoqingyuz/mod-pbxproj
|
b1ea20e0846cd8f402224a31f7ca119d2e9ff36f
|
[
"MIT"
] | 1
|
2021-01-25T05:41:03.000Z
|
2021-01-25T05:41:03.000Z
|
import unittest
import shutil
import sys
from pbxproj.pbxcli import *
import pbxproj.pbxcli.pbxproj_show as pbxproj_show
class PBXProjShowTest(unittest.TestCase):
    """Tests for the ``pbxproj show`` sub-command output.

    Every per-target test previously repeated the same eight-key docopt-style
    argument dict with one flag flipped; ``_target_args`` builds it instead.
    """

    # All flag keys accepted when a --target is given; each defaults to None.
    _FLAG_KEYS = (
        u'--source-files',
        u'--header-files',
        u'--resource-files',
        u'--framework-files',
        u'--configurations',
        u'--build-phase-files',
    )

    def setUp(self):
        # copy the project.pbxproj, into a file that can be used by the tests
        shutil.copyfile('samplescli/project.pbxproj', 'samplescli/test.pbxproj')

    def tearDown(self):
        os.remove('samplescli/test.pbxproj')
        sys.stdout = sys.__stdout__

    def _show(self, args):
        """Open the project described by *args* and run the show command."""
        project = open_project(args)
        return pbxproj_show.execute(project, args)

    def _target_args(self, project_path, target, **options):
        """Build the docopt-style argument dict for *target*.

        All keys in ``_FLAG_KEYS`` default to None; keyword *options* use
        underscores in place of dashes (e.g. ``source_files=True``).
        """
        args = {
            u'<project>': project_path,
            u'--target': target,
        }
        for key in self._FLAG_KEYS:
            args[key] = None
        for name, value in options.items():
            args[u'--' + name.replace('_', '-')] = value
        return args

    def testShowAllTargetsInfo(self):
        # No --target: information for every target is printed (note: this
        # dict deliberately omits the flag keys, matching the CLI's behavior
        # when no target is given).
        args = {
            u'<project>': u'samplescli/test.pbxproj',
            u'--target': None
        }
        result = self._show(args)
        self.assertIn('testUITests:', result)
        self.assertIn('Product name: testUITests', result)
        self.assertIn('Configurations: Debug, Release', result)
        self.assertIn('Sources (PBXSourcesBuildPhase) file count: 1', result)
        self.assertIn('test:', result)
        self.assertIn('Product name: test\n', result)
        self.assertIn('Configurations: Debug, Release', result)
        self.assertIn('Sources (PBXSourcesBuildPhase) file count: 2', result)

    def testShowTargetBasicInfo(self):
        result = self._show(self._target_args(u'samplescli/test.pbxproj', u'test'))
        self.assertNotIn('testUITests:', result)
        self.assertNotIn('Product name: testUITests', result)
        self.assertIn('test:', result)
        self.assertIn('Product name: test\n', result)

    def testShowTargetConfigurations(self):
        result = self._show(self._target_args(u'samplescli/test.pbxproj', u'test',
                                              configurations=True))
        self.assertIn('test:', result)
        self.assertIn('Product name: test\n', result)
        self.assertIn('Configurations: Debug, Release\n', result)

    def testShowTargetSources(self):
        result = self._show(self._target_args(u'samplescli/test.pbxproj', u'test',
                                              source_files=True))
        self.assertIn('test:', result)
        self.assertIn('Product name: test\n', result)
        self.assertIn('Sources:', result)
        self.assertIn('AppDelegate.swift', result)
        self.assertIn('ViewController.swift', result)

    def testShowTargetResources(self):
        result = self._show(self._target_args(u'samplescli/test.pbxproj', u'test',
                                              resource_files=True))
        self.assertIn('test:', result)
        self.assertIn('Product name: test\n', result)
        self.assertIn('Resources:', result)
        self.assertIn('Assets.xcassets', result)
        self.assertIn('LaunchScreen.storyboard', result)
        self.assertIn('Main.storyboard', result)

    def testShowTargetHeaders(self):
        result = self._show(self._target_args(
            u'samplescli/dependency.xcodeproj/project.pbxproj', u'helloworld',
            header_files=True))
        self.assertIn('helloworld:', result)
        self.assertIn('Product name: helloworld\n', result)
        self.assertIn('Headers:', result)
        self.assertIn('doit.h', result)
        self.assertIn('helloworld.h', result)

    def testShowTargetFrameworks(self):
        result = self._show(self._target_args(
            u'samplescli/dependency.xcodeproj/project.pbxproj', u'helloworld',
            framework_files=True))
        self.assertIn('helloworld:', result)
        self.assertIn('Product name: helloworld\n', result)
        self.assertIn('Frameworks:', result)
        self.assertIn('AppKit.framework', result)

    def testShowTargetExplicitBuildPhase(self):
        result = self._show(self._target_args(
            u'samplescli/dependency.xcodeproj/project.pbxproj', u'helloworld',
            build_phase_files=u'PBXFrameworksBuildPhase'))
        self.assertIn('helloworld:', result)
        self.assertIn('Product name: helloworld\n', result)
        self.assertIn('Frameworks:', result)
        self.assertIn('AppKit.framework', result)
| 36.347059
| 80
| 0.578573
| 633
| 6,179
| 5.612954
| 0.1406
| 0.124965
| 0.151984
| 0.063327
| 0.740501
| 0.735435
| 0.723051
| 0.723051
| 0.723051
| 0.723051
| 0
| 0.000449
| 0.279657
| 6,179
| 169
| 81
| 36.56213
| 0.797798
| 0.010843
| 0
| 0.655405
| 0
| 0
| 0.308183
| 0.068412
| 0
| 0
| 0
| 0
| 0.263514
| 1
| 0.067568
| false
| 0
| 0.033784
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5339a905f22f95ba2960bc5d3c45cf04bbb6aabb
| 10
|
py
|
Python
|
test.py
|
canyus70/playground
|
7bdfc0fda903543a159aa41f52aad85f30f6afa5
|
[
"MIT"
] | null | null | null |
test.py
|
canyus70/playground
|
7bdfc0fda903543a159aa41f52aad85f30f6afa5
|
[
"MIT"
] | null | null | null |
test.py
|
canyus70/playground
|
7bdfc0fda903543a159aa41f52aad85f30f6afa5
|
[
"MIT"
] | null | null | null |
# Smoke-test script: print the integer literal 3 to stdout.
print(3)
| 3.333333
| 8
| 0.6
| 2
| 10
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.2
| 10
| 2
| 9
| 5
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5345afcbdb2a4add7fbcde378576030b4c53b31a
| 460
|
py
|
Python
|
src/yass/neuralnetwork/__init__.py
|
Nomow/yass
|
9cc5cc5c5435a664b378bba9332e5b77eb792ff8
|
[
"Apache-2.0"
] | null | null | null |
src/yass/neuralnetwork/__init__.py
|
Nomow/yass
|
9cc5cc5c5435a664b378bba9332e5b77eb792ff8
|
[
"Apache-2.0"
] | null | null | null |
src/yass/neuralnetwork/__init__.py
|
Nomow/yass
|
9cc5cc5c5435a664b378bba9332e5b77eb792ff8
|
[
"Apache-2.0"
] | null | null | null |
from yass.neuralnetwork.model import KerasModel
from yass.neuralnetwork.model_detector import NeuralNetDetector
from yass.neuralnetwork.model_autoencoder import AutoEncoder
from yass.neuralnetwork.model_triage import NeuralNetTriage
from yass.neuralnetwork.apply import run_detect_triage_featurize, fix_indexes
__all__ = ['NeuralNetDetector', 'NeuralNetTriage',
'run_detect_triage_featurize', 'fix_indexes', 'AutoEncoder',
'KerasModel']
| 46
| 77
| 0.819565
| 49
| 460
| 7.387755
| 0.367347
| 0.110497
| 0.290055
| 0.287293
| 0.187845
| 0.187845
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113043
| 460
| 9
| 78
| 51.111111
| 0.887255
| 0
| 0
| 0
| 0
| 0
| 0.197826
| 0.058696
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.625
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
534bb85a4fe4117db9063e5a4419a55709913dd5
| 151
|
py
|
Python
|
src/pyglottolog/admin_commands/tree2lff.py
|
SimonGreenhill/pyglottolog
|
1e0aa0cdc5ae35906c763f9219c6db9b976f8d38
|
[
"Apache-2.0"
] | 7
|
2019-07-28T16:09:05.000Z
|
2021-09-12T20:21:55.000Z
|
src/pyglottolog/admin_commands/tree2lff.py
|
d97hah/pyglottolog
|
fe4c2a52d54cdcf0804b4f889598dbb9b8698dbd
|
[
"Apache-2.0"
] | 52
|
2019-06-18T05:16:38.000Z
|
2022-02-21T11:20:02.000Z
|
src/pyglottolog/admin_commands/tree2lff.py
|
d97hah/pyglottolog
|
fe4c2a52d54cdcf0804b4f889598dbb9b8698dbd
|
[
"Apache-2.0"
] | 6
|
2019-07-26T17:40:25.000Z
|
2021-12-08T00:59:38.000Z
|
"""
Create lff.txt and dff.txt from the current languoid tree.
"""
from pyglottolog import lff
def run(args):
    """CLI entry point: write lff.txt and dff.txt from the current languoid tree.

    Delegates to ``lff.tree2lff``. ``args.repos`` is presumably the Glottolog
    data repository and ``args.log`` the command's logger — confirm against
    pyglottolog's CLI wiring.
    """
    lff.tree2lff(args.repos, args.log)
| 16.777778
| 58
| 0.715232
| 24
| 151
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007937
| 0.165563
| 151
| 8
| 59
| 18.875
| 0.849206
| 0.384106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
72855937cde48d6b148a7e38497c4747fc21bb79
| 70
|
py
|
Python
|
Logistic_Map_Generator/__init__.py
|
SubstanceIsFormAndContent/LMAP_Generator
|
3798bbffa0a355eb0656e804869048acfcbc5637
|
[
"MIT"
] | 2
|
2019-11-23T21:28:09.000Z
|
2020-02-10T23:47:20.000Z
|
Logistic_Map_Generator/__init__.py
|
SubstanceIsFormAndContent/LMAP_Generator
|
3798bbffa0a355eb0656e804869048acfcbc5637
|
[
"MIT"
] | null | null | null |
Logistic_Map_Generator/__init__.py
|
SubstanceIsFormAndContent/LMAP_Generator
|
3798bbffa0a355eb0656e804869048acfcbc5637
|
[
"MIT"
] | null | null | null |
# __init__.py
from .LogisticMapGenerator import LogisticMapGenerator
| 17.5
| 54
| 0.857143
| 6
| 70
| 9.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 70
| 3
| 55
| 23.333333
| 0.888889
| 0.157143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
72c48173be5f4ebd72269e6e9c1940c13425665a
| 141
|
py
|
Python
|
code/answer_3-1-49.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | 1
|
2022-03-29T13:50:12.000Z
|
2022-03-29T13:50:12.000Z
|
code/answer_3-1-49.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
code/answer_3-1-49.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
# Read two integer pairs from stdin and print YES when the first pair shares
# at least one value with the second pair, NO otherwise.
H1, W1 = map(int, input().split())
H2, W2 = map(int, input().split())
# Non-empty intersection <=> some dimension of the first pair equals one of
# the second pair, matching the original four-way OR comparison.
print("YES" if {H1, W1} & {H2, W2} else "NO")
| 35.25
| 70
| 0.553191
| 28
| 141
| 2.785714
| 0.5
| 0.153846
| 0.282051
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0.205674
| 141
| 3
| 71
| 47
| 0.589286
| 0
| 0
| 0
| 0
| 0
| 0.035461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
72c544a4f9dcc5c506b7f9372217b3feb758fcd3
| 819
|
py
|
Python
|
sports_manager/mixins.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
sports_manager/mixins.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
sports_manager/mixins.py
|
hbuyse/dj-sports-manager
|
7e32cc41347b968b4ede9ea6846de14d9504c3f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Django
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
class StaffMixin(LoginRequiredMixin, UserPassesTestMixin):
"""
Mixin allows you to require a user with `is_staff` set to True.
"""
raise_exception = True
def test_func(self):
return self.request.is_staff
class SuperuserMixin(LoginRequiredMixin, UserPassesTestMixin):
"""
Mixin allows you to require a user with `is_superuser` set to True.
"""
raise_exception = True
def test_func(self):
return self.request.is_superuser
class OwnerMixin(LoginRequiredMixin, UserPassesTestMixin):
owner_kwargs = 'username'
raise_exception = True
def test_func(self):
return self.request.user.username == self.kwargs.get(self.owner_kwargs)
| 24.818182
| 79
| 0.714286
| 95
| 819
| 6.031579
| 0.421053
| 0.25829
| 0.094241
| 0.109948
| 0.547993
| 0.547993
| 0.547993
| 0.547993
| 0.547993
| 0.547993
| 0
| 0.00152
| 0.196581
| 819
| 32
| 80
| 25.59375
| 0.869301
| 0.196581
| 0
| 0.428571
| 0
| 0
| 0.01278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0.285714
| 0.071429
| 0.214286
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
72c6c0ad298834b4519f5b56809e20963f45a9f1
| 46
|
py
|
Python
|
raw_input.py
|
Lana-Pa/Getting-Started-with-Python
|
c4822755a579b6723cc966412bd06496870d118b
|
[
"Apache-2.0"
] | null | null | null |
raw_input.py
|
Lana-Pa/Getting-Started-with-Python
|
c4822755a579b6723cc966412bd06496870d118b
|
[
"Apache-2.0"
] | null | null | null |
raw_input.py
|
Lana-Pa/Getting-Started-with-Python
|
c4822755a579b6723cc966412bd06496870d118b
|
[
"Apache-2.0"
] | null | null | null |
# NOTE: Python 2 only — `raw_input` and the `print` statement were removed in
# Python 3 (use `input` and `print(...)` there).
name = raw_input("Enter")
# Greet the user with whatever they typed.
print"Hello " + name
| 23
| 25
| 0.695652
| 7
| 46
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 46
| 2
| 26
| 23
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
72ee0e065980f02069a5c9e89690609ed82d427f
| 64
|
py
|
Python
|
apps/asset/serializer/__init__.py
|
plsof/tabops_api
|
39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8
|
[
"MIT"
] | 1
|
2019-07-31T07:34:38.000Z
|
2019-07-31T07:34:38.000Z
|
apps/asset/serializer/__init__.py
|
plsof/tabops_api
|
39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8
|
[
"MIT"
] | 9
|
2019-12-05T00:39:29.000Z
|
2022-02-10T14:13:29.000Z
|
apps/asset/serializer/__init__.py
|
plsof/tabops_api
|
39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8
|
[
"MIT"
] | null | null | null |
from .idc import IdcSerializer
from .host import HostSerializer
| 21.333333
| 32
| 0.84375
| 8
| 64
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 64
| 2
| 33
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
72f3aed5564f3f31e0d58951bc12d191d0d85912
| 55
|
py
|
Python
|
IceSpringMusicPlayer/plugins/IceSpringPlaylistPlugin/__init__.py
|
baijifeilong/rawsteelp
|
425547e6e2395bf4acb62435b18b5b3a4b7ebef4
|
[
"MIT"
] | null | null | null |
IceSpringMusicPlayer/plugins/IceSpringPlaylistPlugin/__init__.py
|
baijifeilong/rawsteelp
|
425547e6e2395bf4acb62435b18b5b3a4b7ebef4
|
[
"MIT"
] | null | null | null |
IceSpringMusicPlayer/plugins/IceSpringPlaylistPlugin/__init__.py
|
baijifeilong/rawsteelp
|
425547e6e2395bf4acb62435b18b5b3a4b7ebef4
|
[
"MIT"
] | null | null | null |
# Created by BaiJiFeiLong@gmail.com at 2022/1/24 17:09
| 27.5
| 54
| 0.763636
| 11
| 55
| 3.818182
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229167
| 0.127273
| 55
| 1
| 55
| 55
| 0.645833
| 0.945455
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f409abe051cdbb8b06224a0c113a401508244b20
| 225
|
py
|
Python
|
core/views.py
|
Bilal815/ecommerce_storee
|
45e61f1d865a65b4c52d74502b4fcab7ee6c1adf
|
[
"MIT"
] | 95
|
2020-04-13T09:02:30.000Z
|
2022-03-25T14:11:34.000Z
|
core/views.py
|
Bilal815/ecommerce_api
|
a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8
|
[
"MIT"
] | 87
|
2020-02-21T17:58:56.000Z
|
2022-03-21T21:37:05.000Z
|
core/views.py
|
Bilal815/ecommerce_api
|
a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8
|
[
"MIT"
] | 33
|
2021-01-18T09:30:29.000Z
|
2022-03-30T01:31:57.000Z
|
from django.shortcuts import render
from django.db import transaction
# class Get_Host(APIView):
# def post(self, request):
# host = request.META.get('HTTP_USER_AGENT')
# return Response({"Host": host})
| 25
| 52
| 0.68
| 29
| 225
| 5.172414
| 0.724138
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 225
| 8
| 53
| 28.125
| 0.833333
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f4216304cad26c15aa2173f37638cadd00579f0e
| 104
|
py
|
Python
|
tests/fibonacci.py
|
ssarangi/PyVyM
|
f96c46e7b8d38f938345ca915c5356b4d9c86d64
|
[
"MIT"
] | 3
|
2017-09-24T17:35:29.000Z
|
2021-02-14T21:53:03.000Z
|
tests/fibonacci.py
|
ssarangi/PyVyM
|
f96c46e7b8d38f938345ca915c5356b4d9c86d64
|
[
"MIT"
] | null | null | null |
tests/fibonacci.py
|
ssarangi/PyVyM
|
f96c46e7b8d38f938345ca915c5356b4d9c86d64
|
[
"MIT"
] | 1
|
2019-08-22T01:09:15.000Z
|
2019-08-22T01:09:15.000Z
|
def fibonacci(n):
    """Return the n-th Fibonacci number (1-indexed; fibonacci(1) == fibonacci(2) == 1).

    Rewritten iteratively: the original naive double recursion was
    exponential in `n` and recursed without bound for n < 1.

    Raises:
        ValueError: if n < 1 (the original would hit RecursionError instead).
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    a, b = 1, 1
    # After k iterations b holds fibonacci(k + 2); n - 2 iterations give fibonacci(n).
    for _ in range(n - 2):
        a, b = b, a + b
    return b
| 17.333333
| 42
| 0.538462
| 18
| 104
| 3.111111
| 0.444444
| 0.535714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070423
| 0.317308
| 104
| 5
| 43
| 20.8
| 0.71831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
f45a889c89ebfa8804c4f8845d9ca62807cf02d3
| 147
|
py
|
Python
|
pybamm/models/submodels/particle/fickian/__init__.py
|
jedgedrudd/PyBaMM
|
79c9d34978382d50e09adaf8bf74c8fa4723f759
|
[
"BSD-3-Clause"
] | 1
|
2019-10-29T19:06:04.000Z
|
2019-10-29T19:06:04.000Z
|
pybamm/models/submodels/particle/fickian/__init__.py
|
jedgedrudd/PyBaMM
|
79c9d34978382d50e09adaf8bf74c8fa4723f759
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/submodels/particle/fickian/__init__.py
|
jedgedrudd/PyBaMM
|
79c9d34978382d50e09adaf8bf74c8fa4723f759
|
[
"BSD-3-Clause"
] | null | null | null |
from .base_fickian_particle import BaseModel
from .fickian_many_particles import ManyParticles
from .fickian_single_particle import SingleParticle
| 36.75
| 51
| 0.897959
| 18
| 147
| 7
| 0.611111
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 147
| 3
| 52
| 49
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f483bf2809d647f5a04dba9e2c822fdcca1ccd3d
| 109
|
py
|
Python
|
tests/__init__.py
|
sonny-zhang/MyBlog
|
880a80c5d95f472f0301f7380addc6c31d341b70
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
sonny-zhang/MyBlog
|
880a80c5d95f472f0301f7380addc6c31d341b70
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
sonny-zhang/MyBlog
|
880a80c5d95f472f0301f7380addc6c31d341b70
|
[
"MIT"
] | null | null | null |
# @Time : 2019/3/13 17:20
# @Author : sonny.zhang
# @FileName : __init__.py
# @github : @sonny-zhang
| 21.8
| 29
| 0.59633
| 15
| 109
| 4.066667
| 0.866667
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 0.229358
| 109
| 4
| 30
| 27.25
| 0.595238
| 0.917431
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be3a323cb542283023f528c61e5eec22f86dc56d
| 91
|
py
|
Python
|
kea/test_utils/__init__.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3
|
2020-02-28T13:03:59.000Z
|
2020-09-20T06:33:04.000Z
|
kea/test_utils/__init__.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
kea/test_utils/__init__.py
|
SmartAcoustics/Kea
|
5790f18dafccfc01fe9dbe98de5bb1a5ce584c56
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3
|
2018-12-17T16:33:08.000Z
|
2020-01-21T14:10:25.000Z
|
from .base_test import (
KeaTestCase, KeaVivadoVHDLTestCase, KeaVivadoVerilogTestCase)
| 30.333333
| 65
| 0.824176
| 7
| 91
| 10.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120879
| 91
| 2
| 66
| 45.5
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
be5b24af3985c89a74b813828eda9445c8d8e2a7
| 148
|
py
|
Python
|
ex027.py
|
BrunosVieira88/Python
|
7dc105a62ede0b33d25c5864e892637ca71f2beb
|
[
"MIT"
] | null | null | null |
ex027.py
|
BrunosVieira88/Python
|
7dc105a62ede0b33d25c5864e892637ca71f2beb
|
[
"MIT"
] | null | null | null |
ex027.py
|
BrunosVieira88/Python
|
7dc105a62ede0b33d25c5864e892637ca71f2beb
|
[
"MIT"
] | null | null | null |
# Read the user's full name and print its first and last words.
# Fix: the str() wrapper was redundant (input() already returns str) and
# nome[len(nome)-1] is the long form of nome[-1]. Printed text unchanged.
nome = input('digite seu nome ').split()
print('seu primeiro nome é {}'.format(nome[0]))
print('seu ultimo nome é {}'.format(nome[-1]))
| 37
| 55
| 0.668919
| 25
| 148
| 3.96
| 0.56
| 0.161616
| 0.222222
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.094595
| 148
| 4
| 55
| 37
| 0.723881
| 0
| 0
| 0
| 0
| 0
| 0.389262
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
fe36672a9b224dcaf2163d5f4ebec01e9c9c9140
| 27
|
py
|
Python
|
tda/contrib/__init__.py
|
zhangted/tda-api
|
1169c87129b80c120217d420e4996a439c5903dc
|
[
"MIT"
] | 986
|
2020-04-14T21:50:03.000Z
|
2022-03-29T19:09:31.000Z
|
tda/contrib/__init__.py
|
zhangted/tda-api
|
1169c87129b80c120217d420e4996a439c5903dc
|
[
"MIT"
] | 243
|
2020-04-26T14:05:34.000Z
|
2022-03-12T13:02:51.000Z
|
tda/contrib/__init__.py
|
zhangted/tda-api
|
1169c87129b80c120217d420e4996a439c5903dc
|
[
"MIT"
] | 286
|
2020-04-14T22:17:04.000Z
|
2022-03-27T07:30:15.000Z
|
from . import orders, util
| 13.5
| 26
| 0.740741
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 1
| 27
| 27
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fe374344bbfcdbbd759190959389ffcbfcc889cc
| 2,266
|
py
|
Python
|
slowquant/numerical/numForce.py
|
Melisius/Hartree-Fock
|
46bf811dfcf217ce0c37ddec77d34ef00da769c3
|
[
"BSD-3-Clause"
] | 8
|
2019-12-05T16:02:56.000Z
|
2022-03-31T17:20:46.000Z
|
slowquant/numerical/numForce.py
|
erikkjellgren/SlowQuant
|
46bf811dfcf217ce0c37ddec77d34ef00da769c3
|
[
"BSD-3-Clause"
] | 1
|
2017-05-31T23:48:28.000Z
|
2017-05-31T23:49:21.000Z
|
slowquant/numerical/numForce.py
|
Melisius/Hartree-Fock
|
46bf811dfcf217ce0c37ddec77d34ef00da769c3
|
[
"BSD-3-Clause"
] | 7
|
2019-11-11T22:42:31.000Z
|
2021-12-30T20:30:42.000Z
|
import slowquant.hartreefock.runHartreeFock as HF
import numpy as np
import slowquant.basissets.BasisSet as BS
import slowquant.molecularintegrals.runMolecularIntegrals as MI
def _displaced_energy(input, set, results, j, axis, step, print_scf):
    # Displace coordinate `axis` (1=x, 2=y, 3=z) of atom row `j` by `step`,
    # rebuild the basis and the molecular integrals at the displaced
    # geometry, restore the coordinate, then run an SCF on those integrals.
    # Returns the updated results dict and the Hartree-Fock energy.
    input[j, axis] += step
    basis = BS.bassiset(input, set['basisset'])
    results = MI.runIntegrals(input, basis, set, results)
    input[j, axis] -= step
    results = HF.runHartreeFock(input, set, results, print_SCF=print_scf)
    return results, results['HFenergy']


def nForce(input, set, results, print_time='No', print_scf='Yes'):
    """Numerical Hartree-Fock forces by central finite differences.

    For every atom (rows 1..len(input)-1; row 0 is skipped, as in the
    original code) each coordinate (columns 1..3) is displaced by +/-1e-6
    and the gradient component is (E(+h) - E(-h)) / (2h).

    The signature is unchanged: `input` is the geometry array, `set` the
    settings dict, `results` the running results dict; `print_time` is
    accepted but unused (kept for backward compatibility) and `print_scf`
    is forwarded to the SCF driver.

    Returns:
        (dX, dY, dZ): numpy arrays of per-atom gradient components.
    """
    step = 10**-6  # finite-difference displacement, same value as before
    dX = np.zeros(len(input))
    dY = np.zeros(len(input))
    dZ = np.zeros(len(input))
    for j in range(1, len(input)):
        # Columns 1, 2, 3 of `input` hold the x, y, z coordinates.
        for axis, grad in ((1, dX), (2, dY), (3, dZ)):
            results, eplus = _displaced_energy(
                input, set, results, j, axis, step, print_scf)
            results, eminus = _displaced_energy(
                input, set, results, j, axis, -step, print_scf)
            grad[j] = (eplus - eminus) / (2 * step)
    return dX, dY, dZ
| 41.962963
| 77
| 0.595322
| 296
| 2,266
| 4.510135
| 0.165541
| 0.033708
| 0.078652
| 0.104869
| 0.701124
| 0.701124
| 0.701124
| 0.701124
| 0.701124
| 0.701124
| 0
| 0.035819
| 0.248455
| 2,266
| 54
| 78
| 41.962963
| 0.748092
| 0
| 0
| 0.612245
| 0
| 0
| 0.044552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.081633
| 0
| 0.122449
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fe43128c571f4ca6541a6dae01ecf3bfb2c8f296
| 5,816
|
py
|
Python
|
app/api/v1/routes/routes.py
|
Mik3y-F/sendIT-api
|
868c4f50424e258ef978a2541c8379fe5b9195f7
|
[
"MIT"
] | null | null | null |
app/api/v1/routes/routes.py
|
Mik3y-F/sendIT-api
|
868c4f50424e258ef978a2541c8379fe5b9195f7
|
[
"MIT"
] | null | null | null |
app/api/v1/routes/routes.py
|
Mik3y-F/sendIT-api
|
868c4f50424e258ef978a2541c8379fe5b9195f7
|
[
"MIT"
] | null | null | null |
from dicttoxml import dicttoxml
from flask import Blueprint, request, jsonify
from ..models.models import User, Parcel
api = Blueprint('api', __name__)
@api.route('/parcels/', methods=['GET', 'POST'])
def parcels():
    """Create a parcel from URL args (POST) or list all parcels (GET).

    Both branches serialize their payload to XML via dicttoxml and
    return the raw bytes.
    """
    if request.method == "POST":
        # POST
        # Gets name argument variable from url
        name = request.args.get('name')
        user_id = request.args.get('sender')
        user_location = request.args.get('location')
        destination = request.args.get('dest')
        parc_weight = request.args.get('parc_weight')
        if name:
            parcel = Parcel(
                name=name,
                senderId=user_id,
                delivered=False,
                presentLocation=user_location,
                pickupLocation=None,
                destination=destination,
                parcelWeight=parc_weight
            )
            parcel.save()
            response = {
                'parcelId': parcel.id,
                'parcelName': parcel.name,
                'sender': User.query.filter(User.id == user_id).first().name,
                'delivered': parcel.delivered,
                'presentLocation': parcel.presentLocation,
                'pickupLocation': parcel.pickupLocation,
                'destination': parcel.destination,
                'parcelWeight': parcel.parcelWeight
            }
            response = dicttoxml(response, custom_root='test', attr_type=False)
            # response.status_code = 201
            return response
        # BUG FIX: previously a POST without `name` fell off the end of the
        # view and returned None, which makes Flask raise a 500. Return an
        # explicit client error instead.
        return 'name is required', 400
    else:
        # GET
        all_parcels = Parcel.get_all()
        results = []
        for parcel in all_parcels:
            obj = {
                'parcelId': parcel.id,
                'parcelName': parcel.name,
                'sender': User.query.filter(User.id == parcel.senderId).first().name,
                'delivered': parcel.delivered,
                'presentLocation': parcel.presentLocation,
                'pickupLocation': parcel.pickupLocation,
                'destination': parcel.destination,
                'parcelWeight': parcel.parcelWeight
            }
            results.append(obj)
        response = dicttoxml(results, custom_root='test', attr_type=False)
        # response.status_code = 200
        return response
@api.route('/parcels/<int:parcel_id>/', methods=['GET'])
def get_specific_parcel(parcel_id):
    """Return a single parcel by its id as XML; 404 if it does not exist."""
    # GET
    parcel = Parcel.query.filter(Parcel.id == parcel_id).first_or_404()
    results = []
    obj = {
        'parcelId': parcel.id,
        'parcelName': parcel.name,
        # BUG FIX: the sender lookup used the class attribute
        # `Parcel.senderId` instead of this instance's `parcel.senderId`,
        # so the query did not filter by this parcel's sender (every other
        # view here uses the instance attribute).
        'sender': User.query.filter(User.id == parcel.senderId).first().name,
        'delivered': parcel.delivered,
        'presentLocation': parcel.presentLocation,
        'pickupLocation': parcel.pickupLocation,
        'destination': parcel.destination,
        'parcelWeight': parcel.parcelWeight
    }
    results.append(obj)
    response = dicttoxml(results, custom_root='test', attr_type=False)
    # response.status_code = 200
    return response
@api.route('/users/<int:user_id>/parcels/', methods=['GET'])
def get_user_parcels(user_id):
    """Return every parcel whose sender is `user_id`, serialized as XML."""
    matching = Parcel.query.filter(Parcel.senderId == user_id)
    serialized = []
    for item in matching:
        # One DB lookup per parcel for the sender name, as before.
        sender_name = User.query.filter(User.id == user_id).first().name
        serialized.append({
            'parcelId': item.id,
            'parcelName': item.name,
            'sender': sender_name,
            'delivered': item.delivered,
            'presentLocation': item.presentLocation,
            'pickupLocation': item.pickupLocation,
            'destination': item.destination,
            'parcelWeight': item.parcelWeight
        })
    response = dicttoxml(serialized, custom_root='test', attr_type=False)
    # response.status_code = 200
    return response
@api.route('/parcels/<int:parcel_id>/cancel', methods=['PUT'])
def cancel_parcel(parcel_id):
    """Delete (cancel) the parcel with id `parcel_id` and return its last state as XML."""
    # BUG FIX: the filter compared `Parcel.senderId` to the parcel id from
    # the URL, so the wrong parcel (or none) was cancelled; the route takes
    # a parcel id, so filter on `Parcel.id` like get_specific_parcel does.
    parcel = Parcel.query.filter(Parcel.id == parcel_id).first_or_404()
    results = []
    obj = {
        'parcelId': parcel.id,
        'parcelName': parcel.name,
        # BUG FIX: the sender lookup compared User.id to `parcel.id`
        # (the parcel's own id) instead of `parcel.senderId`.
        'sender': User.query.filter(User.id == parcel.senderId).first().name,
        'delivered': parcel.delivered,
        'presentLocation': parcel.presentLocation,
        'pickupLocation': parcel.pickupLocation,
        'destination': parcel.destination,
        'parcelWeight': parcel.parcelWeight
    }
    parcel.delete()
    results.append(obj)
    response = dicttoxml(results, custom_root='test', attr_type=False)
    # BUG FIX: dicttoxml returns bytes, which has no status_code attribute,
    # so this line raised AttributeError; commented out to match the other
    # views in this file.
    # response.status_code = 200
    return response
@api.route('/users/', methods=['GET', 'POST'])
def users():
    """Create a user from URL args (POST) or list all users (GET), as XML."""
    if request.method == "POST":
        # POST
        # Gets name argument variable from url
        name = request.args.get('name')
        email = request.args.get('email')
        phone = request.args.get('phone')
        if name:
            user = User(
                name=name,
                email=email,
                mobileNo=phone,
                isAdmin=0
            )
            user.save()
            response = {
                'userId': user.id,
                'userName': user.name,
                'email': user.email,
                'mobileNo': user.mobileNo,
            }
            response = dicttoxml(response, custom_root='test', attr_type=False)
            # response.status_code = 201
            return response
        # BUG FIX: previously a POST without `name` fell off the end of the
        # view and returned None, which makes Flask raise a 500. Return an
        # explicit client error instead.
        return 'name is required', 400
    else:
        # GET
        all_users = User.get_all()
        results = []
        for user in all_users:
            obj = {
                'userId': user.id,
                'userName': user.name,
                'email': user.email,
                'mobileNo': user.mobileNo,
            }
            results.append(obj)
        response = dicttoxml(results, custom_root='test', attr_type=False)
        # response.status_code = 200
        return response
| 29.673469
| 85
| 0.566025
| 554
| 5,816
| 5.837545
| 0.140794
| 0.025974
| 0.034632
| 0.038961
| 0.741806
| 0.711812
| 0.711812
| 0.711812
| 0.711812
| 0.711812
| 0
| 0.007026
| 0.314821
| 5,816
| 196
| 86
| 29.673469
| 0.804517
| 0.044188
| 0
| 0.609929
| 0
| 0
| 0.124234
| 0.015326
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035461
| false
| 0
| 0.021277
| 0
| 0.106383
| 0.014184
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fe505f617ec49460bfba58577cd08491bd2d14b0
| 14,953
|
py
|
Python
|
mkt/api/tests/test_authorization.py
|
spasovski/zamboni
|
c7f4714029e3b2dc918ddfc2103f8e051193c14d
|
[
"BSD-3-Clause"
] | 1
|
2021-07-29T00:51:09.000Z
|
2021-07-29T00:51:09.000Z
|
mkt/api/tests/test_authorization.py
|
imclab/olympia
|
35bc9c484e384bafab520ca8b5d5b0f8da5b62c0
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/api/tests/test_authorization.py
|
imclab/olympia
|
35bc9c484e384bafab520ca8b5d5b0f8da5b62c0
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.auth.models import AnonymousUser, User
from rest_framework.permissions import AllowAny, BasePermission
from mock import Mock
from nose.tools import eq_, ok_
from test_utils import RequestFactory
from amo.tests import TestCase
from users.models import UserProfile
from mkt.api.authorization import (AllowAppOwner, AllowNone, AllowOwner,
AllowRelatedAppOwner, AllowReadOnlyIfPublic,
AllowSelf, AnyOf, ByHttpMethod, flag,
GroupPermission, switch)
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
class TestWaffle(TestCase):
    """Tests for the waffle-backed `flag` and `switch` permission factories:
    permission is granted iff the named flag/switch exists."""
    def setUp(self):
        super(TestWaffle, self).setUp()
        self.request = RequestFactory().get('/')
    def test_waffle_flag(self):
        self.create_flag('foo')
        ok_(flag('foo')().has_permission(self.request, ''))
    def test_not_waffle_flag(self):
        # Flag was never created, so permission must be denied.
        ok_(not flag('foo')().has_permission(self.request, ''))
    def test_waffle_switch(self):
        self.create_switch('foo')
        ok_(switch('foo')().has_permission(self.request, ''))
    def test_not_switch_flag(self):
        ok_(not switch('foo')().has_permission(self.request, ''))
class TestAllowSelfAuthorization(TestCase):
    """AllowSelf: authenticated users pass the view check, and the object
    check passes only when the object is the requesting user."""
    fixtures = fixture('user_2519', 'user_999')
    def setUp(self):
        self.permission = AllowSelf()
        self.anonymous = AnonymousUser()
        self.user = User.objects.get(pk=2519)
        # Requests start anonymous; individual tests attach a real user.
        self.request = RequestFactory().get('/')
        self.request.user = self.anonymous
        self.request.amo_user = None
    def test_has_permission_anonymous(self):
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_permission_user(self):
        self.request.user = self.user
        self.request.amo_user = self.request.user.get_profile()
        eq_(self.permission.has_permission(self.request, 'myview'), True)
    def test_has_object_permission_anonymous(self):
        eq_(self.permission.has_object_permission(
            self.request, 'myview', self.user), False)
    def test_has_object_permission_user(self):
        self.request.user = self.user
        self.request.amo_user = self.request.user.get_profile()
        obj = self.user
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            True)
    def test_has_object_permission_different_user(self):
        # pk=999 is a different user than the object (pk=2519): deny.
        self.request.user = User.objects.get(pk=999)
        self.request.amo_user = self.request.user.get_profile()
        obj = self.user
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
class TestAllowOwner(TestCase):
    """AllowOwner: object check passes only when `obj.user` is the
    requesting user."""
    fixtures = fixture('user_2519', 'user_999')
    def setUp(self):
        self.permission = AllowOwner()
        self.anonymous = AnonymousUser()
        self.user = User.objects.get(pk=2519)
        self.request = RequestFactory().get('/')
        self.request.user = self.anonymous
        self.request.amo_user = None
    def test_has_permission_anonymous(self):
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_permission_user(self):
        self.request.user = self.user
        self.request.amo_user = self.request.user.get_profile()
        eq_(self.permission.has_permission(self.request, 'myview'), True)
    def test_has_object_permission_user(self):
        self.request.user = self.user
        self.request.amo_user = self.request.user.get_profile()
        # A Mock stands in for any ownable object carrying a `.user`.
        obj = Mock()
        obj.user = self.user
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            True)
    def test_has_object_permission_different_user(self):
        self.request.user = User.objects.get(pk=999)
        self.request.amo_user = self.request.user.get_profile()
        obj = Mock()
        obj.user = self.user
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
class PartialFailPermission(BasePermission):
    """Test helper: the object-level check always fails (the view-level
    check is inherited from BasePermission unchanged)."""
    def has_object_permission(self, request, view, obj):
        return False
class FailPartialPermission(BasePermission):
    """Test helper: the view-level check always fails (the object-level
    check is inherited from BasePermission unchanged)."""
    def has_permission(self, request, view):
        return False
class TestAnyOf(TestCase):
    """AnyOf: grants when at least one wrapped permission grants, in
    either argument order; denies only when all wrapped permissions deny."""
    def test_has_permission(self):
        request = RequestFactory().get('/')
        ok_(AnyOf(AllowNone, AllowAny)().has_permission(
            request, 'myview'))
        ok_(AnyOf(AllowAny, AllowNone)().has_permission(
            request, 'myview'))
    def test_has_permission_fail(self):
        request = RequestFactory().get('/')
        ok_(not AnyOf(AllowNone, AllowNone)().has_permission(
            request, 'myview'))
    def test_has_object_permission(self):
        request = RequestFactory().get('/')
        ok_(AnyOf(AllowNone, AllowAny
                  )().has_object_permission(request, 'myview', None))
        ok_(AnyOf(AllowAny, AllowNone
                  )().has_object_permission(request, 'myview', None))
    def test_has_object_permission_fail(self):
        request = RequestFactory().get('/')
        ok_(not AnyOf(AllowNone, AllowNone
                      )().has_object_permission(request, 'myview', None))
    def test_has_object_permission_partial_fail(self):
        # Each wrapped permission fails a different level; combined they
        # must still deny the object check.
        request = RequestFactory().get('/')
        ok_(not AnyOf(FailPartialPermission, PartialFailPermission
                      )().has_object_permission(request, 'myview', None))
class TestAllowNone(TestCase):
    """AllowNone: every check is denied regardless of the requester."""
    def setUp(self):
        self.permission = AllowNone()
        self.anonymous = AnonymousUser()
        self.user = User()
        self.request = RequestFactory().get('/')
        self.request.user = self.anonymous
        self.request.amo_user = None
    def test_has_permission_anonymous(self):
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_permission_user(self):
        self.request.user = Mock()
        # BUG FIX: was `self.request_amo_user = Mock()` (underscore instead
        # of attribute access), which set an attribute on the test case and
        # left request.amo_user as None; every sibling class writes
        # `self.request.amo_user`.
        self.request.amo_user = Mock()
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_object_permission_anonymous(self):
        obj = Mock()
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
    def test_has_object_permission_user(self):
        self.request.user = Mock()
        # BUG FIX: same typo as in test_has_permission_user above.
        self.request.amo_user = Mock()
        obj = Mock()
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
class TestAllowAppOwner(TestCase):
    """AllowAppOwner: the object check passes only when the requesting
    user is among the app's authors."""
    fixtures = fixture('user_2519', 'webapp_337141')
    def setUp(self):
        self.app = Webapp.objects.get(pk=337141)
        self.permission = AllowAppOwner()
        self.anonymous = AnonymousUser()
        # First author of the fixture app acts as the owner.
        self.owner = self.app.authors.all()[0]
        self.request = RequestFactory().get('/')
        self.request.user = self.anonymous
        self.request.amo_user = None
    def test_has_permission_anonymous(self):
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_permission_user(self):
        self.request.user = self.owner.user
        self.request.amo_user = self.owner
        eq_(self.permission.has_permission(self.request, 'myview'), True)
    def test_has_object_permission_user(self):
        self.request.user = self.owner.user
        self.request.amo_user = self.owner
        obj = self.app
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            True)
    def test_has_object_permission_different_user(self):
        self.request.user = User.objects.get(pk=2519)
        self.request.amo_user = self.request.user.get_profile()
        obj = self.app
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
    def test_has_object_permission_anonymous(self):
        obj = self.app
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
class TestAllowRelatedAppOwner(TestCase):
    """AllowRelatedAppOwner: like AllowAppOwner, but the app is reached
    through the object's `.addon` attribute."""
    fixtures = fixture('user_2519', 'webapp_337141')
    def setUp(self):
        self.app = Webapp.objects.get(pk=337141)
        self.permission = AllowRelatedAppOwner()
        self.anonymous = AnonymousUser()
        self.owner = self.app.authors.all()[0]
        self.request = RequestFactory().get('/')
        self.request.user = self.anonymous
        self.request.amo_user = None
    def test_has_permission_anonymous(self):
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_permission_user(self):
        self.request.user = self.owner.user
        self.request.amo_user = self.owner
        eq_(self.permission.has_permission(self.request, 'myview'), True)
    def test_has_object_permission_user(self):
        self.request.user = self.owner.user
        self.request.amo_user = self.owner
        # The checked object is not the app itself; it points at it.
        obj = Mock()
        obj.addon = self.app
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            True)
    def test_has_object_permission_different_user(self):
        self.request.user = User.objects.get(pk=2519)
        self.request.amo_user = self.request.user.get_profile()
        obj = Mock()
        obj.addon = self.app
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
class TestAllowReadOnlyIfPublic(TestCase):
    """AllowReadOnlyIfPublic: safe HTTP verbs pass the view check; the
    object check additionally requires obj.is_public() to be truthy.
    Unsafe verbs are always denied."""
    def setUp(self):
        self.permission = AllowReadOnlyIfPublic()
        self.anonymous = AnonymousUser()
        self.request_factory = RequestFactory()
        # 'patch' is missing because it's absent from RequestFactory in
        # django < 1.5. Usually we don't special case 'put' vs 'patch' in
        # permissions code though, so it's fine.
        self.unsafe_methods = ('post', 'put', 'delete')
        self.safe_methods = ('get', 'options', 'head')
    def _request(self, verb):
        # Build an anonymous request using the given HTTP verb.
        request = getattr(self.request_factory, verb)('/')
        request.user = self.anonymous
        request.amo_user = None
        return request
    def test_has_permission(self):
        for verb in self.safe_methods:
            eq_(self.permission.has_permission(self._request(verb), 'myview'),
                True)
        for verb in self.unsafe_methods:
            eq_(self.permission.has_permission(self._request(verb), 'myview'),
                False)
    def test_has_object_permission_public(self):
        obj = Mock()
        obj.is_public.return_value = True
        for verb in self.safe_methods:
            eq_(self.permission.has_object_permission(self._request(verb),
                                                      'myview', obj), True)
        for verb in self.unsafe_methods:
            eq_(self.permission.has_object_permission(self._request(verb),
                                                      'myview', obj), False)
    def test_has_object_permission_not_public(self):
        obj = Mock()
        obj.is_public.return_value = False
        # Non-public object: every verb, safe or not, is denied.
        for verb in (self.unsafe_methods + self.safe_methods):
            eq_(self.permission.has_object_permission(self._request(verb),
                                                      'myview', obj), False)
class TestGroupPermission(TestCase):
    """GroupPermission('Drinkers', 'Beer'): grants only when the
    requesting user holds exactly the Drinkers:Beer ACL; a different
    permission in the same group (Drinkers:Scotch) is not enough."""
    fixtures = fixture('user_2519')
    def setUp(self):
        self.permission = GroupPermission('Drinkers', 'Beer')
        self.obj = Mock()
        self.profile = UserProfile.objects.get(pk=2519)
        self.anonymous = AnonymousUser()
        self.request = RequestFactory().get('/')
        self.request.user = self.anonymous
    def test_has_permission_user_without(self):
        self.request.user = self.profile.user
        self.request.amo_user = self.profile
        self.request.groups = self.profile.groups.all()
        # Related but different ACL: must not grant.
        self.grant_permission(self.profile, 'Drinkers:Scotch')
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_permission_user_with(self):
        self.request.user = self.profile.user
        self.request.amo_user = self.profile
        self.request.groups = self.profile.groups.all()
        self.grant_permission(self.profile, 'Drinkers:Beer')
        eq_(self.permission.has_permission(self.request, 'myview'), True)
    def test_has_permission_anonymous(self):
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_has_object_permission_user_without(self):
        self.request.user = self.profile.user
        self.request.amo_user = self.profile
        self.request.groups = self.profile.groups.all()
        self.grant_permission(self.profile, 'Drinkers:Scotch')
        obj = Mock()
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
    def test_has_object_permission_user_with(self):
        self.request.user = self.profile.user
        self.request.amo_user = self.profile
        self.request.groups = self.profile.groups.all()
        self.grant_permission(self.profile, 'Drinkers:Beer')
        obj = Mock()
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            True)
    def test_has_object_permission_anonymous(self):
        obj = Mock()
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
class TestByHttpMethodPermission(TestCase):
    """ByHttpMethod: dispatches to a per-verb permission; verbs without a
    configured permission (here anything but 'get') are denied."""
    def setUp(self):
        self.get_permission = Mock
        self.patch_permission = Mock
        self.post_permission = Mock
        self.put_permission = Mock
        # Only 'get' is mapped; other verbs are deliberately absent.
        self.permission = ByHttpMethod({
            'get': self.get_permission,
        })
        self.set_permission_mock('get', True)
    def set_permission_mock(self, method, value):
        # Helper: force the verb's view-level answer to `value`.
        mock = self.permission.method_permissions[method]
        mock.has_permission.return_value = value
    def set_object_permission_mock(self, method, value):
        # Helper: force the verb's object-level answer to `value`.
        mock = self.permission.method_permissions[method]
        mock.has_object_permission.return_value = value
    def test_get(self):
        self.request = RequestFactory().get('/')
        eq_(self.permission.has_permission(self.request, 'myview'), True)
        self.set_permission_mock('get', False)
        eq_(self.permission.has_permission(self.request, 'myview'), False)
    def test_get_obj(self):
        obj = Mock()
        self.request = RequestFactory().get('/')
        self.set_object_permission_mock('get', True)
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            True)
        self.set_object_permission_mock('get', False)
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
    def test_missing_method(self):
        self.request = RequestFactory().post('/')
        eq_(self.permission.has_permission(self.request, 'myview'), False)
        obj = Mock()
        self.request = RequestFactory().post('/')
        eq_(self.permission.has_object_permission(self.request, 'myview', obj),
            False)
        self.request = RequestFactory().options('/')
        eq_(self.permission.has_permission(self.request, 'myview'), False)
| 36.739558
| 79
| 0.657995
| 1,741
| 14,953
| 5.433084
| 0.079265
| 0.144201
| 0.106565
| 0.080347
| 0.810234
| 0.767417
| 0.737499
| 0.734221
| 0.694365
| 0.657258
| 0
| 0.006923
| 0.227178
| 14,953
| 406
| 80
| 36.830049
| 0.811613
| 0.010968
| 0
| 0.689655
| 0
| 0
| 0.034833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178683
| false
| 0
| 0.031348
| 0.00627
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fe59735f3fbc8036ba1f1c2a99d0922e52a92b11
| 47
|
py
|
Python
|
2022/BaekJoon/11720.py
|
dongdong97/TIL
|
22fab3bc5509ac46510071cb6a7ce390fd4df75a
|
[
"MIT"
] | null | null | null |
2022/BaekJoon/11720.py
|
dongdong97/TIL
|
22fab3bc5509ac46510071cb6a7ce390fd4df75a
|
[
"MIT"
] | null | null | null |
2022/BaekJoon/11720.py
|
dongdong97/TIL
|
22fab3bc5509ac46510071cb6a7ce390fd4df75a
|
[
"MIT"
] | null | null | null |
# BaekJoon 11720: the first input line is presumably the digit count (read
# but never used); the second line is a digit string whose digit sum is printed.
a = int(input())
print(sum(map(int,input())))
| 11.75
| 28
| 0.595745
| 8
| 47
| 3.5
| 0.75
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 3
| 29
| 15.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
fe8e76865a32060552cfde5f5007bd730c3d4617
| 921
|
py
|
Python
|
app/main.py
|
sayyamsachdev/leaninindia2.0
|
45aef47fc9115413b4ff5e326d38db108ab184fc
|
[
"MIT"
] | null | null | null |
app/main.py
|
sayyamsachdev/leaninindia2.0
|
45aef47fc9115413b4ff5e326d38db108ab184fc
|
[
"MIT"
] | null | null | null |
app/main.py
|
sayyamsachdev/leaninindia2.0
|
45aef47fc9115413b4ff5e326d38db108ab184fc
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, jsonify, request
import json as JSON
from flask_bower import Bower
app = Flask(__name__)  # WSGI application object for the site
Bower(app)  # register Flask-Bower on the app (static asset serving)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/about", methods = ["GET"])
def render_team():
return render_template("index.html")
@app.route("/circles", methods = ["GET"])
def render_circles():
return render_template("index.html")
@app.route("/events", methods = ["GET"])
def render_events():
return render_template("events.html")
@app.route("/awarenees", methods = ["GET"])
def render_awareness():
return render_template("index.html")
@app.route("/blog", methods = ["GET"])
def render_blog():
return render_template("index.html")
@app.route("/contact-us", methods = ["GET"])
def render_contact_us():
return render_template("index.html")
@app.route('/<path:p>')
def ui(p):
    # Catch-all: any other path falls back to index.html.
    return render_template("index.html")
app.run(debug=True)
| 23.025
| 58
| 0.710098
| 127
| 921
| 4.984252
| 0.267717
| 0.199052
| 0.252765
| 0.276461
| 0.401264
| 0.401264
| 0.350711
| 0
| 0
| 0
| 0
| 0
| 0.103149
| 921
| 40
| 59
| 23.025
| 0.766344
| 0
| 0
| 0.233333
| 0
| 0
| 0.169197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0
| 0.1
| 0.266667
| 0.633333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fe94eceefdc4766f7b5421ef417c657bcf291fd0
| 293
|
py
|
Python
|
audio_separation/__init__.py
|
SAGNIKMJR/move2hear-active-AV-separation
|
3c6887aeb94b2a07983469bfd517ca277bd4124a
|
[
"MIT"
] | 8
|
2021-10-05T08:03:32.000Z
|
2022-02-22T07:08:19.000Z
|
audio_separation/__init__.py
|
SAGNIKMJR/move2hear-active-AV-separation
|
3c6887aeb94b2a07983469bfd517ca277bd4124a
|
[
"MIT"
] | 1
|
2021-12-02T00:21:48.000Z
|
2021-12-28T19:07:14.000Z
|
audio_separation/__init__.py
|
SAGNIKMJR/move2hear-active-AV-separation
|
3c6887aeb94b2a07983469bfd517ca277bd4124a
|
[
"MIT"
] | null | null | null |
from audio_separation.rl.ppo.ppo_trainer import PPOTrainer, RolloutStoragePol, RolloutStorageSep
from audio_separation.pretrain.passive.passive_trainer import PassiveTrainer
# Public API of the package.
# NOTE(review): "BaseTrainer" and "BaseRLTrainer" are listed below but not
# imported in the lines visible here — presumably imported elsewhere in this
# file; if not, ``from audio_separation import *`` will fail on them. Confirm.
__all__ = ["BaseTrainer", "BaseRLTrainer", "PPOTrainer", "RolloutStoragePol", "RolloutStorageSep", "PassiveTrainer"]
| 48.833333
| 116
| 0.836177
| 27
| 293
| 8.777778
| 0.592593
| 0.075949
| 0.160338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068259
| 293
| 5
| 117
| 58.6
| 0.868132
| 0
| 0
| 0
| 0
| 0
| 0.279863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.666667
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
22b23b21f8d6616bd04a7a64373464cc32a07519
| 155
|
py
|
Python
|
docs/en/docs_src/get_updates/get_updates_package.py
|
AliRn76/rubika-bot
|
203da2e585f03d6b2cef96cbd7a68b471e010db7
|
[
"MIT"
] | 1
|
2022-03-30T10:33:33.000Z
|
2022-03-30T10:33:33.000Z
|
docs/fa/docs_src/get_updates/get_updates_package.py
|
AliRn76/rubika-bot
|
203da2e585f03d6b2cef96cbd7a68b471e010db7
|
[
"MIT"
] | null | null | null |
docs/fa/docs_src/get_updates/get_updates_package.py
|
AliRn76/rubika-bot
|
203da2e585f03d6b2cef96cbd7a68b471e010db7
|
[
"MIT"
] | null | null | null |
# Example: fetch pending updates for a Rubika bot.
from rubika_bot.requests import get_updates
# NOTE(review): ``Update`` is imported but not used below — presumably shown so
# readers know the model type of the returned updates; confirm against the docs.
from rubika_bot.models import Update
# ``get_updates`` returns a pair; the second element is discarded here.
updates, _ = get_updates(
    token='SUPER_SECRET_TOKEN',  # placeholder — substitute a real bot token
    limit=10,  # fetch at most 10 updates
)
| 22.142857
| 43
| 0.774194
| 22
| 155
| 5.136364
| 0.636364
| 0.176991
| 0.230089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.148387
| 155
| 7
| 44
| 22.142857
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
22fa3bd9ce0189b4f75b75b24b9aea58eb338e87
| 72
|
py
|
Python
|
hordak/models/__init__.py
|
PetrDlouhy/django-hordak
|
71c141928c5a2cc102bcfd710d7bdf17093933c9
|
[
"MIT"
] | 2
|
2016-09-05T08:58:53.000Z
|
2016-09-26T10:49:07.000Z
|
hordak/models/__init__.py
|
PetrDlouhy/django-hordak
|
71c141928c5a2cc102bcfd710d7bdf17093933c9
|
[
"MIT"
] | 3
|
2016-11-06T13:14:29.000Z
|
2016-11-06T13:57:58.000Z
|
hordak/models/__init__.py
|
waldocollective/django-hordak
|
dc9b8e5008954ca0f4b089d89348e7dec4301f65
|
[
"MIT"
] | null | null | null |
from .core import * # noqa
from .statement_csv_import import * # noqa
| 24
| 43
| 0.722222
| 10
| 72
| 5
| 0.6
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 72
| 2
| 44
| 36
| 0.862069
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fe0288cd5be80df45449a3d577648d814ebf2a42
| 13,622
|
py
|
Python
|
protos/gen/python/protos/public/monitoring/DataMonitoringService_pb2_grpc.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 835
|
2017-02-08T20:14:24.000Z
|
2020-03-12T17:37:49.000Z
|
protos/gen/python/protos/public/monitoring/DataMonitoringService_pb2_grpc.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 651
|
2019-04-18T12:55:07.000Z
|
2022-03-31T23:45:09.000Z
|
protos/gen/python/protos/public/monitoring/DataMonitoringService_pb2_grpc.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 170
|
2017-02-13T14:49:22.000Z
|
2020-02-19T17:59:12.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from ..monitoring import DataMonitoringService_pb2 as monitoring_dot_DataMonitoringService__pb2
class DataMonitoringServiceStub(object):
    """Service definitions

    Client-side stub for ai.verta.monitoring.DataMonitoringService: one
    unary-unary callable attribute per RPC. Generated by the gRPC protocol
    compiler — do not edit by hand.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Each attribute serializes the request protobuf and deserializes the
        # ``Response`` message nested inside the corresponding request type.
        self.createMonitoredEntity = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/createMonitoredEntity',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.CreateMonitoredEntityRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.CreateMonitoredEntityRequest.Response.FromString,
        )
        self.updateMonitoredEntity = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/updateMonitoredEntity',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.UpdateMonitoredEntityRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.UpdateMonitoredEntityRequest.Response.FromString,
        )
        self.findMonitoredEntity = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/findMonitoredEntity',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.FindMonitoredEntityRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.FindMonitoredEntityRequest.Response.FromString,
        )
        self.deleteMonitoredEntity = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/deleteMonitoredEntity',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.DeleteMonitoredEntityRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.DeleteMonitoredEntityRequest.Response.FromString,
        )
        self.getProfiler = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/getProfiler',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerRequest.Response.FromString,
        )
        self.createProfiler = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/createProfiler',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.CreateProfilerRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.CreateProfilerRequest.Response.FromString,
        )
        self.updateProfiler = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/updateProfiler',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.UpdateProfilerRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.UpdateProfilerRequest.Response.FromString,
        )
        self.listProfilers = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/listProfilers',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.ListProfilersRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.ListProfilersRequest.Response.FromString,
        )
        self.deleteProfiler = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/deleteProfiler',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.DeleteProfilerRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.DeleteProfilerRequest.Response.FromString,
        )
        self.getProfilerStatus = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/getProfilerStatus',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerStatusRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerStatusRequest.Response.FromString,
        )
        self.findProfilersForMonitoredEntity = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/findProfilersForMonitoredEntity',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.FindProfilersForMonitoredEntityRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.FindProfilersForMonitoredEntityRequest.Response.FromString,
        )
        self.enableProfiler = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/enableProfiler',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.EnableProfilerRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.EnableProfilerRequest.Response.FromString,
        )
        self.disableProfiler = channel.unary_unary(
            '/ai.verta.monitoring.DataMonitoringService/disableProfiler',
            request_serializer=monitoring_dot_DataMonitoringService__pb2.DisableProfilerRequest.SerializeToString,
            response_deserializer=monitoring_dot_DataMonitoringService__pb2.DisableProfilerRequest.Response.FromString,
        )
class DataMonitoringServiceServicer(object):
    """Service definitions

    Server-side skeleton: every handler reports UNIMPLEMENTED until overridden
    by a concrete subclass. Generated by the gRPC protocol compiler — do not
    edit by hand.
    """

    def createMonitoredEntity(self, request, context):
        # missing associated documentation comment in .proto file
        pass  # no-op; the UNIMPLEMENTED boilerplate below still runs
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def updateMonitoredEntity(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def findMonitoredEntity(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteMonitoredEntity(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getProfiler(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def createProfiler(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def updateProfiler(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def listProfilers(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteProfiler(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getProfilerStatus(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def findProfilersForMonitoredEntity(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def enableProfiler(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def disableProfiler(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_DataMonitoringServiceServicer_to_server(servicer, server):
    """Register ``servicer``'s handlers for DataMonitoringService on ``server``.

    Generated by the gRPC protocol compiler — do not edit by hand.
    """
    # Map each RPC name to a unary-unary handler that deserializes the request
    # type and serializes its nested ``Response`` message.
    rpc_method_handlers = {
        'createMonitoredEntity': grpc.unary_unary_rpc_method_handler(
            servicer.createMonitoredEntity,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.CreateMonitoredEntityRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.CreateMonitoredEntityRequest.Response.SerializeToString,
        ),
        'updateMonitoredEntity': grpc.unary_unary_rpc_method_handler(
            servicer.updateMonitoredEntity,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.UpdateMonitoredEntityRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.UpdateMonitoredEntityRequest.Response.SerializeToString,
        ),
        'findMonitoredEntity': grpc.unary_unary_rpc_method_handler(
            servicer.findMonitoredEntity,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.FindMonitoredEntityRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.FindMonitoredEntityRequest.Response.SerializeToString,
        ),
        'deleteMonitoredEntity': grpc.unary_unary_rpc_method_handler(
            servicer.deleteMonitoredEntity,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.DeleteMonitoredEntityRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.DeleteMonitoredEntityRequest.Response.SerializeToString,
        ),
        'getProfiler': grpc.unary_unary_rpc_method_handler(
            servicer.getProfiler,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerRequest.Response.SerializeToString,
        ),
        'createProfiler': grpc.unary_unary_rpc_method_handler(
            servicer.createProfiler,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.CreateProfilerRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.CreateProfilerRequest.Response.SerializeToString,
        ),
        'updateProfiler': grpc.unary_unary_rpc_method_handler(
            servicer.updateProfiler,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.UpdateProfilerRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.UpdateProfilerRequest.Response.SerializeToString,
        ),
        'listProfilers': grpc.unary_unary_rpc_method_handler(
            servicer.listProfilers,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.ListProfilersRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.ListProfilersRequest.Response.SerializeToString,
        ),
        'deleteProfiler': grpc.unary_unary_rpc_method_handler(
            servicer.deleteProfiler,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.DeleteProfilerRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.DeleteProfilerRequest.Response.SerializeToString,
        ),
        'getProfilerStatus': grpc.unary_unary_rpc_method_handler(
            servicer.getProfilerStatus,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerStatusRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.GetProfilerStatusRequest.Response.SerializeToString,
        ),
        'findProfilersForMonitoredEntity': grpc.unary_unary_rpc_method_handler(
            servicer.findProfilersForMonitoredEntity,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.FindProfilersForMonitoredEntityRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.FindProfilersForMonitoredEntityRequest.Response.SerializeToString,
        ),
        'enableProfiler': grpc.unary_unary_rpc_method_handler(
            servicer.enableProfiler,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.EnableProfilerRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.EnableProfilerRequest.Response.SerializeToString,
        ),
        'disableProfiler': grpc.unary_unary_rpc_method_handler(
            servicer.disableProfiler,
            request_deserializer=monitoring_dot_DataMonitoringService__pb2.DisableProfilerRequest.FromString,
            response_serializer=monitoring_dot_DataMonitoringService__pb2.DisableProfilerRequest.Response.SerializeToString,
        ),
    }
    # One generic handler dispatches all the RPCs of this service.
    generic_handler = grpc.method_handlers_generic_handler(
        'ai.verta.monitoring.DataMonitoringService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 54.270916
| 138
| 0.799002
| 1,153
| 13,622
| 9.114484
| 0.079792
| 0.123323
| 0.171472
| 0.186602
| 0.804358
| 0.804358
| 0.528214
| 0.241222
| 0.241222
| 0.241222
| 0
| 0.00459
| 0.136397
| 13,622
| 250
| 139
| 54.488
| 0.888728
| 0.065262
| 0
| 0.315534
| 1
| 0
| 0.129856
| 0.072414
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072816
| false
| 0.063107
| 0.009709
| 0
| 0.092233
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
fe065fde30eae1593305d8b80ea065d6f6da2ca7
| 77
|
py
|
Python
|
Python 101/Chapter 2/String_Formatting_4.py
|
enemy123456789/Python-101-Notes
|
aafd38826f18b3af11d5ce4c16d29bbf3de915cd
|
[
"Apache-2.0"
] | null | null | null |
Python 101/Chapter 2/String_Formatting_4.py
|
enemy123456789/Python-101-Notes
|
aafd38826f18b3af11d5ce4c16d29bbf3de915cd
|
[
"Apache-2.0"
] | null | null | null |
Python 101/Chapter 2/String_Formatting_4.py
|
enemy123456789/Python-101-Notes
|
aafd38826f18b3af11d5ce4c16d29bbf3de915cd
|
[
"Apache-2.0"
] | null | null | null |
# printf-style formatting with a mapping: "%(lang)s" is looked up as the key
# "lang" in the dict on the right-hand side of the % operator.
print("%(lang)s is fun!" % {"lang": "test"})
#Output
"""
test is fun!
"""
| 9.625
| 43
| 0.519481
| 11
| 77
| 3.636364
| 0.727273
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168831
| 77
| 7
| 44
| 11
| 0.625
| 0.077922
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a3ad105e291566b132b2a5f3b369487748aa1efd
| 40
|
py
|
Python
|
mw/__init__.py
|
he7d3r/Mediawiki-Utilities
|
717c30f8e74fa1d9975900b16bc7dff53fe9deb2
|
[
"MIT"
] | null | null | null |
mw/__init__.py
|
he7d3r/Mediawiki-Utilities
|
717c30f8e74fa1d9975900b16bc7dff53fe9deb2
|
[
"MIT"
] | null | null | null |
mw/__init__.py
|
he7d3r/Mediawiki-Utilities
|
717c30f8e74fa1d9975900b16bc7dff53fe9deb2
|
[
"MIT"
] | null | null | null |
from .types import Timestamp, Namespace
| 20
| 39
| 0.825
| 5
| 40
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 40
| 1
| 40
| 40
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a3ba84ec1b0260ff7d51086309ce170aac667a2e
| 229
|
py
|
Python
|
example/vault/write.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 2,293
|
2015-08-14T22:39:31.000Z
|
2022-03-31T12:44:49.000Z
|
example/vault/write.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 676
|
2015-08-20T19:29:56.000Z
|
2022-03-31T13:45:51.000Z
|
example/vault/write.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 255
|
2015-12-02T21:16:33.000Z
|
2022-03-20T22:03:46.000Z
|
# Example: write secrets to Vault via dynaconf's vault loader.
from dynaconf import settings
from dynaconf.loaders import vault_loader
# Write SECRET using the currently active environment.
vault_loader.write(settings, {"SECRET": "vault_works"})
# Temporarily switch the settings to the "dev" environment and write there.
with settings.using_env("dev"):
    vault_loader.write(settings, {"SECRET": "vault_works_in_dev"})
| 28.625
| 66
| 0.777293
| 31
| 229
| 5.483871
| 0.483871
| 0.194118
| 0.188235
| 0.282353
| 0.470588
| 0.470588
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0.09607
| 229
| 7
| 67
| 32.714286
| 0.821256
| 0
| 0
| 0
| 0
| 0
| 0.19214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a3cd6a56b82bcf0ca093a5e3977186d5f8d93876
| 78
|
py
|
Python
|
5 Star Python/Arithmetic Operator.py
|
TheCodeAlpha26/Hackerrank-Demystified
|
03713a8f3a05e5d6dfed6f6808b06340558e2310
|
[
"Apache-2.0"
] | 6
|
2021-04-26T17:09:54.000Z
|
2021-07-08T17:36:16.000Z
|
5 Star Python/Arithmetic Operator.py
|
TheCodeAlpha26/Hackerrank-Demystified
|
03713a8f3a05e5d6dfed6f6808b06340558e2310
|
[
"Apache-2.0"
] | null | null | null |
5 Star Python/Arithmetic Operator.py
|
TheCodeAlpha26/Hackerrank-Demystified
|
03713a8f3a05e5d6dfed6f6808b06340558e2310
|
[
"Apache-2.0"
] | null | null | null |
a = int(input())
b = int(input())
print(str(a+b)+"\n"+str(a-b)+"\n"+str(a*b))
| 19.5
| 43
| 0.512821
| 18
| 78
| 2.222222
| 0.388889
| 0.3
| 0.375
| 0.3
| 0.425
| 0.425
| 0.425
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 78
| 3
| 44
| 26
| 0.56338
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4a33cb52c86f5e5addffa7248d8dcd5fa38d6550
| 168
|
py
|
Python
|
python/tinyusdz/UsdGeom/__init__.py
|
GermanAizek/tinyusdz
|
42358383f363143ad8dd512939a4851902d4f339
|
[
"MIT"
] | 159
|
2020-04-14T15:59:35.000Z
|
2022-03-31T14:19:05.000Z
|
python/tinyusdz/UsdGeom/__init__.py
|
GermanAizek/tinyusdz
|
42358383f363143ad8dd512939a4851902d4f339
|
[
"MIT"
] | 16
|
2020-05-21T06:00:40.000Z
|
2022-02-26T08:50:33.000Z
|
python/tinyusdz/UsdGeom/__init__.py
|
GermanAizek/tinyusdz
|
42358383f363143ad8dd512939a4851902d4f339
|
[
"MIT"
] | 8
|
2020-07-01T04:13:42.000Z
|
2022-01-30T17:50:52.000Z
|
from . import Tokens
def SetStageUpAxis(cls, stage: Stage, axis: Tokens):
    """Set ``stage.upAxis`` after validating ``axis``.

    Args:
      stage: the Stage whose up axis is being set.
      axis: one of ``Tokens.x``, ``Tokens.y`` or ``Tokens.z``.

    Raises:
      ValueError: if ``axis`` is not one of the three axis tokens.
    """
    # Raise instead of assert: asserts are stripped under ``python -O``,
    # which would silently skip this validation.
    if axis not in (Tokens.x, Tokens.y, Tokens.z):
        raise ValueError(
            "axis must be Tokens.x, Tokens.y or Tokens.z, got {!r}".format(axis))
    stage.upAxis = axis
| 24
| 67
| 0.678571
| 25
| 168
| 4.56
| 0.56
| 0.350877
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 168
| 6
| 68
| 28
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4a63c57d312ddc1d32c108936d0096513f0c5bb1
| 36,502
|
py
|
Python
|
tensorflow_graphics/rendering/opengl/tests/math_test.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/rendering/opengl/tests/math_test.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/rendering/opengl/tests/math_test.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | 1
|
2020-04-11T10:37:36.000Z
|
2020-04-11T10:37:36.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for OpenGL math routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.opengl import math as glm
from tensorflow_graphics.util import test_case
class MathTest(test_case.TestCase):
def test_perspective_right_handed_preset(self):
"""Tests that perspective_right_handed generates expected results.."""
vertical_field_of_view = ((60.0 * math.pi / 180.0,),
(50.0 * math.pi / 180.0,))
aspect_ratio = ((1.5,), (1.1,))
near = ((1.0,), (1.2,))
far = ((10.0,), (5.0,))
pred = glm.perspective_right_handed(vertical_field_of_view, aspect_ratio,
near, far)
gt = (((1.15470052, 0.0, 0.0, 0.0), (0.0, 1.73205066, 0.0, 0.0),
(0.0, 0.0, -1.22222221, -2.22222233), (0.0, 0.0, -1.0, 0.0)),
((1.9495517, 0.0, 0.0, 0.0), (0.0, 2.14450693, 0.0, 0.0),
(0.0, 0.0, -1.63157892, -3.15789485), (0.0, 0.0, -1.0, 0.0)))
self.assertAllClose(pred, gt)
@parameterized.parameters(
((1,), (1,), (1,), (1,)),
((None, 1), (None, 1), (None, 1), (None, 1)),
((None, 3, 1), (None, 3, 1), (None, 3, 1), (None, 3, 1)),
)
def test_perspective_right_handed_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(glm.perspective_right_handed, shapes)
@parameterized.parameters(
("Not all batch dimensions are identical", (1,), (3, 1), (3, 1), (3, 1)),
("Not all batch dimensions are identical", (3, 1), (None, 3, 1), (3, 1),
(3, 1)),
)
def test_perspective_right_handed_shape_exception_raised(
self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(glm.perspective_right_handed, error_msg,
shapes)
@parameterized.parameters(
((1.0,),
(1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(1.0,)),
((1.0,), (1.0,), (0.0,), (1.0,)),
((1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(0.1,), (1.0,)),
((1.0,), (0.0,), (0.1,), (1.0,)),
((1.0,),
(1.0,), np.random.uniform(1.0, 2.0, size=(1,)).astype(np.float32),
np.random.uniform(0.1, 0.5, size=(1,)).astype(np.float32)),
((1.0,), (1.0,), (0.1,), (0.1,)),
(np.random.uniform(-math.pi, 0.0, size=(1,)).astype(np.float32), (1.0,),
(0.1,), (1.0,)),
(np.random.uniform(math.pi, 2.0 * math.pi, size=(1,)).astype(np.float32),
(1.0,), (0.1,), (1.0,)),
((0.0,), (1.0,), (0.1,), (1.0,)),
((math.pi,), (1.0,), (0.1,), (1.0,)),
)
def test_perspective_right_handed_valid_range_exception_raised(
self, vertical_field_of_view, aspect_ratio, near, far):
"""Tests that an exception is raised with out of bounds values."""
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
glm.perspective_right_handed(vertical_field_of_view, aspect_ratio,
near, far))
def test_perspective_right_handed_cross_jacobian_preset(self):
"""Tests the Jacobian of perspective_right_handed."""
vertical_field_of_view_init = np.array((1.0,))
aspect_ratio_init = np.array((1.0,))
near_init = np.array((1.0,))
far_init = np.array((10.0,))
self.assert_jacobian_is_correct_fn(
glm.perspective_right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
def test_perspective_right_handed_cross_jacobian_random(self):
"""Tests the Jacobian of perspective_right_handed."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
eps = np.finfo(np.float64).eps
vertical_field_of_view_init = np.random.uniform(
eps, math.pi - eps, size=tensor_shape + [1])
aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1])
near_init = np.random.uniform(eps, 10.0, size=tensor_shape + [1])
far_init = np.random.uniform(10 + eps, 100.0, size=tensor_shape + [1])
self.assert_jacobian_is_correct_fn(
glm.perspective_right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
def test_look_at_right_handed_preset(self):
"""Tests that look_at_right_handed generates expected results.."""
camera_position = ((0.0, 0.0, 0.0), (0.1, 0.2, 0.3))
look_at = ((0.0, 0.0, 1.0), (0.4, 0.5, 0.6))
up_vector = ((0.0, 1.0, 0.0), (0.7, 0.8, 0.9))
pred = glm.look_at_right_handed(camera_position, look_at, up_vector)
gt = (((-1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0),
(0.0, 0.0, 0.0, 1.0)),
((4.08248186e-01, -8.16496551e-01, 4.08248395e-01, -2.98023224e-08),
(-7.07106888e-01, 1.19209290e-07, 7.07106769e-01, -1.41421378e-01),
(-5.77350318e-01, -5.77350318e-01, -5.77350318e-01,
3.46410215e-01), (0.0, 0.0, 0.0, 1.0)))
self.assertAllClose(pred, gt)
@parameterized.parameters(
((3,), (3,), (3,)),
((None, 3), (None, 3), (None, 3)),
((None, 2, 3), (None, 2, 3), (None, 2, 3)),
)
def test_look_at_right_handed_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(glm.look_at_right_handed, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (2,), (3,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (2,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (3,), (1,)),
("Not all batch dimensions are identical", (3,), (3, 3), (3, 3)),
)
def test_look_at_right_handed_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(glm.look_at_right_handed, error_msg, shapes)
def test_look_at_right_handed_jacobian_preset(self):
"""Tests the Jacobian of look_at_right_handed."""
camera_position_init = np.array(((0.0, 0.0, 0.0), (0.1, 0.2, 0.3)))
look_at_init = np.array(((0.0, 0.0, 1.0), (0.4, 0.5, 0.6)))
up_vector_init = np.array(((0.0, 1.0, 0.0), (0.7, 0.8, 0.9)))
self.assert_jacobian_is_correct_fn(
glm.look_at_right_handed,
[camera_position_init, look_at_init, up_vector_init])
def test_look_at_right_handed_jacobian_random(self):
"""Tests the Jacobian of look_at_right_handed."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
camera_position_init = np.random.uniform(size=tensor_shape + [3])
look_at_init = np.random.uniform(size=tensor_shape + [3])
up_vector_init = np.random.uniform(size=tensor_shape + [3])
self.assert_jacobian_is_correct_fn(
glm.look_at_right_handed,
[camera_position_init, look_at_init, up_vector_init])
def test_model_to_eye_preset(self):
"""Tests that model_to_eye generates expected results.."""
point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))
camera_position = ((0.0, 0.0, 0.0), (0.1, 0.2, 0.3))
look_at = ((0.0, 0.0, 1.0), (0.4, 0.5, 0.6))
up_vector = ((0.0, 1.0, 0.0), (0.7, 0.8, 0.9))
pred = glm.model_to_eye(point, camera_position, look_at, up_vector)
gt = ((-2.0, 3.0, -4.0), (2.08616257e-07, 1.27279234, -6.58179379))
self.assertAllClose(pred, gt)
@parameterized.parameters(
((3,), (3,), (3,), (3,)),
((None, 3), (None, 3), (None, 3), (None, 3)),
((100, 3), (3,), (3,), (3,)),
((None, 1, 3), (None, 2, 3), (None, 2, 3), (None, 2, 3)),
)
def test_model_to_eye_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(glm.model_to_eye, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (2,), (3,), (3,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (2,), (3,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (3,), (2,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (3,), (3,), (2,)),
("Not all batch dimensions are identical", (3,), (2, 3), (3, 3), (3, 3)),
("Not all batch dimensions are broadcast-compatible", (2, 3), (3, 3),
(3, 3), (3, 3)),
)
def test_model_to_eye_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(glm.model_to_eye, error_msg, shapes)
def test_model_to_eye_jacobian_preset(self):
"""Tests the Jacobian of model_to_eye."""
point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)))
camera_position_init = np.array(((0.0, 0.0, 0.0), (0.1, 0.2, 0.3)))
look_at_init = np.array(((0.0, 0.0, 1.0), (0.4, 0.5, 0.6)))
up_vector_init = np.array(((0.0, 1.0, 0.0), (0.7, 0.8, 0.9)))
self.assert_jacobian_is_correct_fn(
glm.model_to_eye,
[point_init, camera_position_init, look_at_init, up_vector_init])
def test_model_to_eye_jacobian_random(self):
"""Tests the Jacobian of model_to_eye."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
point_init = np.random.uniform(size=tensor_shape + [3])
camera_position_init = np.random.uniform(size=tensor_shape + [3])
look_at_init = np.random.uniform(size=tensor_shape + [3])
up_vector_init = np.random.uniform(size=tensor_shape + [3])
self.assert_jacobian_is_correct_fn(
glm.model_to_eye,
[point_init, camera_position_init, look_at_init, up_vector_init])
def test_eye_to_clip_preset(self):
"""Tests that eye_to_clip generates expected results."""
point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))
vertical_field_of_view = ((60.0 * math.pi / 180.0,),
(50.0 * math.pi / 180.0,))
aspect_ratio = ((1.5,), (1.6,))
near_plane = ((1.0,), (2.0,))
far_plane = ((10.0,), (11.0,))
pred = glm.eye_to_clip(point, vertical_field_of_view, aspect_ratio,
near_plane, far_plane)
gt = ((2.30940104, 5.19615173, -7.11111116, -4.0), (4.02095032, 8.57802773,
-12.11111069, -5.0))
self.assertAllClose(pred, gt)
  @parameterized.parameters(
      # Shapes are (point, vertical_field_of_view, aspect_ratio, near, far).
      ((3,), (1,), (1,), (1,), (1,)),
      ((None, 3), (None, 1), (None, 1), (None, 1), (None, 1)),
      ((None, 5, 3), (None, 5, 1), (None, 5, 1), (None, 5, 1), (None, 5, 1)),
  )
  def test_eye_to_clip_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(glm.eye_to_clip, shapes)
  @parameterized.parameters(
      # Each row pairs an expected error message with the shapes of
      # (point, vertical_field_of_view, aspect_ratio, near, far).
      ("must have exactly 3 dimensions in axis -1", (2,), (1,), (1,), (1,),
       (1,)),
      ("must have exactly 1 dimensions in axis -1", (3,), (2,), (1,), (1,),
       (1,)),
      ("must have exactly 1 dimensions in axis -1", (3,), (1,), (2,), (1,),
       (1,)),
      ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (2,),
       (1,)),
      ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (1,),
       (2,)),
      ("Not all batch dimensions are broadcast-compatible", (3, 3), (2, 1),
       (1,), (1,), (1,)),
  )
  def test_eye_to_clip_exception_raised(self, error_msg, *shapes):
    """Tests that the shape exceptions are properly raised."""
    self.assert_exception_is_raised(glm.eye_to_clip, error_msg, shapes)
def test_eye_to_clip_jacobian_preset(self):
"""Tests the Jacobian of eye_to_clip."""
point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)))
vertical_field_of_view_init = np.array(
((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,)))
aspect_ratio_init = np.array(((1.5,), (1.6,)))
near_init = np.array(((1.0,), (2.0,)))
far_init = np.array(((10.0,), (11.0,)))
self.assert_jacobian_is_correct_fn(
glm.eye_to_clip, [
point_init, vertical_field_of_view_init, aspect_ratio_init,
near_init, far_init
],
atol=1e-5)
def test_eye_to_clip_jacobian_random(self):
"""Tests the Jacobian of eye_to_clip."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
point_init = np.random.uniform(size=tensor_shape + [3])
eps = np.finfo(np.float64).eps
vertical_field_of_view_init = np.random.uniform(
eps, math.pi - eps, size=tensor_shape + [1])
aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1])
near_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1])
far_init = near_init + np.random.uniform(eps, 10.0, size=tensor_shape + [1])
self.assert_jacobian_is_correct_fn(
glm.eye_to_clip, [
point_init, vertical_field_of_view_init, aspect_ratio_init,
near_init, far_init
],
atol=5e-06)
def test_clip_to_ndc_preset(self):
"""Tests that clip_to_ndc generates expected results."""
point = ((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0))
pred = glm.clip_to_ndc(point)
gt = ((2.0, 4.0, 8.0), (4.0, 8.0, 16.0))
self.assertAllClose(pred, gt)
@parameterized.parameters(
((4,)),
((None, 4),),
((None, 5, 4),),
)
def test_clip_to_ndc_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(glm.clip_to_ndc, shapes)
def test_clip_to_ndc_exception_raised(self):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(
glm.clip_to_ndc, "must have exactly 4 dimensions in axis -1", ((2,),))
def test_clip_to_ndc_jacobian_preset(self):
"""Tests the Jacobian of clip_to_ndc."""
point_init = np.array(((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0)))
self.assert_jacobian_is_correct_fn(glm.clip_to_ndc, [point_init])
def test_clip_to_ndc_jacobian_random(self):
"""Tests the Jacobian of clip_to_ndc."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
point_init = np.random.uniform(size=tensor_shape + [4])
self.assert_jacobian_is_correct_fn(glm.clip_to_ndc, [point_init])
def test_ndc_to_screen_preset(self):
"""Tests that ndc_to_screen generates expected results."""
point = ((1.1, 2.2, 3.3), (5.1, 5.2, 5.3))
lower_left_corner = ((6.4, 4.8), (0.0, 0.0))
screen_dimensions = ((640.0, 480.0), (300.0, 400.0))
near = ((1.0,), (11.0,))
far = ((10.0,), (100.0,))
pred = glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near,
far)
gt = ((678.40002441, 772.79998779, 20.34999847), (915.0, 1240.0,
291.3500061))
self.assertAllClose(pred, gt)
  @parameterized.parameters(
      # Shapes are (point, lower_left_corner, screen_dimensions, near, far).
      ((3,), (2,), (2,), (1,), (1,)),
      ((None, 3), (None, 2), (None, 2), (None, 1), (None, 1)),
      ((None, 5, 3), (None, 5, 2), (None, 5, 2), (None, 5, 1), (None, 5, 1)),
  )
  def test_ndc_to_screen_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(glm.ndc_to_screen, shapes)
  @parameterized.parameters(
      # Each row pairs an expected error message with the shapes of
      # (point, lower_left_corner, screen_dimensions, near, far).
      ("must have exactly 3 dimensions in axis -1", (2,), (2,), (2,), (1,),
       (1,)),
      ("must have exactly 2 dimensions in axis -1", (3,), (1,), (2,), (1,),
       (1,)),
      ("must have exactly 2 dimensions in axis -1", (3,), (2,), (3,), (1,),
       (1,)),
      ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (2,),
       (1,)),
      ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (1,),
       (3,)),
      ("Not all batch dimensions are identical", (3,), (2, 2), (3, 2), (3, 1),
       (3, 1)),
      ("Not all batch dimensions are broadcast-compatible", (4, 3), (3, 2),
       (3, 2), (3, 1), (3, 1)),
  )
  def test_ndc_to_screen_exception_raised(self, error_msg, *shapes):
    """Tests that the shape exceptions are properly raised."""
    self.assert_exception_is_raised(glm.ndc_to_screen, error_msg, shapes)
def test_ndc_to_screen_exception_near_raised(self):
"""Tests that an exception is raised when `near` is not strictly positive."""
point = np.random.uniform(size=(3,))
lower_left_corner = np.random.uniform(size=(2,))
screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,))
near = np.random.uniform(-1.0, 0.0, size=(1,))
far = np.random.uniform(1.0, 2.0, size=(1,))
with self.subTest("negative_near"):
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near,
far))
with self.subTest("zero_near"):
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
glm.ndc_to_screen(point, lower_left_corner, screen_dimensions,
np.array((0.0,)), far))
def test_ndc_to_screen_exception_far_raised(self):
"""Tests that an exception is raised if `far` is not greater than `near`."""
point = np.random.uniform(size=(3,))
lower_left_corner = np.random.uniform(size=(2,))
screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,))
near = np.random.uniform(1.0, 10.0, size=(1,))
far = near + np.random.uniform(-1.0, 0.0, size=(1,))
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near,
far))
def test_ndc_to_screen_exception_screen_dimensions_raised(self):
"""Tests that an exception is raised when `screen_dimensions` is not strictly positive."""
point = np.random.uniform(size=(3,))
lower_left_corner = np.random.uniform(size=(2,))
screen_dimensions = np.random.uniform(-1.0, 0.0, size=(2,))
near = np.random.uniform(1.0, 10.0, size=(1,))
far = near + np.random.uniform(0.1, 1.0, size=(1,))
with self.subTest("negative_screen_dimensions"):
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near,
far))
with self.subTest("zero_screen_dimensions"):
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
glm.ndc_to_screen(point, lower_left_corner, np.array((0.0, 0.0)),
near, far))
def test_ndc_to_screen_jacobian_preset(self):
"""Tests the Jacobian of ndc_to_screen."""
point_init = np.array(((1.1, 2.2, 3.3), (5.1, 5.2, 5.3)))
lower_left_corner_init = np.array(((6.4, 4.8), (0.0, 0.0)))
screen_dimensions_init = np.array(((640.0, 480.0), (300.0, 400.0)))
near_init = np.array(((1.0,), (11.0,)))
far_init = np.array(((10.0,), (100.0,)))
self.assert_jacobian_is_correct_fn(glm.ndc_to_screen, [
point_init, lower_left_corner_init, screen_dimensions_init, near_init,
far_init
])
def test_ndc_to_screen_jacobian_random(self):
"""Tests the Jacobian of ndc_to_screen."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
point_init = np.random.uniform(size=tensor_shape + [3])
lower_left_corner_init = np.random.uniform(size=tensor_shape + [2])
screen_dimensions_init = np.random.uniform(
1.0, 1000.0, size=tensor_shape + [2])
near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [1])
far_init = near_init + np.random.uniform(0.1, 1.0, size=(1,))
self.assert_jacobian_is_correct_fn(glm.ndc_to_screen, [
point_init, lower_left_corner_init, screen_dimensions_init, near_init,
far_init
])
def test_model_to_screen_preset(self):
"""Tests that model_to_screen generates expected results."""
point_world_space = ((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1))
camera_position = ((0.0, 0.0, 0.0), (0.4, -0.8, 0.1))
camera_up = ((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
look_at = ((0.0, 0.0, 1.0), (0.0, 1.0, 0.0))
vertical_field_of_view = ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,))
lower_left_corner = ((0.0, 0.0), (10.0, 20.0))
screen_dimensions = ((501.0, 501.0), (400.0, 600.0))
near = ((0.01,), (1.0,))
far = ((4.0,), (3.0,))
pred_screen, pred_w = glm.model_to_screen(point_world_space,
camera_position, look_at,
camera_up, vertical_field_of_view,
screen_dimensions, near, far,
lower_left_corner)
gt_screen = ((-13.23016357, 599.30444336, 4.00215721),
(98.07017517, -95.40383911, 3.1234405))
gt_w = ((5.1,), (3.42247,))
self.assertAllClose(pred_screen, gt_screen, atol=1e-5, rtol=1e-5)
self.assertAllClose(pred_w, gt_w)
  @parameterized.parameters(
      # Shapes are (point_model_space, camera_position, look_at, up_vector,
      # vertical_field_of_view, screen_dimensions, near, far,
      # lower_left_corner).
      ((3,), (3,), (3,), (3,), (1,), (2,), (1,), (1,), (2,)),
      ((640, 480, 3), (3,), (3,), (3,), (1,), (2,), (1,), (1,), (2,)),
      ((None, 3), (None, 3), (None, 3), (None, 3), (None, 1), (None, 2),
       (None, 1), (None, 1), (None, 2)),
      ((3,), (None, 1, 3), (None, 1, 3), (None, 1, 3), (None, 1, 1),
       (None, 1, 2), (None, 1, 1), (None, 1, 1), (None, 1, 2)),
  )
  def test_model_to_screen_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(glm.model_to_screen, shapes)
  @parameterized.parameters(
      # Each row is (error_msg, vertical_field_of_view, screen_dimensions,
      # near, far, lower_left_corner, *shapes): the first five are concrete
      # values forwarded as keyword arguments, the trailing shapes belong to
      # (point_model_space, camera_position, look_at, up_vector).
      ("point_model_space must have exactly 3 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (2,), (3,), (3,), (3,)),
      ("camera_position must have exactly 3 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3,), (2,), (3,), (3,)),
      ("look_at must have exactly 3 dimensions in axis -1", (1.0,), (1.0, 1.0),
       (1.0,), (2.0,), (0.0, 0.0), (3,), (3,), (2,), (3,)),
      ("up_vector must have exactly 3 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3,), (3,), (3,), (2,)),
      ("vertical_field_of_view must have exactly 1 dimensions in axis -1",
       (1.0, 1.0), (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3,), (3,), (3,),
       (3,)),
      ("screen_dimensions must have exactly 2 dimensions in axis -1", (1.0,),
       (1.0,), (1.0,), (2.0,), (0.0, 0.0), (3,), (3,), (3,), (3,)),
      ("near must have exactly 1 dimensions in axis -1", (1.0,), (1.0, 1.0),
       (1.0, 1.0), (2.0,), (0.0, 0.0), (3,), (3,), (3,), (3,)),
      ("far must have exactly 1 dimensions in axis -1", (1.0,), (1.0, 1.0),
       (1.0,), (2.0, 2.0), (0.0, 0.0), (3,), (3,), (3,), (3,)),
      ("lower_left_corner must have exactly 2 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0,), (3,), (3,), (3,), (3,)),
      ("Not all batch dimensions are broadcast-compatible", ((1.0,), (1.0,)),
       ((1.0, 1.0), (1.0, 1.0)), ((1.0,), (1.0,)), ((2.0,), (2.0,)),
       ((0.0, 0.0), (0.0, 0.0)), (5, 3), (2, 3), (2, 3), (2, 3)),
      ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,),
       (2.0,), (0.0, 0.0), (3,), (2, 3), (3,), (3,)),
      ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,),
       (2.0,), (0.0, 0.0), (3,), (3,), (2, 3), (3,)),
      ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,),
       (2.0,), (0.0, 0.0), (3,), (3,), (3,), (2, 3)),
      ("Not all batch dimensions are identical", ((1.0,),), (1.0, 1.0), (1.0,),
       (2.0,), (0.0, 0.0), (3,), (3,), (3,), (3,)),
      ("Not all batch dimensions are identical", (1.0,), ((1.0, 1.0),), (1.0,),
       (2.0,), (0.0, 0.0), (3,), (3,), (3,), (3,)),
      ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), ((1.0,),),
       (2.0,), (0.0, 0.0), (3,), (3,), (3,), (3,)),
      ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,),
       ((2.0,),), (0.0, 0.0), (3,), (3,), (3,), (3,)),
      ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,),
       (2.0,), ((0.0, 0.0),), (3,), (3,), (3,), (3,)),
  )
  def test_model_to_screen_exception_raised(self, error_msg,
                                            vertical_field_of_view,
                                            screen_dimensions, near, far,
                                            lower_left_corner, *shapes):
    """Tests that the shape exceptions are properly raised."""
    self.assert_exception_is_raised(
        func=glm.model_to_screen,
        error_msg=error_msg,
        shapes=shapes,
        vertical_field_of_view=vertical_field_of_view,
        screen_dimensions=screen_dimensions,
        near=near,
        far=far,
        lower_left_corner=lower_left_corner)
def test_model_to_screen_jacobian_preset(self):
"""Tests the Jacobian of model_to_screen."""
point_world_space_init = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1)))
camera_position_init = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1)))
camera_up_init = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))
look_at_init = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0)))
vertical_field_of_view_init = np.array(
((60.0 * math.pi / 180.0,), (65 * math.pi / 180,)))
lower_left_corner_init = np.array(((0.0, 0.0), (10.0, 20.0)))
screen_dimensions_init = np.array(((501.0, 501.0), (400.0, 600.0)))
near_init = np.array(((0.01,), (1.0,)))
far_init = np.array(((4.0,), (3.0,)))
args = [
point_world_space_init, camera_position_init, look_at_init,
camera_up_init, vertical_field_of_view_init, screen_dimensions_init,
near_init, far_init, lower_left_corner_init
]
with self.subTest(name="jacobian_y_projection"):
self.assert_jacobian_is_correct_fn(
lambda *args: glm.model_to_screen(*args)[0], args)
with self.subTest(name="jacobian_w"):
partial_fn = functools.partial(
glm.model_to_screen, lower_left_corner=lower_left_corner_init)
self.assert_jacobian_is_correct_fn(lambda *args: partial_fn(*args)[1],
args[:-1])
def test_model_to_screen_jacobian_random(self):
"""Tests the Jacobian of model_to_screen."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
point_world_space_init = np.random.uniform(size=tensor_shape + [3])
camera_position_init = np.random.uniform(size=tensor_shape + [3])
camera_up_init = np.random.uniform(size=tensor_shape + [3])
look_at_init = np.random.uniform(size=tensor_shape + [3])
vertical_field_of_view_init = np.random.uniform(
0.1, 1.0, size=tensor_shape + [1])
lower_left_corner_init = np.random.uniform(size=tensor_shape + [2])
screen_dimensions_init = np.random.uniform(
0.1, 1.0, size=tensor_shape + [2])
near_init = np.random.uniform(0.1, 1.0, size=tensor_shape + [1])
far_init = near_init + np.random.uniform(0.1, 1.0, size=tensor_shape + [1])
args = [
point_world_space_init, camera_position_init, look_at_init,
camera_up_init, vertical_field_of_view_init, screen_dimensions_init,
near_init, far_init, lower_left_corner_init
]
with self.subTest(name="jacobian_y_projection"):
self.assert_jacobian_is_correct_fn(
lambda *args: glm.model_to_screen(*args)[0], args)
with self.subTest(name="jacobian_w"):
partial_fn = functools.partial(
glm.model_to_screen, lower_left_corner=lower_left_corner_init)
self.assert_jacobian_is_correct_fn(lambda *args: partial_fn(*args)[1],
args[:-1])
def test_perspective_correct_interpolation_preset(self):
"""Tests that perspective_correct_interpolation generates expected results."""
camera_origin = np.array((0.0, 0.0, 0.0))
camera_up = np.array((0.0, 1.0, 0.0))
look_at = np.array((0.0, 0.0, 1.0))
fov = np.array((90.0 * np.math.pi / 180.0,))
bottom_left = np.array((0.0, 0.0))
image_size = np.array((501.0, 501.0))
near_plane = np.array((0.01,))
far_plane = np.array((10.0,))
batch_size = np.random.randint(5)
triangle_x_y = np.random.uniform(-10.0, 10.0, (batch_size, 3, 2))
triangle_z = np.random.uniform(2.0, 10.0, (batch_size, 3, 1))
triangles = np.concatenate((triangle_x_y, triangle_z), axis=-1)
# Builds barycentric weights.
barycentric_weights = np.random.uniform(size=(batch_size, 3))
barycentric_weights = barycentric_weights / np.sum(
barycentric_weights, axis=-1, keepdims=True)
# Barycentric interpolation of vertex positions.
convex_combination = np.einsum("ba, bac -> bc", barycentric_weights,
triangles)
# Computes where those points project in screen coordinates.
pixel_position, _ = glm.model_to_screen(convex_combination, camera_origin,
look_at, camera_up, fov, image_size,
near_plane, far_plane, bottom_left)
# Builds attributes.
num_pixels = pixel_position.shape[0]
attribute_size = np.random.randint(10)
attributes = np.random.uniform(size=(num_pixels, 3, attribute_size))
prediction = glm.perspective_correct_interpolation(
triangles, attributes, pixel_position[..., 0:2], camera_origin, look_at,
camera_up, fov, image_size, near_plane, far_plane, bottom_left)
groundtruth = np.einsum("ba, bac -> bc", barycentric_weights, attributes)
self.assertAllClose(prediction, groundtruth)
  @parameterized.parameters(
      # Shapes are (triangle_vertices, attribute, pixel_position,
      # camera_position, look_at, up_vector, vertical_field_of_view,
      # screen_dimensions, near, far, lower_left_corner).
      ((500, 400, 3, 3), (3, 7), (2,), (3,), (3,), (3,), (1,), (2,), (1,),
       (1,), (2,)),
      ((3, 3), (3, 7), (2,), (3,), (3,), (3,), (1,), (2,), (1,), (1,), (2,)),
      ((None, 3, 3), (None, 3, 7), (None, 2), (None, 3), (None, 3), (None, 3),
       (None, 1), (None, 2), (None, 1), (None, 1), (None, 2)),
  )
  def test_perspective_correct_interpolation_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(glm.perspective_correct_interpolation,
                                        shapes)
  @parameterized.parameters(
      # Each row is (error_msg, vertical_field_of_view, screen_dimensions,
      # near, far, lower_left_corner, *shapes): the first five are concrete
      # values forwarded as keyword arguments, the trailing shapes belong to
      # (triangle_vertices, attribute, pixel_position, camera_position,
      # look_at, up_vector).
      ("point_model_space must have exactly 3 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3, 2), (3, 7), (2,), (3,),
       (3,), (3,)),
      ("must have exactly 3 dimensions in axis -2", (1.0,), (1.0, 1.0), (1.0,),
       (2.0,), (0.0, 0.0), (2, 3), (3, 7), (2,), (3,), (3,), (3,)),
      ("attribute must have exactly 3 dimensions in axis -2", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3, 3), (2, 7), (2,), (3,),
       (3,), (3,)),
      ("must have exactly 2 dimensions in axis -1", (1.0,), (1.0, 1.0), (1.0,),
       (2.0,), (0.0, 0.0), (3, 3), (3, 7), (1,), (3,), (3,), (3,)),
      ("camera_position must have exactly 3 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3, 3), (3, 7), (2,), (4,),
       (3,), (3,)),
      ("look_at must have exactly 3 dimensions in axis -1", (1.0,), (1.0, 1.0),
       (1.0,), (2.0,), (0.0, 0.0), (3, 3), (3, 7), (2,), (3,), (1,), (3,)),
      ("up_vector must have exactly 3 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3, 3), (3, 7), (2,), (3,),
       (3,), (2,)),
      ("vertical_field_of_view must have exactly 1 dimensions in axis -1",
       (1.0, 1.0), (1.0, 1.0), (1.0,), (2.0,), (0.0, 0.0), (3, 3), (3, 7),
       (2,), (3,), (3,), (3,)),
      ("screen_dimensions must have exactly 2 dimensions in axis -1", (1.0,),
       (1.0,), (1.0,), (2.0,), (0.0, 0.0), (3, 3), (3, 7), (2,), (3,), (3,),
       (3,)),
      ("near must have exactly 1 dimensions in axis -1", (1.0,), (1.0, 1.0),
       (1.0, 1.0), (2.0,), (0.0, 0.0), (3, 3), (3, 7), (2,), (3,), (3,),
       (3,)),
      ("far must have exactly 1 dimensions in axis -1", (1.0,), (1.0, 1.0),
       (1.0,), (2.0, 2.0), (0.0, 0.0), (3, 3), (3, 7), (2,), (3,), (3,),
       (3,)),
      ("lower_left_corner must have exactly 2 dimensions in axis -1", (1.0,),
       (1.0, 1.0), (1.0,), (2.0,), (0.0,), (3, 3), (3, 7), (2,), (3,), (3,),
       (3,)),
  )
  def test_perspective_correct_interpolation_exception_raised(
      self, error_msg, vertical_field_of_view, screen_dimensions, near, far,
      lower_left_corner, *shapes):
    """Tests that the shape exceptions are properly raised."""
    self.assert_exception_is_raised(
        func=glm.perspective_correct_interpolation,
        error_msg=error_msg,
        shapes=shapes,
        vertical_field_of_view=vertical_field_of_view,
        screen_dimensions=screen_dimensions,
        near=near,
        far=far,
        lower_left_corner=lower_left_corner)
def test_perspective_correct_interpolation_jacobian_preset(self):
"""Tests the Jacobian of perspective_correct_interpolation."""
vertices_init = np.tile(
((-0.2857143, 0.2857143, 5.0), (0.2857143, 0.2857143, 0.5),
(0.0, -0.2857143, 1.0)), (2, 1, 1))
attributes_init = np.tile(
(((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))), (2, 1, 1))
pixel_position_init = np.array(((125.5, 375.5), (250.5, 250.5)))
camera_position_init = np.tile((0.0, 0.0, 0.0), (2, 3, 1))
look_at_init = np.tile((0.0, 0.0, 1.0), (2, 3, 1))
up_vector_init = np.tile((0.0, 1.0, 0.0), (2, 3, 1))
vertical_field_of_view_init = np.tile((1.0471975511965976,), (2, 3, 1))
screen_dimensions_init = np.tile((501.0, 501.0), (2, 3, 1))
near_init = np.tile((0.01,), (2, 3, 1))
far_init = np.tile((10.0,), (2, 3, 1))
lower_left_corner_init = np.tile((0.0, 0.0), (2, 3, 1))
self.assert_jacobian_is_correct_fn(glm.perspective_correct_interpolation, [
vertices_init, attributes_init, pixel_position_init,
camera_position_init, look_at_init, up_vector_init,
vertical_field_of_view_init, screen_dimensions_init, near_init,
far_init, lower_left_corner_init
])
def test_perspective_correct_interpolation_jacobian_random(self):
"""Tests the Jacobian of perspective_correct_interpolation."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
vertices_init = np.random.uniform(size=tensor_shape + [3, 3])
num_attributes = np.random.randint(1, 10)
attributes_init = np.random.uniform(size=tensor_shape + [3, num_attributes])
pixel_position_init = np.random.uniform(size=tensor_shape + [2])
camera_position_init = np.random.uniform(size=tensor_shape + [3, 3])
look_at_init = np.random.uniform(size=tensor_shape + [3, 3])
up_vector_init = np.random.uniform(size=tensor_shape + [3, 3])
vertical_field_of_view_init = np.random.uniform(
0.1, 1.0, size=tensor_shape + [3, 1])
screen_dimensions_init = np.random.uniform(
1.0, 10.0, size=tensor_shape + [3, 2])
near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [3, 1])
far_init = near_init + np.random.uniform(
0.1, 1.0, size=tensor_shape + [3, 1])
lower_left_corner_init = np.random.uniform(size=tensor_shape + [3, 2])
self.assert_jacobian_is_correct_fn(glm.perspective_correct_interpolation, [
vertices_init, attributes_init, pixel_position_init,
camera_position_init, look_at_init, up_vector_init,
vertical_field_of_view_init, screen_dimensions_init, near_init,
far_init, lower_left_corner_init
])
# Runs the test suite when this file is executed as a script.
if __name__ == "__main__":
  test_case.main()
| 46.677749
| 94
| 0.594598
| 5,783
| 36,502
| 3.549023
| 0.055508
| 0.035568
| 0.037566
| 0.032937
| 0.856558
| 0.813048
| 0.772169
| 0.716186
| 0.673553
| 0.622929
| 0
| 0.094886
| 0.214098
| 36,502
| 781
| 95
| 46.737516
| 0.62056
| 0.080927
| 0
| 0.423323
| 0
| 0
| 0.081217
| 0.004023
| 0
| 0
| 0
| 0
| 0.078275
| 1
| 0.070288
| false
| 0
| 0.015974
| 0
| 0.087859
| 0.001597
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4a65c999ba3027fe1616c4445a8c053ddc244f71
| 112
|
py
|
Python
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/PreferImportedModuleOverNamespacePackage/a.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/PreferImportedModuleOverNamespacePackage/a.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/PreferImportedModuleOverNamespacePackage/a.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
import c
print(c.A().foo())
print(c.<warning descr="Cannot find reference 'b' in 'c.py'">b</warning>.A().foo())
| 28
| 83
| 0.642857
| 21
| 112
| 3.428571
| 0.619048
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 112
| 4
| 83
| 28
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0.309735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
4aa728600a94fa4e0f719b37efe33a9625f121b5
| 2,707
|
py
|
Python
|
tests/calculators/test_non_working_days_in_ago.py
|
FrappucinoGithub/school_meal_forecast_xgboost
|
dc2411765fb78f92c7f8e2af3b8afda88d58347e
|
[
"MIT"
] | 6
|
2020-12-15T09:31:02.000Z
|
2021-12-12T09:42:05.000Z
|
tests/calculators/test_non_working_days_in_ago.py
|
fBedecarrats/school_meal_forecast_xgboost
|
ebb10a8395b9b8158685953b030e664337cf20e0
|
[
"MIT"
] | 2
|
2021-12-12T09:57:38.000Z
|
2022-01-27T22:01:22.000Z
|
tests/calculators/test_non_working_days_in_ago.py
|
fBedecarrats/school_meal_forecast_xgboost
|
ebb10a8395b9b8158685953b030e664337cf20e0
|
[
"MIT"
] | 3
|
2021-02-25T07:49:31.000Z
|
2022-01-10T09:57:39.000Z
|
#!/usr/bin/python3
import unittest
import pandas as pd
import app.calculators as calc
class TestNonWorkingDaysInAgo(unittest.TestCase):
    """Unit tests for calc.add_feature_non_working_days_in_ago."""

    # pylint: disable=too-many-statements
    def test_add_feature_non_working_days_in_ago(self):
        """Checks the holiday-name and non-working-day distance columns."""
        dtf = pd.DataFrame({
            'index_date': ["2017-09-04", "2017-09-05", "2019-05-07", "2019-05-08", "2019-07-15"],
            'date_col': ["2017-09-04", "2017-09-05", "2019-05-07", "2019-05-08", "2019-07-15"]})
        dtf.set_index('index_date', inplace=True)
        # Fixed: removed stray debug `print(train_dtf)` — tests should not
        # write to stdout.
        train_dtf = calc.add_feature_non_working_days_in_ago(dtf.copy(), 'date_col', "%Y-%m-%d", "tests/data")
        self.assertTrue('nom_jour_ferie' in train_dtf)
        self.assertTrue('non_working_in' in train_dtf)
        self.assertTrue('non_working_ago' in train_dtf)
        self.assertEqual(train_dtf.shape, (5, 4))
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['date_col'].iloc[0], '2017-09-04')
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_in'].iloc[0], 58)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_ago'].iloc[0], 0)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['date_col'].iloc[1], '2017-09-05')
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_in'].iloc[1], 57)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_ago'].iloc[1], 0)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['date_col'].iloc[2], '2019-05-07')
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_in'].iloc[2], 1)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_ago'].iloc[2], 6)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['date_col'].iloc[3], '2019-07-15')
        # latest day in the dataset, thus next non_working_in cannot be computed
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_in'].iloc[3], 0)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "jour_ouvre"]['non_working_ago'].iloc[3], 1)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "Victoire des alliés"]['date_col'].iloc[0], '2019-05-08')
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "Victoire des alliés"]['non_working_in'].iloc[0], 0)
        self.assertEqual(train_dtf[train_dtf['nom_jour_ferie'] == "Victoire des alliés"]['non_working_ago'].iloc[0], 0)
# Runs the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| 58.847826
| 123
| 0.683413
| 411
| 2,707
| 4.167883
| 0.206813
| 0.168126
| 0.112084
| 0.214828
| 0.718039
| 0.716287
| 0.711033
| 0.637478
| 0.637478
| 0.637478
| 0
| 0.064322
| 0.13853
| 2,707
| 45
| 124
| 60.155556
| 0.67024
| 0.045807
| 0
| 0
| 0
| 0
| 0.320543
| 0
| 0
| 0
| 0
| 0
| 0.59375
| 1
| 0.03125
| false
| 0
| 0.09375
| 0
| 0.15625
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4376d914ca001b7bf891fdff8239c6c45bbf171b
| 18,066
|
py
|
Python
|
dl4s/CGRNN/CGRNN.py
|
liu2231665/Project-dl4s
|
615d504caf6f05b676be1c25621d2dd94e41ec54
|
[
"MIT"
] | null | null | null |
dl4s/CGRNN/CGRNN.py
|
liu2231665/Project-dl4s
|
615d504caf6f05b676be1c25621d2dd94e41ec54
|
[
"MIT"
] | null | null | null |
dl4s/CGRNN/CGRNN.py
|
liu2231665/Project-dl4s
|
615d504caf6f05b676be1c25621d2dd94e41ec54
|
[
"MIT"
] | null | null | null |
"""#########################################################################
Author: Yingru Liu
Institute: Stony Brook University
Descriptions: the file contains the model description of CGRNN.
----2017.11.15
#########################################################################"""
from .utility import configCGRNN, CGCell
from dl4s.cores.tools import BernoulliNLL
from dl4s.cores.model import _model
import tensorflow as tf
import numpy as np
"""#########################################################################
Class: _CGRNN - the hyper abstraction of the CGRNN.
#########################################################################"""
class _CGRNN(_model, object):
    """#########################################################################
    __init__:the initialization function.
    input: Config - configuration class in ./utility.
    output: None.
    raises: ValueError - if the forward recurrent structure is empty.
    #########################################################################"""
    def __init__(
            self,
            config=configCGRNN()
    ):
        # Check the forward recurrent dimension configuration.
        # `not config.dimRec` is the idiomatic emptiness test; unlike the
        # previous `config.dimRec == []` it also rejects None.
        if not config.dimRec:
            raise ValueError('The forward recurrent structure is empty!')
        super().__init__(config)
        with self._graph.as_default():
            # <scalar> the steps of Gibbs sampling.
            self._gibbs = config.Gibbs
            # <scalar> the number of samples of AIS.
            self._aisRun = config.aisRun
            # <scalar> the number of intermediate proposal distributions of AIS.
            self._aisLevel = config.aisLevel
            # <scalar> dimensions of input frame.
            self._dimInput = config.dimIN
            # <scalar> dimensions of stochastic states.
            self._dimState = config.dimState
            # <scalar list> the size of forward recurrent hidden layers.
            self._dimRec = config.dimRec
            # <scalar list> the size of feed-forward hidden layers.
            self._dimMlp = config.dimMlp
            # <string> the mode.
            self._mode = config.mode
            # Subclasses may attach a trained VAE for NVIL estimation.
            self.VAE = None
"""#########################################################################
Class: binCGRNN - the CGRNN mode for binary input.
#########################################################################"""
class binCGRNN(_CGRNN, object):
"""#########################################################################
__init__:the initialization function.
input: Config - configuration class in ./utility.
VAE - if a well trained VAE is provided. Using NVIL to estimate the
upper bound of the partition function.
output: None.
#########################################################################"""
    def __init__(
            self,
            config=configCGRNN(),
            VAE=None
    ):
        """Build the TF graph of the binary-input CGRNN.

        Args:
            config: configuration class in ./utility.
            VAE: optional well-trained VAE (SRNN/VRNN). When provided, NVIL is
                used to bound the partition function; otherwise AIS is used.
        """
        super().__init__(config)
        """build the graph"""
        with self._graph.as_default():
            self.Cell = CGCell(config, inputType='binary')
            state = self.Cell.zero_state(tf.shape(self.x)[0], dtype=tf.float32)
            # Unrolls the cell over the input sequence; the cell emits the
            # sampled/mean visible, hidden and spike tensors plus RBM biases.
            (self.newV, self.newH, self.newS, self.muV, self.muH, self.muS, bvt, bht), _ = \
                tf.nn.dynamic_rnn(self.Cell, self.x, initial_state=state)
            # update the RBM's bias with bvt & bht.
            self.Cell.RBM._bh = bht
            self.Cell.RBM._bv = bvt
            # one step sample.
            muV0, muH0, muS0 = self.Cell.RBM.GibbsSampling(self.x, k=1)[-3:]
            # add the tensor computation of extracted feature.
            self._outputs = muV0
            self._feature = muH0
            # NOTE(review): hidden mean gated by the spike mean — presumably a
            # spike-and-slab style sparse feature; confirm against CGCell.
            self._sparse_feature = muH0 * muS0
            # the training loss is per bits.
            Loss = self.Cell.RBM.ComputeLoss(V=self.x, samplesteps=config.Gibbs)
            self._loss = BernoulliNLL(self.x, self.muV)
            self._params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            # Training minimizes the RBM loss, not the monitored NLL above.
            self._train_step = self._optimizer.minimize(Loss)
            # Define the components to evaluate the partition function by whether NVIL or AIS.
            if VAE is None:
                self._logZ = self.Cell.RBM.AIS(self._aisRun, self._aisLevel,
                                               tf.shape(self.x)[0], tf.shape(self.x)[1])
                self._nll = tf.reduce_mean(self.Cell.RBM.FreeEnergy(self.x) + self._logZ)
                #self._nll = self._logZ
                #self._nll = self.Cell.RBM.FreeEnergy(self.x)
                self.VAE = VAE
            else:
                self._logZ = self._NVIL_VAE(VAE)  # X, logPz_X, logPx_Z, logPz, VAE.x
                self.xx = tf.placeholder(dtype='float32', shape=[None, None, None, config.dimIN])
                self.FEofSample = self.Cell.RBM.FreeEnergy(self.xx)
                self.FEofInput = self.Cell.RBM.FreeEnergy(self.x)
                self.VAE = VAE
            """define the process to generate samples."""
            state = self.Cell.zero_state(1, dtype=tf.float32)
            x_ = tf.zeros((1, self._dimInput), dtype='float32')
            # TensorArray to save the output of the generating.
            gen_operator = tf.TensorArray(tf.float32, self.sampleLen)
            # condition and body of while loop (input: i-iteration, xx-RNN input, ss-RNN state)
            i = tf.constant(0)
            cond = lambda i, xx, ss, array: tf.less(i, self.sampleLen)
            #
            def body(i, xx, ss, array):
                # One generation step: run the cell once and record its sample.
                ii = i + 1
                (new_xx, _, _, _, _, _, _, _), new_ss = self.Cell(xx, ss, gibbs=1)
                new_array = array.write(i, new_xx)
                return ii, new_xx, new_ss, new_array
            gen_operator = tf.while_loop(cond, body, [i, x_, state, gen_operator])[-1]
            self._gen_operator = gen_operator.concat()
            #
            self._runSession()
"""#########################################################################
_NVIL_VAE: generate the graph to compute the NVIL upper bound of log Partition
function by a well-trained VAE.
input: VAE - the well-trained VAE(SRNN/VRNN).
output: the upper boundLogZ.
#########################################################################"""
def _NVIL_VAE(self, VAE):
# get the marginal and conditional distribution of the VAE.
probs = VAE._dec
Px_Z = tf.distributions.Bernoulli(probs=probs, dtype=tf.float32)
mu, std = VAE._enc
Pz_X = tf.distributions.Normal(loc=mu, scale=std)
mu, std = VAE._prior
Pz = tf.distributions.Normal(loc=mu, scale=std)
# generate the samples.
X = Px_Z.sample()
logPz_X = tf.reduce_sum(Pz_X.log_prob(VAE._Z), axis=[-1]) # shape = [batch, steps]
# logPx_Z = tf.reduce_prod(Px_Z.log_prob(X), axis=[-1])
logPx_Z = tf.reduce_sum(
(1 - X) * tf.log(tf.maximum(tf.minimum(1.0, 1 - probs), 1e-32))
+ X * tf.log(tf.maximum(tf.minimum(1.0, probs), 1e-32)),
axis=[-1]) # shape = [runs, batch, steps]
logPz = tf.reduce_sum(Pz.log_prob(VAE._Z), axis=[-1])
return X, logPz_X, logPx_Z, logPz, VAE.x
"""#########################################################################
ais_function: compute the approximated negative log-likelihood with partition
function computed by annealed importance sampling.
input: input - numerical input.
output: the negative log-likelihood value.
#########################################################################"""
def ais_function(self, input):
with self._graph.as_default():
if self.VAE is None:
loss_value = self._sess.run(self._nll, feed_dict={self.x: input})
else:
loss_value = []
X = []
logPz_X = []
logPx_Z = []
logPz = []
for i in range(self._aisRun):
Xi, logPz_Xi, logPx_Zi, logPzi = self.VAE._sess.run(self._logZ[0:-1], feed_dict={self._logZ[-1]: input})
X.append(Xi)
logPz_X.append(logPz_Xi)
logPx_Z.append(np.nan_to_num(logPx_Zi))
logPz.append(logPzi)
# shape = [runs, batch, steps]
X = np.asarray(X, dtype=np.float64)
logPz_X = np.asarray(logPz_X, dtype=np.float64)
logPx_Z = np.asarray(logPx_Z, dtype=np.float64)
logPz = np.asarray(logPz, dtype=np.float64)
FEofSample = self._sess.run(self.FEofSample, feed_dict={self.xx: X, self.x: input})
FEofSample = np.cast[np.float64](FEofSample)
logTerm = 2 * (-FEofSample + logPz_X - logPx_Z - logPz) / 1000 #self._dimInput
r_ais = np.mean(np.exp(logTerm), axis=0)
logZ = 0.5 * (np.log(r_ais+1e-38))
FEofInput = self._sess.run(self.FEofInput, feed_dict={self.x: input})
FEofInput = np.cast[np.float64](FEofInput)
loss_value.append(np.mean(FEofInput + logZ * 1000))#self._dimInput))
loss_value = np.asarray(loss_value).mean()
return loss_value
# TODO
"""#########################################################################
Class: gaussCGRNN - the CGRNN mode for continuous input.
#########################################################################"""
class gaussCGRNN(_CGRNN, object):
    # CGRNN model specialised for continuous (real-valued) input, built on a
    # spike-and-slab RBM (ssRBM) cell.

    """#########################################################################
    __init__:the initialization function.
    input: Config - configuration class in ./utility.
           VAE - if a well trained VAE is provided. Using NVIL to estimate the
                 upper bound of the partition function.
    output: None.
    #########################################################################"""
    def __init__(
            self,
            config,
            VAE=None
    ):
        super().__init__(config)
        """build the graph"""
        with self._graph.as_default():
            # Recurrent cell wrapping the RBM; 'continuous' selects the
            # real-valued (ssRBM) variant.
            self.Cell = CGCell(config, inputType='continuous')
            state = self.Cell.zero_state(tf.shape(self.x)[0], dtype=tf.float32)
            # Unroll over the sequence; unlike the binary model, the continuous
            # cell additionally emits the per-step precision parameter gamma.
            (self.newV, self.newH, self.newS, self.muV, self.muH, self.muS,
             self.bvt, self.bht, self.gamma), _ = tf.nn.dynamic_rnn(self.Cell, self.x, initial_state=state)
            # update the RBM's bias with bvt & bht, gamma.
            # NOTE(review): direct assignment to private RBM attributes; the
            # GibbsSampling/FreeEnergy/AIS graphs built below depend on it.
            self.Cell.RBM._bh = self.bht
            self.Cell.RBM._bv = self.bvt
            self.Cell.RBM._gamma = self.gamma
            # one step sample.
            muV0, muH0, muS0 = self.Cell.RBM.GibbsSampling(self.x, k=1)[-3:]
            # add the tensor computation of extracted feature.
            self._outputs = muV0
            self._feature = muH0
            # Hidden activation gated by the slab/spike mean.
            self._sparse_feature = muH0 * muS0
            # the training loss is per frame.
            Loss = self.Cell.RBM.ComputeLoss(V=self.x, samplesteps=config.Gibbs)
            # define the monitor.
            # RMSE between the input and the per-step mean visibles; used for
            # monitoring only — the optimizer minimizes `Loss`.
            monitor = tf.reduce_sum((self.x - self.muV) ** 2, axis=-1)
            self._loss = tf.sqrt(tf.reduce_mean(monitor))
            self._params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            self._train_step = self._optimizer.minimize(Loss)
            # add the computation of precision and covariance matrix of ssRBM.
            # NOTE(review): the formulas below presumably implement the
            # conditional precision of the visibles given the hiddens for a
            # spike-and-slab RBM (W, alpha, phi, gamma parameters) — verify
            # against the ssRBM derivation before modifying.
            newH = tf.expand_dims(self.newH, axis=2)
            W = tf.expand_dims(tf.expand_dims(self.Cell.RBM._W, axis=0), axis=0)
            # Low-rank term h * W W^T / alpha; 1e-8 guards division by zero.
            term1 = newH * W / (self.Cell.RBM._alpha + 1e-8)
            term1 = tf.tensordot(term1, self.Cell.RBM._W, [[-1], [-1]])
            # Diagonal term 1 / (gamma + h . phi).
            Cv_sh = 1 / (tf.expand_dims(self.Cell.RBM._gamma, axis=2) + tf.tensordot(newH, self.Cell.RBM._phi, [[-1], [0]]) + 1e-8)
            term2 = Cv_sh * tf.eye(self._dimInput, batch_shape=[1, 1])
            self.PreV_h = term2 + term1
            # Covariance is the matrix inverse of the precision.
            self.CovV_h = tf.matrix_inverse(self.PreV_h)
            #
            # Partition-function machinery: AIS when no VAE is given, NVIL otherwise.
            if VAE is None:
                self._logZ = self.Cell.RBM.AIS(self._aisRun, self._aisLevel,
                                               tf.shape(self.x)[0], tf.shape(self.x)[1])
                self._nll = tf.reduce_mean(self.Cell.RBM.FreeEnergy(self.x) + self._logZ)
                self.VAE = VAE
            else:
                self._logZ = self._NVIL_VAE(VAE)  # X, logPz_X, logPx_Z, logPz, VAE.x
                # Placeholder for stacked VAE samples;
                # shape presumably [runs, batch, steps, dimIN] — see ais_function.
                self.xx = tf.placeholder(dtype='float32', shape=[None, None, None, config.dimIN])
                self.FEofSample = self.Cell.RBM.FreeEnergy(self.xx)
                self.FEofInput = self.Cell.RBM.FreeEnergy(self.x)
                self.VAE = VAE
            """define the process to generate samples."""
            # Free-running generation from a zero state and a zero frame.
            state = self.Cell.zero_state(1, dtype=tf.float32)
            x_ = tf.zeros((1, self._dimInput), dtype='float32')
            # TensorArray to save the output of the generating.
            gen_operator = tf.TensorArray(tf.float32, self.sampleLen)
            # condition and body of while loop (input: i-iteration, xx-RNN input, ss-RNN state)
            i = tf.constant(0)
            cond = lambda i, xx, ss, array: tf.less(i, self.sampleLen)

            #
            def body(i, xx, ss, array):
                # One generation step: feed the previous sample back (one Gibbs
                # step); note the continuous cell's 9-tuple output (extra gamma).
                ii = i + 1
                (new_xx, _, _, _, _, _, _, _, _), new_ss = self.Cell(xx, ss, gibbs=1)
                new_array = array.write(i, new_xx)
                return ii, new_xx, new_ss, new_array
            gen_operator = tf.while_loop(cond, body, [i, x_, state, gen_operator])[-1]
            self._gen_operator = gen_operator.concat()
            #
            self._runSession()

    """#########################################################################
    ais_function: compute the approximated negative log-likelihood with partition
                  function computed by annealed importance sampling.
    input: input - numerical input.
    output: the negative log-likelihood value.
    #########################################################################"""
    def ais_function(self, input):
        # NOTE(review): the parameter name shadows the builtin `input`.
        with self._graph.as_default():
            if self.VAE is None:
                # AIS path: the graph already holds the full NLL estimate.
                loss_value = self._sess.run(self._nll, feed_dict={self.x: input})
            else:
                # NVIL path: draw `self._aisRun` sample sets from the VAE session
                # and combine their log-probabilities with the RBM free energies.
                loss_value = []
                X = []
                logPz_X = []
                logPx_Z = []
                logPz = []
                for i in range(self._aisRun):
                    Xi, logPz_Xi, logPx_Zi, logPzi = self.VAE._sess.run(self._logZ[0:-1],
                                                                        feed_dict={self._logZ[-1]: input})
                    X.append(Xi)
                    logPz_X.append(np.nan_to_num(logPz_Xi))
                    logPx_Z.append(np.nan_to_num(logPx_Zi))
                    logPz.append(np.nan_to_num(logPzi))
                # shape = [runs, batch, steps]
                X = np.asarray(X, dtype=np.float64)
                logPz_X = np.asarray(logPz_X, dtype=np.float64)
                logPx_Z = np.asarray(logPx_Z, dtype=np.float64)
                logPz = np.asarray(logPz, dtype=np.float64)
                FEofSample = self._sess.run(self.FEofSample, feed_dict={self.xx: X, self.x: input})
                # NOTE(review): np.cast is deprecated (removed in NumPy >= 1.25);
                # FEofSample.astype(np.float64) is the modern equivalent.
                FEofSample = np.cast[np.float64](FEofSample)
                # Importance weights in log space; the factor 1000 appears to be a
                # hard-coded stand-in for self._dimInput (see trailing comments) —
                # TODO confirm.
                logTerm = 2 * (-FEofSample + logPz_X - logPx_Z - logPz) / 1000  # self._dimInput
                r_ais = np.mean(np.exp(logTerm), axis=0)
                # 1e-38 guards log(0) when all weights underflow.
                logZ = 0.5 * (np.log(r_ais + 1e-38))
                FEofInput = self._sess.run(self.FEofInput, feed_dict={self.x: input})
                FEofInput = np.cast[np.float64](FEofInput)
                loss_value.append(np.mean(FEofInput + logZ * 1000))  # self._dimInput))
                loss_value = np.asarray(loss_value).mean()
            return loss_value

    """#########################################################################
    _NVIL_VAE: generate the graph to compute the NVIL upper bound of log Partition
               function by a well-trained VAE.
    input: VAE - the well-trained VAE(SRNN/VRNN).
    output: the upper boundLogZ.
    #########################################################################"""
    def _NVIL_VAE(self, VAE):
        # get the marginal and conditional distribution of the VAE.
        # Unlike binCGRNN._NVIL_VAE, the decoder here is Gaussian (mu, std),
        # matching the continuous input model.
        mu, std = VAE._dec
        Px_Z = tf.distributions.Normal(loc=mu, scale=std)
        mu1, std1 = VAE._enc
        Pz_X = tf.distributions.Normal(loc=mu1, scale=std1)
        mu, std = VAE._prior
        Pz = tf.distributions.Normal(loc=mu, scale=std)
        # generate the samples.
        X = Px_Z.sample()
        logPz_X = tf.reduce_sum(Pz_X.log_prob(VAE._Z), axis=[-1])  # shape = [batch, steps]
        logPx_Z = tf.reduce_sum(Px_Z.log_prob(X), axis=[-1])
        logPz = tf.reduce_sum(Pz.log_prob(VAE._Z), axis=[-1])
        return X, logPz_X, logPx_Z, logPz, VAE.x

    """#########################################################################
    cov_function: compute the covariance matrix Cv_h.
    input: input - numerical input.
    output: covariance matrix Cv_h.
    #########################################################################"""
    def cov_function(self, input):
        # Evaluate the conditional covariance tensor built in __init__.
        with self._graph.as_default():
            return self._sess.run(self.CovV_h, feed_dict={self.x: input})

    """#########################################################################
    pre_function: compute the precision matrix Cv_h^{-1}.
    input: input - numerical input.
    output: precision matrix Cv_h^{-1}.
    #########################################################################"""
    def pre_function(self, input):
        # Evaluate the conditional precision tensor built in __init__.
        with self._graph.as_default():
            return self._sess.run(self.PreV_h, feed_dict={self.x: input})
| 52.517442
| 132
| 0.496457
| 2,014
| 18,066
| 4.282522
| 0.149454
| 0.030609
| 0.029333
| 0.01113
| 0.791768
| 0.763014
| 0.749449
| 0.734957
| 0.727304
| 0.718261
| 0
| 0.013857
| 0.296967
| 18,066
| 343
| 133
| 52.670554
| 0.665223
| 0.163954
| 0
| 0.680365
| 0
| 0
| 0.007216
| 0
| 0
| 0
| 0
| 0.002915
| 0
| 1
| 0.050228
| false
| 0
| 0.022831
| 0
| 0.123288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
438c7596524513b6f5670e8c76aee46370387b73
| 44
|
py
|
Python
|
application/flicket_admin/scripts/__init__.py
|
abbas0001/flicket
|
547a5e783cccf157d10df88608440aa2919d7e7b
|
[
"MIT"
] | null | null | null |
application/flicket_admin/scripts/__init__.py
|
abbas0001/flicket
|
547a5e783cccf157d10df88608440aa2919d7e7b
|
[
"MIT"
] | null | null | null |
application/flicket_admin/scripts/__init__.py
|
abbas0001/flicket
|
547a5e783cccf157d10df88608440aa2919d7e7b
|
[
"MIT"
] | null | null | null |
#! usr/bin/python3
# -*- coding: utf8 -*-
#
| 11
| 22
| 0.522727
| 5
| 44
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.181818
| 44
| 3
| 23
| 14.666667
| 0.583333
| 0.863636
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
43b031f1eca4348d81d92e84e0d56dd356e8d071
| 52
|
py
|
Python
|
demo3.py
|
xinbaolai/we-are-a-team
|
27c8f55e85171a984fb1d86519f59889a065b05f
|
[
"Apache-2.0"
] | null | null | null |
demo3.py
|
xinbaolai/we-are-a-team
|
27c8f55e85171a984fb1d86519f59889a065b05f
|
[
"Apache-2.0"
] | null | null | null |
demo3.py
|
xinbaolai/we-are-a-team
|
27c8f55e85171a984fb1d86519f59889a065b05f
|
[
"Apache-2.0"
] | null | null | null |
print("aaaaaaaaaaa")
# aaaaaaaaaaaaaaaaaaaaaaaaaaaaa
| 26
| 31
| 0.865385
| 3
| 52
| 15
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 52
| 2
| 31
| 26
| 0.9
| 0.557692
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
43b2bb59051229391be6c756bbab9a9435d21340
| 61
|
py
|
Python
|
ex/24 (14).py
|
Time2003/lr7
|
b47edaf11ced014022764b6c5edef34e4c107c0b
|
[
"MIT"
] | null | null | null |
ex/24 (14).py
|
Time2003/lr7
|
b47edaf11ced014022764b6c5edef34e4c107c0b
|
[
"MIT"
] | null | null | null |
ex/24 (14).py
|
Time2003/lr7
|
b47edaf11ced014022764b6c5edef34e4c107c0b
|
[
"MIT"
] | null | null | null |
list_1 = [1, 2, 3]
list_2 = [4, 5, 6]
печать(list_1 + list_2)
| 20.333333
| 23
| 0.590164
| 15
| 61
| 2.133333
| 0.533333
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 0.196721
| 61
| 3
| 23
| 20.333333
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
43c1e1436a273de9761fb418a69acb4083f66610
| 48
|
py
|
Python
|
tests/admin_scripts/custom_templates/project_template/additional_dir/additional_file.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
tests/admin_scripts/custom_templates/project_template/additional_dir/additional_file.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
tests/admin_scripts/custom_templates/project_template/additional_dir/additional_file.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
# some file for {{ project_name }} test project
| 24
| 47
| 0.708333
| 7
| 48
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 48
| 1
| 48
| 48
| 0.846154
| 0.9375
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
43d74a652bd2a097c24aea08f097fbb7b43e735e
| 68,535
|
py
|
Python
|
UGI/UGInfos_Latin.py
|
protimient/Glyphs-Scripts
|
0481ea01153844667cff8cfa3fad97c33af09956
|
[
"Apache-2.0"
] | 2
|
2021-02-12T20:36:29.000Z
|
2021-11-03T08:04:01.000Z
|
UGI/UGInfos_Latin.py
|
protimient/Glyphs-Scripts
|
0481ea01153844667cff8cfa3fad97c33af09956
|
[
"Apache-2.0"
] | null | null | null |
UGI/UGInfos_Latin.py
|
protimient/Glyphs-Scripts
|
0481ea01153844667cff8cfa3fad97c33af09956
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from unifiedglyphinfo import CollectedGlyphInfos, xpos, ypos
def collect_infos(infos_dict):
    # Merge the module-level `ugi.unified_infos` mapping into *infos_dict* in place.
    # NOTE(review): dict.update() returns None, so this function always returns
    # None; callers must rely on the in-place mutation of `infos_dict`.
    return infos_dict.update(ugi.unified_infos)
ugi = CollectedGlyphInfos()
x = ugi('A')
x.addAnchor('ogonek', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.capHeight)
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('V flip_horizontal flip_vertical')
x = ugi('AE')
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.capHeight)
x.addKerning(left='AE', right='E')
x.addMetrics(left='AE', right='E')
x.addRecipe('A decompose', 'E decompose')
x = ugi('AEacute')
x.addKerning(left='AE', right='E')
x.addMetrics(left='AE', right='E')
x = ugi('Aacute')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Abreve')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Abreveacute')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'brevecomb_acutecomb')
x = ugi('Abrevedotbelow')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'brevecomb', 'dotbelowcomb')
x = ugi('Abrevegrave')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'brevecomb_gravecomb')
x = ugi('Abrevehookabove')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'brevecomb_hookabovecomb')
x = ugi('Abrevetilde')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'brevecomb_tildecomb')
x = ugi('Acaron')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Acircumflex')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Acircumflexacute')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'circumflexcomb_acutecomb')
x = ugi('Acircumflexdotbelow')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'circumflexcomb', 'dotbelowcomb')
x = ugi('Acircumflexgrave')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'circumflexcomb_gravecomb')
x = ugi('Acircumflexhookabove')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'circumflexcomb_hookabovecomb')
x = ugi('Acircumflextilde')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'circumflexcomb_tildecomb')
x = ugi('Adieresis')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Adotbelow')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Agrave')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Ahookabove')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Alpha')
x.addKerning(left='O', right='H')
x = ugi('Alpha-latin')
x.addRecipe('D flip_vertical flip_horizontal decompose', 'I decompose')
x = ugi('Alphaturned-latin')
x.addRecipe('Alpha-latin flip_vertical flip_horizontal')
x = ugi('Amacron')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Aogonek')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Aring')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Aringacute')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x.addRecipe('A', 'ringcomb', 'acutecomb')
x = ugi('Atilde')
x.addKerning(left='A', right='A')
x.addMetrics(left='A', right='A')
x = ugi('Aturned')
x.addKerning(left='V', right='V')
x.addRecipe('A flip_horizontal flip_vertical')
x = ugi('B')
x.addKerning(left='H', right='B')
x.addMetrics(left='H', right='B')
x = ugi('Bhook')
x.addKerning(left='Bhook', right='O')
x.addRecipe('B', '_part.Hookleft')
x = ugi('Bsmall')
x.addKerning(left='n', right='Bsmall')
x.addRecipe('ve-cy')
x = ugi('C')
x.addAnchor('bottom', position_x=xpos.apex_bottom)
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.capHeight)
x.addKerning(left='O', right='C')
x.addMetrics(left='O', right='C')
x = ugi('Cacute')
x.addKerning(left='O', right='C')
x.addMetrics(left='C', right='C')
x = ugi('Ccaron')
x.addKerning(left='O', right='C')
x.addMetrics(left='C', right='C')
x = ugi('Ccedilla')
x.addKerning(left='O', right='C')
x.addMetrics(left='C', right='C')
x = ugi('Ccircumflex')
x.addKerning(left='O', right='C')
x.addMetrics(left='C', right='C')
x = ugi('Cdotaccent')
x.addKerning(left='O', right='C')
x.addMetrics(left='C', right='C')
x = ugi('Chook')
x.addKerning(left='O', right='C')
x.addRecipe('C', '_part.Hook')
x = ugi('D')
x.addKerning(left='H', right='O')
x.addMetrics(left='H', right='O')
x = ugi('Dafrican')
x.addKerning(left='H', right='O')
x.addRecipe('Eth')
x = ugi('Dcaron')
x.addKerning(left='H', right='O')
x.addMetrics(left='D', right='D')
x = ugi('Dcroat')
x.addKerning(left='Eth', right='O')
x.addMetrics(left='Eth', right='D')
x.addRecipe('Eth')
x = ugi('Dhook')
x.addKerning(left='Bhook', right='O')
x.addRecipe('D', '_part.Hookleft')
x = ugi('Dsmall')
x.addKerning(left='n', right='o')
x.addMetrics(left='n', right='o')
x = ugi('E')
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.capHeight)
x.addKerning(left='H', right='E')
x.addMetrics(left='H', right='E')
x.addRecipe('_part.stem', '_part.bar', '_part.bar', '_part.bar')
x = ugi('Eacute')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', right='E')
x = ugi('Ebreve')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', right='E')
x = ugi('Ecaron')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', right='E')
x = ugi('Ecircumflex')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x = ugi('Ecircumflexacute')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x.addRecipe('E', 'circumflexcomb_acutecomb')
x = ugi('Ecircumflexdotbelow')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x.addRecipe('E', 'circumflexcomb', 'dotbelowcomb')
x = ugi('Ecircumflexgrave')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x.addRecipe('E', 'circumflexcomb_gravecomb')
x = ugi('Ecircumflexhookabove')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x.addRecipe('E', 'circumflexcomb_hookabovecomb')
x = ugi('Ecircumflextilde')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x.addRecipe('E', 'circumflexcomb_tildecomb')
x = ugi('Edieresis')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x = ugi('Edotaccent')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x = ugi('Edotbelow')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x = ugi('Egrave')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x = ugi('Ehookabove')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x = ugi('Emacron')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', width='E')
x = ugi('Eng')
x.addKerning(left='H', right='N')
x.addMetrics(left='H', right='N')
x.addRecipe('jdotless decompose', 'N decompose')
x = ugi('Eogonek')
x.addKerning(left='H', right='E')
x.addMetrics(left='E', right='E')
x = ugi('Eopen')
x.addKerning(left='S', right='C')
x.addRecipe('Ze-cy flip_horizontal decompose')
x = ugi('EreversedOpen')
x.addKerning(left='S', right='B')
x.addRecipe('Ze-cy')
x = ugi('Esh')
x.addKerning(left='X', right='E')
x.addRecipe('Sigma')
x = ugi('Eth')
x.addBuildString(u'önghljóðuðust')
x.addKerning(left='Eth', right='O')
x.addMetrics(left='Eth', right='D')
x.addRecipe('D', '_part.bar')
x.addRecipe('D', 'macroncomb decompose')
x = ugi('Etilde')
x.addKerning(left='H', right='E')
x.addMetrics(left='H', right='E')
x = ugi('Ezh')
x.addKerning(left='Ezh', right='Germandbls')
x.addRecipe('ezh decompose')
x = ugi('F')
x.addAnchor('#bar', position_x=xpos.stem_bottom_center, position_y=ypos.height_25)
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.capHeight)
x.addKerning(left='H', right='F')
x.addMetrics(left='H', right='F')
x.addRecipe('E decompose')
x = ugi('G')
x.addAnchor('bottom', position_x=xpos.apex_bottom)
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.capHeight)
x.addKerning(left='O', right='G')
x.addMetrics(left='O', right='G')
x = ugi('Gacute')
x.addKerning(left='O', right='G')
x = ugi('Gammaafrican')
x.addKerning(left='V', right='V')
x.addRecipe('gamma-latin decompose')
x = ugi('Gbreve')
x.addKerning(left='O', right='G')
x.addMetrics(left='G', right='G')
x = ugi('Gcircumflex')
x.addKerning(left='O', right='G')
x.addMetrics(left='G', right='G')
x = ugi('Gcommaaccent')
x.addKerning(left='O', right='G')
x.addMetrics(left='G', right='G')
x = ugi('Gdotaccent')
x.addKerning(left='O', right='G')
x.addMetrics(left='G', right='G')
x = ugi('Germandbls')
x.addKerning(left='Germandbls', right='Germandbls')
x.addMetrics(left='Germandbls', right='Germandbls')
x.addRecipe('I decompose', 'S decompose')
x = ugi('Ghook')
x.addKerning(left='O', right='G')
x.addRecipe('G', '_part.hook')
x = ugi('Glottalstop')
x.addRecipe('glottalstop decompose')
x = ugi('Gscript')
x.addKerning(left='O', right='H')
x.addRecipe('Alpha-latin decompose', 'gsingle decompose')
x = ugi('Gsmall')
x.addKerning(left='o', right='Gsmall')
x = ugi('Gsmallhook')
x.addKerning(left='o', right='dhook')
x.addMetrics(left='Gsmall', right='dhook')
x = ugi('H')
x.addAnchor('#bar', position_x=xpos.outline_center, position_y=ypos.height_75)
x.addKerning(left='H', right='H')
x.addMetrics(left='H', right='H')
x.addRecipe('_part.stem', '_part.bar', '_part.stem')
x = ugi('Hbar')
x.addKerning(left='H', right='H')
x.addMetrics(left='H', right='H')
x.addRecipe('H', '_part.bar')
x.addRecipe('H', 'macroncomb decompose')
x = ugi('Hcircumflex')
x.addKerning(left='H', right='H')
x.addMetrics(left='H', right='H')
x = ugi('Hhook')
x.addKerning(left='Bhook', right='H')
x.addRecipe('H', '_part.Hookleft')
x = ugi('Hsmall')
x.addKerning(left='n', right='u')
x.addRecipe('en-cy')
x = ugi('Hturned')
x.addKerning(left='Hturned', right='H')
x = ugi('I')
x.addAnchor('ogonek', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.capHeight)
x.addAnchor('topleft', position_x=xpos.outline_left, position_y=ypos.capHeight)
x.addKerning(left='H', right='H')
x.addMetrics(left='H', right='H')
x.addRecipe('_part.stem')
x = ugi('IJ')
x.addKerning(left='H', right='J')
x.addMetrics(left='H', right='J')
x = ugi('Iacute')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', width='I')
x = ugi('Ibreve')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Icaron')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Icircumflex')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Idieresis')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Idotaccent')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Idotbelow')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Igrave')
x.addKerning(left='H', right='H')
x.addMetrics(width='I', right='I')
x = ugi('Ihookabove')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Imacron')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Iogonek')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('Iotaafrican')
x.addKerning(left='H', right='Iotaafrican')
x.addRecipe('iota decompose')
x = ugi('Ismall')
x.addKerning(left='Ismall', right='Ismall')
x = ugi('Istroke')
x.addKerning(left='Eth', right='Istroke')
x.addRecipe('I', '_part.bar')
x = ugi('Itilde')
x.addKerning(left='H', right='H')
x.addMetrics(left='I', right='I')
x = ugi('J')
x.addAnchor('bottom', position_x=xpos.apex_bottom, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.capHeight)
x.addKerning(left='J', right='J')
x.addMetrics(left='J', right='J')
x = ugi('Jcircumflex')
x.addKerning(left='J', right='J')
x.addMetrics(left='J', right='J')
x = ugi('Jcrossedtail')
x.addKerning(left='J', right='Jcrossedtail')
x.addRecipe('J decompose', 'jcrossedtail decompose')
x = ugi('K')
x.addAnchor('bottom', position_x=xpos.outline_center, position_y=ypos.base_line)
x.addKerning(left='H', right='K')
x.addMetrics(left='H', right='K')
x = ugi('Kcommaaccent')
x.addKerning(left='H', right='K')
x.addMetrics(left='K', right='K')
x = ugi('Khook')
x.addRecipe('K decompose', '_part.Hook decompose')
x = ugi('Kturned')
x.addKerning(left='X', right='H')
x.addRecipe('K flip_horizontal flip_vertical')
x.addRecipe('K flip_vertical flip_horizontal')
x = ugi('L')
x.addAnchor('#dot', position_x=xpos.width_75, position_y=ypos.outline_middle)
x.addAnchor('bottom', position_x=xpos.outline_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.capHeight)
x.addAnchor('topright', position_x=xpos.stem_top_right, position_y=ypos.capHeight)
x.addKerning(left='H', right='L')
x.addMetrics(left='H', right='L')
x.addRecipe('E decompose')
x = ugi('Lacute')
x.addKerning(left='H', right='L')
x.addMetrics(left='L', right='L')
x = ugi('Lbelt')
x.addKerning(left='Lbelt', right='L')
x.addRecipe('L', 'lbelt decompose')
x = ugi('Lcaron')
x.addKerning(left='H', right='L')
x.addMetrics(left='L', right='L')
x = ugi('Lcommaaccent')
x.addKerning(left='H', right='L')
x.addMetrics(left='L', right='L')
x = ugi('Ldot')
x.addKerning(left='H', right='L')
x.addMetrics(left='L', right='L')
x.addRecipe('L', 'dotaccent')
x.addRecipe('L', 'periodcentered.loclCAT')
x = ugi('Lmiddletilde')
x.addKerning(left='Eth', right='L')
x.addRecipe('L', '_part.tilde')
x = ugi('Lslash')
x.addKerning(left='H', right='L')
x.addMetrics(left='Lslash', right='L')
x.addRecipe('macroncomb decompose', 'L')
x = ugi('Lsmall')
x.addKerning(left='n', right='Lsmall')
x = ugi('M')
x.addKerning(left='H', right='H')
x.addMetrics(left='H', right='H')
x = ugi('Mhook')
x.addKerning(left='H', right='H')
x.addRecipe('M', '_part.hook flip_horizontal flip_vertical')
x = ugi('Mturned')
x.addKerning(left='U', right='H')
x.addRecipe('u decompose', 'I decompose')
x = ugi('N')
x.addAnchor('bottom', position_x=xpos.outline_center)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.capHeight)
x.addKerning(left='H', right='N')
x.addMetrics(left='H', right='N')
x = ugi('Nacute')
x.addKerning(left='H', right='N')
x.addMetrics(left='N', right='N')
x = ugi('Napostrophe')
x.addMetrics(left='quoteright', right='N')
x = ugi('Ncaron')
x.addKerning(left='H', right='N')
x.addMetrics(left='N', right='N')
x = ugi('Ncommaaccent')
x.addKerning(left='H', right='N')
x.addMetrics(left='N', right='N')
x = ugi('Nhookleft')
x.addKerning(left='H', right='H')
x.addRecipe('N', '_part.hook flip_horizontal flip_vertical')
x = ugi('Nlongrightleg')
x.addKerning(left='H', right='H')
x.addRecipe('Shha-cy decompose')
x = ugi('Nsmall')
x.addKerning(left='n', right='u')
x = ugi('Ntilde')
x.addKerning(left='H', right='N')
x.addMetrics(left='N', right='N')
x = ugi('O')
x.addAnchor('#center', position_x=xpos.outline_center, position_y=ypos.outline_middle)
x.addAnchor('#topleft', position_x=xpos.outline_left, position_y=ypos.capHeight)
x.addAnchor('#topright', position_x=xpos.outline_right, position_y=ypos.capHeight)
x.addAnchor('bottom', position_x=xpos.apex_bottom, position_y=ypos.base_line)
x.addAnchor('center', suppress_auto=True)
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.capHeight)
x.addAnchor('topleft', suppress_auto=True)
x.addAnchor('topright', suppress_auto=True)
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('OE')
x.addKerning(left='O', right='E')
x.addMetrics(left='O', right='E')
x.addRecipe('O decompose', 'E')
x = ugi('OEsmall')
x.addKerning(left='o', right='OEsmall')
x = ugi('Oacute')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Obreve')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Ocaron')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Ocenteredtilde')
x.addKerning(left='O', right='O')
x.addRecipe('Obarred-cy')
x = ugi('Ocircumflex')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Ocircumflexacute')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x.addRecipe('O', 'circumflexcomb_acutecomb')
x = ugi('Ocircumflexdotbelow')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x.addRecipe('O', 'circumflexcomb', 'dotbelowcomb')
x = ugi('Ocircumflexgrave')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x.addRecipe('O', 'circumflexcomb_gravecomb')
x = ugi('Ocircumflexhookabove')
x.addKerning(left='O', right='O')
# Fix: was addMetrics(left='O', width='O') — every sibling uppercase
# O-circumflex glyph (acute/grave/tilde/dotbelow) uses right='O', so the
# 'width' keyword here was almost certainly a typo for 'right'.
x.addMetrics(left='O', right='O')
x.addRecipe('O', 'circumflexcomb_hookabovecomb')
x = ugi('Ocircumflextilde')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x.addRecipe('O', 'circumflexcomb_tildecomb')
x = ugi('Odieresis')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Odotbelow')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Ograve')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Ohookabove')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Ohorn')
x.addKerning(left='O', right='Ohorn')
x.addMetrics(left='O', right='Ohorn')
x = ugi('Ohornacute')
x.addKerning(left='O', right='Ohorn')
x.addMetrics(left='O', right='Ohorn')
x.addRecipe('Ohorn', 'acutecomb')
x.addRecipe('Ohorn', 'acutecomb.case')
x = ugi('Ohorndotbelow')
x.addKerning(left='O', right='Ohorn')
x.addMetrics(left='O', right='Ohorn')
x.addRecipe('Ohorn', 'dotbelowcomb')
x.addRecipe('Ohorn', 'dotbelowcomb.case')
x = ugi('Ohorngrave')
x.addKerning(left='O', right='Ohorn')
x.addMetrics(left='O', right='Ohorn')
x.addRecipe('Ohorn', 'gravecomb')
x.addRecipe('Ohorn', 'gravecomb.case')
x = ugi('Ohornhookabove')
x.addKerning(left='O', right='Ohorn')
x.addMetrics(left='O', right='Ohorn')
x.addRecipe('Ohorn', 'hookabovecomb')
x.addRecipe('Ohorn', 'hookabovecomb.case')
x = ugi('Ohorntilde')
x.addKerning(left='O', right='Ohorn')
x.addMetrics(left='O', right='Ohorn')
x.addRecipe('Ohorn', 'tildecomb')
x.addRecipe('Ohorn', 'tildecomb.case')
x = ugi('Ohungarumlaut')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Omacron')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Oopen')
x.addKerning(left='Oopen', right='O')
x.addRecipe('C flip_horizontal flip_vertical')
x = ugi('Oslash')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x.addRecipe('O', 'slash decompose')
x = ugi('Oslashacute')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Otilde')
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('P')
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addKerning(left='H', right='P')
x.addMetrics(left='H', right='P')
x = ugi('Phook')
x.addKerning(left='Bhook', right='P')
x.addRecipe('P', '_part.Hookleft')
x = ugi('Q')
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.capHeight)
x.addKerning(left='O', right='O')
x.addMetrics(left='O', right='O')
x = ugi('Qhooktail')
x.addKerning(left='O', right='H')
x.addRecipe('Alpha-latin', '_part.Hook flip_vertical')
x = ugi('R')
x.addKerning(left='H', right='R')
x.addMetrics(left='H', right='R')
x = ugi('Racute')
x.addKerning(left='H', right='R')
x.addMetrics(left='R', right='R')
x = ugi('Rcaron')
x.addKerning(left='H', right='R')
x.addMetrics(left='R', right='R')
x = ugi('Rcommaaccent')
x.addKerning(left='H', right='R')
x.addMetrics(left='R', right='R')
x = ugi('Rsmall')
x.addKerning(left='n', right='Rsmall')
x = ugi('Rsmallinverted')
x.addKerning(left='n', right='Rsmallinverted')
x.addRecipe('Rsmall')
x = ugi('Rtail')
x.addKerning(left='H', right='R')
x.addRecipe('R', '_part.Hook flip_vertical')
x = ugi('S')
x.addAnchor('#center', position_x=xpos.outline_center, position_y=ypos.outline_middle)
x.addAnchor('bottom', position_x=xpos.apex_bottom, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.capHeight)
x.addKerning(left='S', right='S')
x.addMetrics(left='S', right='S')
x = ugi('Sacute')
x.addKerning(left='S', right='S')
x.addMetrics(left='S', right='S')
x = ugi('Scaron')
x.addKerning(left='S', right='S')
x.addMetrics(left='S', right='S')
x = ugi('Scedilla')
x.addKerning(left='S', right='S')
x.addMetrics(left='S', right='S')
x = ugi('Schwa')
x.addKerning(left='O', right='O')
x.addMetrics(left='Schwa', right='O')
x.addRecipe('G decompose flip_horizontal')
x.addRecipe('O decompose', 'two decompose', 'H decompose', italic=True)
x.addRecipe('Schwa-cy')
x.addRecipe('schwa decompose')
x = ugi('Scircumflex')
x.addKerning(left='S', right='S')
x.addMetrics(left='S', right='S')
x = ugi('Scommaaccent')
x.addKerning(left='S', right='S')
x.addMetrics(left='S', right='S')
x = ugi('T')
x.addAnchor('#center', position_x=xpos.stem_bottom_center, position_y=ypos.outline_middle)
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center)
x.addKerning(left='T', right='T')
x.addMetrics(left='T', right='T')
x.addRecipe('_part.stem', '_part.bar')
x = ugi('Tbar')
x.addKerning(left='T', right='F')
x.addMetrics(left='T', right='T')
x.addRecipe('T', '_part.bar')
x = ugi('Tcaron')
x.addKerning(left='T', right='T')
x.addMetrics(left='T', right='T')
x = ugi('Tcedilla')
x.addKerning(left='T', right='T')
x.addMetrics(left='T', right='T')
x.addRecipe('T', 'cedillacomb')
x = ugi('Tcommaaccent')
x.addKerning(left='T', right='T')
x.addMetrics(left='T', right='T')
x = ugi('Thook')
x.addKerning(left='Bhook', right='T')
x.addRecipe('T decompose', '_part.Hookleft decompose')
x = ugi('Thorn')
x.addKerning(left='H', right='Thorn')
x.addMetrics(left='H', right='Thorn')
x.addRecipe('P decompose', 'I decompose')
x = ugi('Tretroflexhook')
x.addKerning(left='T', right='T')
x.addRecipe('T', '_part.Hook flip_vertical')
x = ugi('Tturned')
x.addKerning(left='Tturned', right='L')
x.addRecipe('T flip_vertical flip_horizontal')
x = ugi('U')
x.addAnchor('bottom', position_x=xpos.apex_bottom, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.capHeight)
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Uacute')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Ubar')
x.addKerning(left='U', right='U')
x.addRecipe('U', '_part.bar')
x = ugi('Ubreve')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Ucaron')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Ucircumflex')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Udieresis')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Udieresisacute')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Udieresiscaron')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Udieresisgrave')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Udieresismacron')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Udotbelow')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Ugrave')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Uhookabove')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Uhorn')
x.addKerning(left='U', right='Uhorn')
x.addMetrics(left='U', right='Uhorn')
x = ugi('Uhornacute')
x.addKerning(left='U', right='Uhorn')
x.addMetrics(left='U', right='Uhorn')
x.addRecipe('Uhorn', 'acutecomb')
x.addRecipe('Uhorn', 'acutecomb.case')
x = ugi('Uhorndotbelow')
x.addKerning(left='U', right='Uhorn')
x.addMetrics(left='U', right='Uhorn')
x.addRecipe('Uhorn', 'dotbelowcomb')
x.addRecipe('Uhorn', 'dotbelowcomb.case')
x = ugi('Uhorngrave')
x.addKerning(left='U', right='Uhorn')
x.addMetrics(left='U', right='Uhorn')
x.addRecipe('Uhorn', 'gravecomb')
x.addRecipe('Uhorn', 'gravecomb.case')
x = ugi('Uhornhookabove')
x.addKerning(left='U', right='Uhorn')
x.addMetrics(left='U', right='Uhorn')
x.addRecipe('Uhorn', 'hookabovecomb')
x.addRecipe('Uhorn', 'hookabovecomb.case')
x = ugi('Uhorntilde')
x.addKerning(left='U', right='Uhorn')
x.addMetrics(left='U', right='Uhorn')
x.addRecipe('Uhorn', 'tildecomb')
x.addRecipe('Uhorn', 'tildecomb.case')
x = ugi('Uhungarumlaut')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Umacron')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Uogonek')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Upsilonafrican')
x.addKerning(left='O', right='O')
x.addRecipe('Omega flip_vertical flip_horizontal')
x = ugi('Uring')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('Usmall')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='=|u')
x.addRecipe('u decompose')
x = ugi('Utilde')
x.addKerning(left='U', right='U')
x.addMetrics(left='U', right='U')
x = ugi('V')
x.addKerning(left='V', right='V')
x.addMetrics(left='V', right='V')
x = ugi('Vhook')
x.addKerning(left='U', right='U')
x.addRecipe('vhook decompose', 'U decompose')
x = ugi('Vturned')
x.addKerning(left='A', right='A')
x.addRecipe('V flip_vertical flip_horizontal')
x = ugi('W')
x.addAnchor('bottom', position_x=xpos.outline_center)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.capHeight)
x.addKerning(left='V', right='V')
x.addMetrics(left='V', right='V')
x = ugi('Wacute')
x.addKerning(left='V', right='V')
x.addMetrics(left='W', right='W')
x = ugi('Wcircumflex')
x.addKerning(left='V', right='V')
x.addMetrics(left='W', right='W')
x = ugi('Wdieresis')
x.addKerning(left='V', right='V')
x.addMetrics(left='W', right='W')
x = ugi('Wgrave')
x.addKerning(left='V', right='V')
x.addMetrics(left='W', right='W')
x = ugi('Whook')
x.addKerning(left='V', right='V')
x.addRecipe('W decompose', 'Khook decompose')
x = ugi('X')
x.addKerning(left='X', right='K')
x.addMetrics(left='=|X', right='=K*1.05')
x = ugi('Y')
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.capHeight)
x.addKerning(left='Y', right='Y')
x.addMetrics(left='V', right='V')
x = ugi('Yacute')
x.addKerning(left='Y', right='Y')
x.addMetrics(left='Y', right='Y')
x = ugi('Ycircumflex')
x.addKerning(left='Y', right='Y')
x.addMetrics(left='Y', right='Y')
x = ugi('Ydieresis')
x.addKerning(left='Y', right='Y')
x.addMetrics(left='Y', right='Y')
x = ugi('Ydotbelow')
x.addKerning(left='Y', right='Y')
x.addMetrics(left='Y', right='Y')
x = ugi('Ygrave')
x.addKerning(left='Y', right='Y')
x.addMetrics(left='Y', right='Y')
x = ugi('Yhookabove')
x.addKerning(left='Y', right='Y')
x.addMetrics(left='Y', right='Y')
x = ugi('Ysmall')
x.addKerning(left='v', right='v')
x = ugi('Ytilde')
x.addKerning(left='Y', right='Y')
x.addMetrics(left='Y', right='Y')
x = ugi('Z')
x.addKerning(left='Z', right='Z')
x.addMetrics(left='Z', right='Z')
x = ugi('Zacute')
x.addKerning(left='Z', right='Z')
x.addMetrics(left='Z', right='Z')
x = ugi('Zcaron')
x.addKerning(left='Z', right='Z')
x.addMetrics(left='Z', right='Z')
x = ugi('Zdotaccent')
x.addKerning(left='Z', right='Z')
x.addMetrics(left='Z', right='Z')
x = ugi('Zstroke')
x.addMetrics(left='Z', right='Z')
#
# --------------------------------
#
# Lowercase
#
# --------------------------------
#
x = ugi('a')
x.addAnchor('ogonek', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addKerning(left='a', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', right='a', italic_left='o', italic_right='u')
x = ugi('aacute')
x.addKerning(left='a', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('abreve')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('abreveacute')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'brevecomb_acutecomb')
x = ugi('abrevedotbelow')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'brevecomb', 'dotbelowcomb')
x = ugi('abrevegrave')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'brevecomb_gravecomb')
x = ugi('abrevehookabove')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'brevecomb_hookabovecomb')
x = ugi('abrevetilde')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'brevecomb_tildecomb')
x = ugi('acaron')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('acircumflex')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('acircumflexacute')
# Fix: was right=None — all other a-circumflex glyphs in this series
# (acircumflexdotbelow/grave/hookabove/tilde) kern with right='a'.
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'circumflexcomb_acutecomb')
x = ugi('acircumflexdotbelow')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'circumflexcomb', 'dotbelowcomb')
x = ugi('acircumflexgrave')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'circumflexcomb_gravecomb')
x = ugi('acircumflexhookabove')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'circumflexcomb_hookabovecomb')
x = ugi('acircumflextilde')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x.addRecipe('a', 'circumflexcomb_tildecomb')
x = ugi('acutegraveacutecomb')
x.addMetrics(left='=50', right='=50')
x.addRecipe('graveacutegravecomb flip_horizontal')
x = ugi('acutemacroncomb')
x.addMetrics(left='=50', right='=50')
x.addRecipe('macrongravecomb flip_horizontal')
x = ugi('adieresis')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('adotbelow')
x.addKerning(left='a', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('ae')
x.addKerning(left='a', right='e', italic_left='o')
x.addMetrics(left='a', right='e', italic_left='o')
x.addRecipe('a decompose', 'e decompose')
x = ugi('aeacute')
x.addKerning(left='a', right='e', italic_left='o')
x.addMetrics(left='ae', width='ae', italic_left='o')
x = ugi('agrave')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('ahookabove')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('alpha')
x.addKerning(left='o', right='u')
x = ugi('alpha-latin')
x.addRecipe('alpha')
x = ugi('alphaturned')
x.addKerning(left='n', right='o')
x = ugi('alphaturned-latin')
x.addRecipe('alpha')
x = ugi('amacron')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('aogonek')
x.addKerning(left='a', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('aring')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('aringacute')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('atilde')
x.addKerning(left='abreve', right='a', italic_left='o', italic_right='u')
x.addMetrics(left='a', width='a')
x = ugi('aturned')
x.addKerning(left='u', right='e')
x.addRecipe('a flip_vertical')
x = ugi('b')
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.ascender)
x.addKerning(left='b', right='o')
x.addMetrics(left='b', right='o')
x = ugi('bhook')
x.addKerning(left='b', right='o')
x = ugi('bilabialclick')
x.addKerning(left='bilabialclick', right='O')
x.addRecipe('O', 'dotaccentcomb')
x = ugi('brevebelowcomb')
x.addMetrics(left='brevecomb', right='brevecomb')
x = ugi('bridgebelowcomb')
x.addRecipe('minusbelowcomb decompose')
x = ugi('bridgeinvertedbelowcomb')
x.addRecipe('bridgebelowcomb flip_vertical')
x = ugi('c')
x.addAnchor('bottom', position_x=xpos.apex_bottom)
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.xHeight)
x.addKerning(left='o', right='c')
x.addMetrics(left='o', right='c')
x = ugi('cacute')
x.addKerning(left='o', right='c')
x.addMetrics(left='c', width='c')
x = ugi('ccaron')
x.addKerning(left='o', right='c')
x.addMetrics(left='c', width='c')
x = ugi('ccedilla')
x.addKerning(left='o', right='c')
x.addMetrics(left='c', width='c')
x = ugi('ccircumflex')
x.addKerning(left='o', right='c')
x.addMetrics(left='c', width='c')
x = ugi('ccurl')
x.addKerning(left='o', right='c')
x = ugi('cdotaccent')
x.addKerning(left='o', right='c')
x.addMetrics(left='c', width='c')
x = ugi('chook')
x.addKerning(left='o', right='dhook')
x.addMetrics(left='c', right='dhook')
x.addRecipe('c', '_part.hook')
x = ugi('clickalveolar')
x.addKerning(left='clickalveolar', right='clickalveolar')
x.addRecipe('clickdental', '_part.bar', '_part.bar')
x = ugi('clickdental')
x.addKerning(left='clickdental', right='clickdental')
x.addRecipe('bar decompose')
x = ugi('clicklateral')
x.addKerning(left='clickdental', right='clickdental')
x.addRecipe('clickdental', 'clickdental')
x = ugi('clickretroflex')
x.addKerning(left='h', right='d')
x.addRecipe('exclam')
x = ugi('closeup')
x.addMetrics(left='undertie', right='undertie')
x.addRecipe('undertie', 'breveinverteddoublecomb')
x = ugi('colontriangularhalfmod')
x.addRecipe('periodcentered decompose')
x = ugi('colontriangularmod')
x.addRecipe('colontriangularhalfmod', 'colontriangularhalfmod flip_vertical')
x = ugi('d')
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.ascender)
x.addAnchor('topright', position_x=xpos.stem_top_right, position_y=ypos.ascender)
x.addKerning(left='o', right='d')
x.addMetrics(left='d', right='d')
x = ugi('dblarchinvertedbelowcomb')
x.addMetrics(left='seagullbelowcomb', right='seagullbelowcomb')
x.addRecipe('seagullbelowcomb flip_vertical flip_horizontal')
x = ugi('dblverticalbar')
x.addRecipe('bar', 'bar')
x = ugi('dcaron')
x.addKerning(left='o', right='dcaron')
x.addMetrics(left='d', width='d')
x = ugi('dcroat')
x.addKerning(left='o', right='d')
x.addMetrics(left='d', right='d')
x.addRecipe('d', '_part.bar')
x.addRecipe('d', 'macroncomb decompose')
x = ugi('dezh')
x.addKerning(left='o', right='ezh')
x.addRecipe('d', 'ezh')
x = ugi('dhook')
x.addKerning(left='o', right='dhook')
x.addMetrics(left='d')
x.addRecipe('d decompose', '_part.hook')
x = ugi('dhookandtail')
x.addKerning(left='o', right='dhook')
x.addMetrics(left='d', right='dhook')
x.addRecipe('dhook', '_part.hook')
x = ugi('downtackbelowcomb')
x.addRecipe('uptackbelowcomb flip_vertical flip_horizontal')
x = ugi('downtackmod')
x.addRecipe('uptackmod flip_vertical')
x = ugi('dtail')
x.addKerning(left='o', right='dtail')
x.addRecipe('d', '_part.hook')
x = ugi('dzaltone')
x.addKerning(left='o', right='z')
x.addMetrics(left='d', right='z')
x.addRecipe('d', 'z')
x = ugi('dzcurl')
x.addKerning(left='d', right='zcurl')
x.addMetrics(left='d', right='zcurl')
x.addRecipe('d', 'zcurl')
x = ugi('e')
x.addAnchor('bottom', position_x=xpos.apex_bottom)
x.addAnchor('ogonek', position_x=xpos.width_75, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.apex_top)
x.addKerning(left='o', right='e')
x.addMetrics(left='o', right='e')
x = ugi('eacute')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('ebreve')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('ecaron')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('ecircumflex')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('ecircumflexacute')
# Fix: was right=None — all other e-circumflex glyphs in this series
# (ecircumflexdotbelow/grave/hookabove/tilde) kern with right='e'.
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x.addRecipe('e', 'circumflexcomb_acutecomb')
x = ugi('ecircumflexdotbelow')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x.addRecipe('e', 'circumflexcomb', 'dotbelowcomb')
x = ugi('ecircumflexgrave')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x.addRecipe('e', 'circumflexcomb_gravecomb')
x = ugi('ecircumflexhookabove')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x.addRecipe('e', 'circumflexcomb_hookabovecomb')
x = ugi('ecircumflextilde')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x.addRecipe('e', 'circumflexcomb_tildecomb')
x = ugi('edieresis')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('edotaccent')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('edotbelow')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('egrave')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('ehookabove')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('emacron')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', width='e')
x = ugi('eng')
x.addKerning(left='n', right='j')
x.addMetrics(left='n', right='j')
x.addRecipe('jdotless decompose', 'n decompose')
x = ugi('eogonek')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', right='e')
x = ugi('eopen')
x.addKerning(left='s', right='c')
x.addRecipe('ze-cy decompose')
x = ugi('eopenreversed')
x.addKerning(left='eopenreversed', right='Bsmall')
x.addRecipe('ze-cy')
x = ugi('eopenreversedclosed')
x.addKerning(left='o', right='Bsmall')
x.addRecipe('ze-cy decompose')
x = ugi('eopenreversedhook')
x.addKerning(left='eopenreversed', right='eopenreversedhook')
x.addRecipe('eopenreversed', '_part.hook')
x = ugi('ereversed')
x.addKerning(left='o', right='o')
x.addRecipe('e decompose')
x = ugi('esh')
x.addKerning(left='j', right='dhook')
x.addMetrics(right='dhook')
x.addRecipe('f decompose')
x = ugi('eth')
x.addKerning(left='eth', right='eth')
x.addMetrics(left='eth', right='eth')
x = ugi('etilde')
x.addKerning(left='o', right='e')
x.addMetrics(left='e', right='e')
x = ugi('ezh')
x.addKerning(left='ezh', right='ezh')
x = ugi('f')
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.ascender)
x.addKerning(left='f', right='f')
x.addMetrics(left='f', right='f')
x = ugi('f_f')
x.addKerning(left='f', right='f')
x.addRecipe('f', 'f')
x = ugi('f_f_i')
x.addKerning(left='f', right='i')
x.addRecipe('f', 'f', 'i')
x = ugi('f_f_l')
x.addKerning(left='f', right='d')
x.addRecipe('f', 'f', 'l')
x = ugi('f_i')
x.addKerning(left='f', right='i')
x.addRecipe('fi')
x = ugi('f_l')
x.addKerning(left='f', right='d')
x.addRecipe('fl')
x = ugi('fi')
x.addKerning(left='f', right='i')
x.addRecipe('f_i')
x = ugi('fl')
x.addKerning(left='f', right='d')
x.addRecipe('f_l')
x = ugi('g')
x.addKerning(left='g', right='g')
x.addMetrics(left='g', right='g')
x = ugi('gacute')
x.addKerning(left='g', right='g')
x = ugi('gamma')
x.addKerning(left='v', right='v')
x = ugi('gamma-latin')
x.addRecipe('v decompose')
x = ugi('gbreve')
x.addKerning(left='g', right='g')
x.addMetrics(left='g', right='g')
x = ugi('gcircumflex')
x.addKerning(left='g', right='g')
x.addMetrics(left='g', right='g')
x = ugi('gcommaaccent')
x.addKerning(left='g', right='g')
x.addMetrics(left='g', right='g')
x.addRecipe('g', 'commaturnedabovecomb')
x = ugi('gdotaccent')
x.addKerning(left='g', right='g')
x.addMetrics(left='g', right='g')
x = ugi('germandbls')
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.ascender)
x.addKerning(left='f', right='germandbls')
x.addMetrics(left='f', right='germandbls')
x.addRecipe('f decompose', 's decompose')
x = ugi('ghook')
x.addKerning(left='o', right='dhook')
x.addMetrics(left='g', right='dhook')
x.addRecipe('gsingle', '_part.hook')
x = ugi('glottalstop')
x.addKerning(left='glottalstop', right='glottalstop')
x.addRecipe('question decompose')
x = ugi('glottalstopreversed')
x.addKerning(left='glottalstopreversed', right='glottalstopreversed')
x.addRecipe('glottalstop flip_horizontal')
x = ugi('glottalstopsmall')
x.addRecipe('glottalstop decompose')
x = ugi('glottalstopstroke')
x.addKerning(left='glottalstopstroke', right='glottalstopstroke')
x.addRecipe('glottalstop', '_part.bar')
x = ugi('glottalstopstrokereversed')
x.addKerning(left='glottalstopstrokereversed', right='glottalstopstrokereversed')
x.addRecipe('glottalstopreversed', '_part.bar')
x = ugi('graveacutegravecomb')
x.addMetrics(left='=50', right='=50')
x.addRecipe('caron decompose', 'circumflex decompose')
x = ugi('gravemacroncomb')
x.addMetrics(left='=50', right='=50')
x.addRecipe('macronacutecomb flip_horizontal')
x = ugi('gsingle')
x.addKerning(left='o', right='u')
x.addRecipe('q decompose', 'y decompose')
x = ugi('h')
x.addKerning(left='h', right='n')
x.addMetrics(left='h', right='n')
x = ugi('hbar')
x.addKerning(left='h', right='n')
x.addMetrics(width='h', right='h')
x.addRecipe('h', '_part.bar')
x.addRecipe('h', 'macroncomb decompose')
x = ugi('hcircumflex')
x.addKerning(left='h', right='n')
x.addMetrics(width='h', right='h')
x = ugi('henghook')
x.addKerning(left='j', right='n')
x.addRecipe('hhook', '_part.hook')
x = ugi('hhook')
x.addKerning(left='h', right='n')
x.addRecipe('n', '_part.hook')
x = ugi('hturned')
x.addKerning(left='u', right='q')
x.addRecipe('h flip_horizontal flip_vertical')
x = ugi('hv')
x.addKerning(left='h', right='vhook')
x.addMetrics(left='h', right='vhook')
x.addRecipe('h decompose', 'vhook decompose')
x = ugi('i')
x.addAnchor('bottom', position_x=xpos.stem_bottom_center)
# Fix: position_x was the string literal 'xpos.stem_bottom_right' rather than
# the attribute itself; every other anchor in this file passes the xpos value
# directly (e.g. xpos.stem_top_right on 'd' and 'l').
x.addAnchor('ogonek', position_x=xpos.stem_bottom_right)
x.addKerning(left='i', right='i')
x.addMetrics(left='i', right='i')
x = ugi('iacute')
x.addKerning(left='i', right='i')
x.addMetrics(left='i', right='i')
x = ugi('ibreve')
x.addKerning(left='idieresis', right='i')
x.addMetrics(left='idieresis', right='i')
x = ugi('icaron')
x.addKerning(left='idieresis', right='i')
x.addMetrics(left='idieresis', right='i')
x = ugi('icircumflex')
x.addKerning(left='idieresis', right='i')
x.addMetrics(left='idieresis', right='i')
x = ugi('idieresis')
x.addKerning(left='idieresis', right='i')
x.addMetrics(left='idieresis', right='i')
x = ugi('idotaccent')
x.addKerning(left='i', right='i')
x.addMetrics(left='i', right='i')
x = ugi('idotbelow')
x.addKerning(left='i', right='i')
x.addMetrics(left='i', width='i')
x = ugi('idotless')
x.addAnchor('bottom', position_x=xpos.outline_center)
x.addAnchor('ogonek', suppress_auto=True)
x.addAnchor('top', position_x=xpos.outline_center)
x.addKerning(left='n', right='u')
x.addMetrics(left='i', width='i')
x.addRecipe('i decompose')
x = ugi('igrave')
x.addKerning(left='idieresis', right='i')
x.addMetrics(left='idieresis', right='i')
x = ugi('ihookabove')
x.addKerning(left='i', right='i')
x.addMetrics(left='i', width='i')
x = ugi('ij')
x.addKerning(left='i', right='j')
x.addMetrics(left='i', right='j')
x = ugi('imacron')
x.addKerning(left='idieresis', right='i')
x.addMetrics(left='idieresis', right='i')
x = ugi('iogonek')
x.addKerning(left='i', right='i')
x.addMetrics(width='i')
x.addRecipe('i', 'ogonekcomb')
x = ugi('istroke')
x.addKerning(left='i', right='i')
x.addRecipe('i', '_part.bar')
x = ugi('itilde')
x.addKerning(left='idieresis', right='i')
x.addMetrics(left='idieresis', right='i')
x = ugi('j')
x.addKerning(left='j', right='j')
x.addMetrics(left='j', right='j')
x = ugi('jcircumflex')
x.addKerning(left='jcircumflex', right='j')
x.addMetrics(left='jcircumflex', right='j')
x = ugi('jcrossedtail')
x.addKerning(left='jcrossedtail', right='jcrossedtail')
x.addRecipe('j decompose')
x = ugi('jdotless')
x.addAnchor('bottom', position_x=xpos.outline_center, position_y=ypos.outline_bottom)
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.xHeight)
# NOTE(review): two consecutive addKerning calls — every other glyph in this
# file issues exactly one; confirm whether the second call supplements or
# silently overrides the first.
x.addKerning(left='j', right='j')
x.addKerning(left='p', right='q')
x.addMetrics(left='j', width='j')
x.addRecipe('j decompose')
x = ugi('jdotlessstroke')
x.addKerning(left='jdotlessstroke', right='istroke')
x.addRecipe('jdotless', '_part.bar')
x = ugi('jdotlessstrokehook')
x.addKerning(left='jdotlessstroke', right='dhook')
x.addRecipe('jdotlessstroke', '_part.hook')
x = ugi('k')
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.ascender)
x.addKerning(left='h', right='k')
x.addMetrics(left='h', right='k')
x = ugi('kcommaaccent')
x.addKerning(left='h', right='k')
x.addMetrics(left='k', right='k')
x = ugi('kgreenlandic')
x.addKerning(left='n', right='k')
x.addMetrics(left='n', right='k')
x.addRecipe('k decompose')
x = ugi('khook')
x.addKerning(left='h', right='k')
x.addRecipe('kgreenlandic', '_part.hook')
x = ugi('kturned')
x.addKerning(left='x', right='q')
x.addRecipe('k flip_horizontal flip_vertical')
x = ugi('l')
# NOTE(review): position_x here is the string literal 'xpos.RSB', unlike other
# anchors which pass the xpos attribute directly — confirm whether addAnchor
# resolves string expressions lazily (RSB may not exist at definition time) or
# whether this should be an attribute reference.
x.addAnchor('#dot', position_x='xpos.RSB', position_y=ypos.outline_middle)
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.stem_top_center)
x.addAnchor('topright', position_x=xpos.stem_top_right)
x.addKerning(left='h', right='d')
x.addMetrics(left='h', right='d')
x = ugi('lacute')
x.addKerning(left='h', right='d')
x.addMetrics(left='l', right='l')
x = ugi('lambdastroke')
x.addKerning(left='vturned', right='vturned')
x.addMetrics(left='lambda', right='lambda')
x.addRecipe('lambda', 'eth decompose')
x = ugi('lbar')
x.addKerning(left='lslash', right='lslash')
x.addMetrics(left='lslash', right='lslash')
x.addRecipe('l', '_part.bar')
x = ugi('lbelt')
x.addKerning(left='lslash', right='lslash')
x.addRecipe('l', 'zcurl decompose')
x = ugi('lcaron')
x.addKerning(left='h', right='dcaron')
x.addMetrics(left='l', right='dcaron')
x = ugi('lcommaaccent')
x.addKerning(left='h', right='d')
x.addMetrics(left='l', right='l')
x = ugi('ldot')
x.addKerning(left='h', right='ldot')
x.addMetrics(left='l', right='ldot')
x.addRecipe('l', 'dotaccent')
x.addRecipe('l', 'periodcentered.loclCAT')
x = ugi('leftangleabovecomb')
x.addRecipe('lefttackbelowcomb decompose')
x = ugi('lefttackbelowcomb')
x.addRecipe('uptackmod decompose')
x = ugi('lezh')
x.addKerning(left='h', right='ezh')
x.addRecipe('l', 'ezh')
x = ugi('lhookretroflex')
x.addKerning(left='h', right='dtail')
x.addRecipe('l', '_part.hook')
x = ugi('lmiddletilde')
x.addKerning(left='lslash', right='lslash')
x.addRecipe('l', '_part.tilde')
x = ugi('longs')
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.ascender)
x.addKerning(left='f', right='f')
x.addMetrics(left='f', right='f')
x.addRecipe('f decompose')
x = ugi('lslash')
x.addKerning(left='lslash', right='lslash')
x.addMetrics(left='lslash', right='lslash')
x.addRecipe('macroncomb decompose', 'l')
x = ugi('m')
x.addAnchor('bottom', position_x=xpos.outline_center)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.xHeight)
x.addKerning(left='n', right='n')
x.addMetrics(left='n', right='n')
x = ugi('macronacutecomb')
x.addMetrics(left='=50', right='=50')
x.addRecipe('macron decompose', 'acute decompose')
x = ugi('macronbelowcomb')
x.addMetrics(left='macroncomb', right='macroncomb')
x = ugi('macrongravecomb')
x.addMetrics(left='=50', right='=50')
x.addRecipe('macroncomb decompose', 'gravecomb decompose')
x = ugi('mhook')
x.addKerning(left='n', right='j')
x.addRecipe('m', '_part.hook flip_horizontal flip_vertical')
x = ugi('minusbelowcomb')
x.addRecipe('lefttackbelowcomb', 'lefttackbelowcomb flip_horizontal')
x = ugi('minusmod')
x.addMetrics(left='plus', right='plus')
x.addRecipe('macron')
x = ugi('mlonglegturned')
x.addKerning(left='u', right='q')
x.addRecipe('m flip_horizontal flip_vertical', '_part.stem flip_horizontal flip_vertical')
x = ugi('mpalatalhook')
x.addKerning(left='n', right='n')
x.addRecipe('m', '_part.hook flip_vertical flip_horizontal')
x = ugi('mturned')
x.addKerning(left='u', right='u')
x.addRecipe('m flip_horizontal flip_vertical')
x = ugi('n')
x.addAnchor('bottom', position_x=xpos.outline_center)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.xHeight)
x.addKerning(left='n', right='n')
x.addMetrics(left='n', right='n')
x = ugi('n.subs')
x.addRecipe('nmod')
x = ugi('nacute')
x.addKerning(left='n', right='n')
x.addMetrics(left='n', right='n')
x = ugi('napostrophe')
x.addKerning(left=None, right='n')
x.addMetrics(left=None, right='n')
x.addRecipe('quoteright', 'n')
x = ugi('ncaron')
x.addKerning(left='n', right='n')
x.addMetrics(left='n', right='n')
x = ugi('ncommaaccent')
x.addKerning(left='n', right='n')
x.addMetrics(left='n', right='n')
x = ugi('nhookleft')
x.addKerning(left='j', right='n')
x.addRecipe('n', '_part.hook flip_horizontal flip_vertical')
x = ugi('nhookretroflex')
x.addKerning(left='n', right='nhookretroflex')
x.addRecipe('n', '_part.hook flip_vertical')
x = ugi('nmod')
x.addRecipe('n.sups')
x = ugi('ntilde')
x.addKerning(left='n', right='n')
x.addMetrics(left='n', right='n')
x = ugi('o')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', right='o')
x = ugi('oacute')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('obarred')
x.addKerning(left='o', right='o')
x.addRecipe('obarred-cy')
x = ugi('obreve')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('ocaron')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('ocircumflex')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('ocircumflexacute')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x.addRecipe('o', 'circumflexcomb_acutecomb')
x = ugi('ocircumflexdotbelow')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x.addRecipe('o', 'circumflexcomb', 'dotbelowcomb')
x = ugi('ocircumflexgrave')
x.addKerning(left=None, right='o')
x.addMetrics(right='o', width='o')
x.addRecipe('o', 'circumflexcomb_gravecomb')
x = ugi('ocircumflexhookabove')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x.addRecipe('o', 'circumflexcomb_hookabovecomb')
x = ugi('ocircumflextilde')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x.addRecipe('o', 'circumflexcomb_tildecomb')
x = ugi('odieresis')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('odotbelow')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('oe')
x.addAnchor('bottom', position_x=xpos.outline_center)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.xHeight)
x.addKerning(left='o', right='e')
x.addMetrics(left='o', right='e')
x.addRecipe('o', 'e')
x = ugi('ograve')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('ohookabove')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('ohorn')
x.addKerning(left='o', right='ohorn')
x.addMetrics(left='o', width='ohorn')
x = ugi('ohornacute')
x.addKerning(left='o', right='ohorn')
x.addMetrics(left='o', width='ohorn')
x.addRecipe('ohorn', 'acutecomb')
x = ugi('ohorndotbelow')
x.addKerning(left='o', right='ohorn')
x.addMetrics(left='o', width='ohorn')
x.addRecipe('ohorn', 'dotbelowcomb')
x = ugi('ohorngrave')
x.addKerning(left='o', right='ohorn')
x.addMetrics(left='o', width='ohorn')
x.addRecipe('ohorn', 'gravecomb')
x = ugi('ohornhookabove')
x.addKerning(left='o', right='ohorn')
x.addMetrics(left='o', width='ohorn')
x.addRecipe('ohorn', 'hookabovecomb')
x = ugi('ohorntilde')
x.addKerning(left='o', right='ohorn')
x.addMetrics(left='o', width='ohorn')
x.addRecipe('ohorn', 'tildecomb')
x = ugi('ohungarumlaut')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('omacron')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', width='o')
x = ugi('oopen')
x.addKerning(left='eopenreversed', right='o')
x.addRecipe('c flip_horizontal flip_vertical')
x = ugi('oslash')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', right='o')
x.addRecipe('o', 'slash decompose')
x = ugi('oslashacute')
x.addKerning(left='o', right='o')
x.addMetrics(left='oslash', right='oslash')
x = ugi('otilde')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', right='o')
x = ugi('p')
x.addKerning(left='p', right='o')
x.addMetrics(left='p', right='o')
x = ugi('phi')
x.addKerning(left='o', right='o')
x = ugi('phi-latin')
x.addRecipe('phi')
x = ugi('plusbelowcomb')
x.addRecipe('lefttackbelowcomb decompose', 'lefttackbelowcomb flip_vertical')
x = ugi('plusmod')
x.addMetrics(left='plus', right='plus')
x.addRecipe('plusbelowcomb')
x = ugi('q')
x.addKerning(left='o', right='q')
x.addMetrics(left='o', right='q')
x = ugi('r')
x.addAnchor('bottom', position_x=xpos.stem_bottom_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.xHeight)
x.addKerning(left='n', right='r')
x.addMetrics(left='n', right='r')
x = ugi('racute')
x.addKerning(left='n', right='r')
x.addMetrics(left='r', right='r')
x = ugi('ramshorn')
x.addKerning(left='ramshorn', right='ramshorn')
x.addRecipe('gamma-latin decompose')
x = ugi('rcaron')
x.addKerning(left='rcaron', right='r')
x.addMetrics(left='r', right='r')
x = ugi('rcommaaccent')
x.addKerning(left='n', right='r')
x.addMetrics(left='r', right='r')
x = ugi('rfishhook')
x.addKerning(left='s', right='r')
x.addRecipe('_part.stem', '_part.hook')
x = ugi('rhook')
x.addKerning(left='n', right='r')
x.addRecipe('r', '_part.hook flip_vertical')
x = ugi('rhookturned')
x.addKerning(left='rturned', right='rhookturned')
x.addRecipe('rturned', '_part.hook flip_vertical')
x = ugi('rhotichookmod')
x.addRecipe('eopenreversedhook decompose')
x = ugi('righttackbelowcomb')
x.addRecipe('lefttackbelowcomb flip_horizontal')
x = ugi('ringhalfleftbelowcomb')
x.addRecipe('ringhalfrightbelowcomb flip_horizontal')
x = ugi('ringhalfrightbelowcomb')
x.addRecipe('brevecomb')
x = ugi('rlonglegturned')
x.addKerning(left='rturned', right='d')
x.addRecipe('rturned', '_part.stem flip_horizontal flip_vertical')
x = ugi('rturned')
x.addKerning(left='rturned', right='u')
x.addRecipe('r flip_horizontal flip_vertical')
x = ugi('s')
x.addAnchor('bottom', position_x=xpos.apex_bottom)
x.addAnchor('top', position_x=xpos.apex_top)
x.addKerning(left='s', right='s')
x.addMetrics(left='s', right='s')
x = ugi('s_t')
x.addRecipe('s', 't')
x = ugi('sacute')
x.addKerning(left='s', right='s')
x.addMetrics(left='s', right='s')
x = ugi('scaron')
x.addKerning(left='s', right='s')
x.addMetrics(left='scaron', right='s')
x = ugi('scedilla')
x.addKerning(left='s', right='s')
x.addMetrics(left='s', right='s')
x = ugi('schwa')
x.addKerning(left='o', right='o')
x.addMetrics(left='o', right='o')
x.addRecipe('e flip_vertical flip_horizontal')
x = ugi('schwahook')
x.addKerning(left='o', right='eopenreversedhook')
x.addRecipe('schwa decompose', 'eopenreversedhook decompose')
x = ugi('scircumflex')
x.addKerning(left='s', right='s')
x.addMetrics(left='s', right='s')
x = ugi('scommaaccent')
x.addKerning(left='s', right='s')
x.addMetrics(left='s', right='s')
x = ugi('seagullbelowcomb')
x.addRecipe('brevecomb decompose', 'brevecomb decompose')
x = ugi('shook')
x.addKerning(left='s', right='s')
x.addRecipe('s', '_part.hook flip_vertical')
x = ugi('squarebelowcomb')
x.addRecipe('bridgebelowcomb', 'bridgebelowcomb flip_vertical')
x = ugi('t')
x.addAnchor('bottom', position_x=xpos.outline_center, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.outline_top)
x.addAnchor('topright', position_x=xpos.stem_top_right, position_y=ypos.ascender)
x.addKerning(left='t', right='t')
x.addMetrics(left='t', right='t')
x = ugi('tbar')
x.addKerning(left='t', right='t')
x.addMetrics(left='t', right='t')
x.addRecipe('t', '_part.bar')
x.addRecipe('t', 'macroncomb decompose')
x = ugi('tcaron')
x.addKerning(left='t', right='tcaron')
x.addMetrics(left='t', right='t')
x = ugi('tccurl')
x.addKerning(left='t', right='c')
x.addMetrics(left='t', right='ccurl')
x.addRecipe('t decompose', 'ccurl decompose')
x = ugi('tcedilla')
x.addKerning(left='t', right='t')
x.addMetrics(left='t', right='t')
x.addRecipe('t', 'cedillacomb')
x = ugi('tcommaaccent')
x.addKerning(left='t', right='t')
x.addMetrics(left='t', right='t')
x = ugi('tesh')
x.addKerning(left='t', right='dhook')
x.addMetrics(left='t', right='esh')
x.addRecipe('t', 'esh')
x = ugi('thorn')
x.addKerning(left='b', right='o')
x.addMetrics(left='b', right='o')
x.addRecipe('p decompose', 'l decompose')
x = ugi('tildeoverlaycomb')
x.addRecipe('asciitilde decompose', 'z')
x = ugi('tonebarextrahighmod')
x.addRecipe('plus decompose')
x = ugi('tonebarextralowmod')
x.addRecipe('tonebarextrahighmod flip_vertical')
x = ugi('tonebarhighmod')
x.addRecipe('tonebarextrahighmod decompose')
x = ugi('tonebarlowmod')
x.addRecipe('tonebarhighmod flip_vertical')
x = ugi('tonebarmidmod')
x.addRecipe('tonebarextrahighmod decompose')
x = ugi('tretroflexhook')
x.addKerning(left='t', right='tretroflexhook')
x.addRecipe('t decompose')
x = ugi('ts')
x.addKerning(left='t', right='s')
x.addMetrics(left='t', right='s')
x.addRecipe('t', 's')
x = ugi('u')
x.addAnchor('bottom', position_x=xpos.outline_center)
x.addAnchor('ogonek', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.xHeight)
x.addAnchor('topright', position_x=xpos.stem_top_right, position_y=ypos.xHeight)
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('uacute')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('ubar')
x.addKerning(left='istroke', right='istroke')
x.addRecipe('u', '_part.bar')
x = ugi('ubreve')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('ucaron')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('ucircumflex')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('udieresis')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('udieresisacute')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('udieresiscaron')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('udieresisgrave')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('udieresismacron')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('udotbelow')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('ugrave')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('uhookabove')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('uhorn')
x.addKerning(left='u', right='ohorn')
x.addMetrics(left='u', right='ohorn')
x = ugi('uhornacute')
x.addKerning(left='u', right='ohorn')
x.addMetrics(left='u', right='uhorn')
x.addRecipe('uhorn', 'acutecomb')
x = ugi('uhorndotbelow')
x.addKerning(left='u', right='ohorn')
x.addMetrics(left='u', right='uhorn')
x.addRecipe('uhorn', 'dotbelowcomb')
x = ugi('uhorngrave')
x.addKerning(left='u', right='ohorn')
x.addMetrics(left='u', right='uhorn')
x.addRecipe('uhorn', 'gravecomb')
x = ugi('uhornhookabove')
x.addKerning(left='u', right='ohorn')
x.addMetrics(left='u', right='uhorn')
x.addRecipe('uhorn', 'hookabovecomb')
x = ugi('uhorntilde')
x.addKerning(left='u', right='ohorn')
x.addMetrics(left='u', right='uhorn')
x.addRecipe('uhorn', 'tildecomb')
x = ugi('uhungarumlaut')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('umacron')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('undertie')
x.addRecipe('parenleft')
x = ugi('uni2C70')
x.addKerning(left='H', right='O')
x = ugi('uogonek')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('upsilon')
x.addKerning(left='upsilon-latin', right='upsilon-latin')
x = ugi('uptackbelowcomb')
x.addRecipe('lefttackbelowcomb decompose')
x = ugi('uptackmod')
x.addMetrics(left='plus', right='plus')
x.addRecipe('plus decompose')
x = ugi('uring')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('utilde')
x.addKerning(left='u', right='u')
x.addMetrics(left='u', right='u')
x = ugi('v')
x.addKerning(left='v', right='v')
x.addMetrics(left='v', right='v')
x = ugi('verticallinebelowcomb')
x.addRecipe('macroncomb decompose')
x = ugi('verticallinelowmod')
x.addRecipe('verticallinemod')
x = ugi('verticallinemod')
x.addRecipe('verticallinebelowcomb')
x = ugi('vhook')
x.addKerning(left='u', right='vhook')
x.addRecipe('u decompose', '_part.hook flip_horizontal')
x = ugi('vrighthook')
x.addKerning(left='v', right='r')
x.addMetrics(left='v', right='r')
x.addRecipe('v decompose', '_part.hook decompose')
x = ugi('vturned')
x.addKerning(left='vturned', right='vturned')
x.addRecipe('v flip_horizontal flip_vertical')
x.addRecipe('v')
x = ugi('w')
x.addKerning(left='v', right='v')
x.addMetrics(left='v', right='v')
x = ugi('wacute')
x.addKerning(left='v', right='v')
x.addMetrics(left='w', right='w')
x = ugi('wcircumflex')
x.addKerning(left='v', right='v')
x.addMetrics(left='w', right='w')
x = ugi('wdieresis')
x.addKerning(left='v', right='v')
x.addMetrics(left='w', right='w')
x = ugi('wgrave')
x.addKerning(left='v', right='v')
x.addMetrics(left='w', right='w')
x = ugi('wturned')
x.addKerning(left='vturned', right='vturned')
x.addRecipe('w flip_horizontal flip_vertical')
x = ugi('x')
x.addKerning(left='x', right='k')
x.addMetrics(left='x', right='x')
x = ugi('xdotaccent')
x.addMetrics(left='x', right='x')
x = ugi('y')
x.addAnchor('bottom', position_x=xpos.width_75, position_y=ypos.base_line)
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='v', right='v', italic_left='y', italic_right='y')
x = ugi('yacute')
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='y', right='y', italic_left='y', italic_right='y')
x = ugi('ycircumflex')
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='y', right='y', italic_left='y', italic_right='y')
x = ugi('ydieresis')
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='y', right='y', italic_left='y', italic_right='y')
x = ugi('ydotbelow')
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='y', width='y')
x = ugi('ygrave')
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='y', right='y', italic_left='y', italic_right='y')
x = ugi('yhookabove')
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='y', right='y', italic_left='y', italic_right='y')
x = ugi('ytilde')
x.addKerning(left='v', right='v', italic_left='y', italic_right='y')
x.addMetrics(left='y', right='y', italic_left='y', italic_right='y')
x = ugi('yturned')
x.addKerning(left='vturned', right='vturned')
x.addRecipe('y flip_horizontal flip_vertical')
x = ugi('z')
x.addKerning(left='z', right='z')
x.addMetrics(left='z', right='z')
x = ugi('zacute')
x.addKerning(left='z', right='z')
x.addMetrics(left='z', right='z')
x = ugi('zcaron')
x.addKerning(left='z', right='z')
x.addMetrics(left='z', right='z')
x = ugi('zcurl')
x.addKerning(left='z', right='zcurl')
x.addRecipe('z decompose', 'ccurl decompose')
x = ugi('zdotaccent')
x.addKerning(left='z', right='z')
x.addMetrics(left='z', right='z')
x = ugi('zretroflexhook')
x.addKerning(left='z', right='rhookturned')
x.addRecipe('z', '_part.hook flip_vertical')
x = ugi('zstroke')
x.addMetrics(left='z', right='z')
x = ugi('a.sc')
x.addAnchor('ogonek', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.smallcapHeight)
x = ugi('ae.sc')
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.smallcapHeight)
x.addRecipe('a.sc decompose', 'e.sc decompose')
x.addBuildString('/A/AE/E/a.sc/ae.sc/e.sc')
x = ugi('dcroat.sc')
x.addRecipe('eth.sc')
x = ugi('e.sc')
x.addAnchor('bottom', position_x=xpos.outline_center, position_y=ypos.base_line)
x.addAnchor('ogonek', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.smallcapHeight)
x = ugi('eng.sc')
x.addRecipe('jdotless decompose', 'n.sc decompose')
x.addBuildString('/N/Eng/J/n.sc/eng.sc/j.sc')
x = ugi('f.sc')
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.smallcapHeight)
x = ugi('g.salt.sc')
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.outline_top)
x = ugi('g.sc')
x.addAnchor('top', position_x=xpos.apex_top, position_y=ypos.smallcapHeight)
x = ugi('germandbls.sc')
x.addRecipe('s.sc', 's.sc')
x = ugi('h.sc')
x.addAnchor('#bottomright', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('#topleft', position_x=xpos.outline_left, position_y=ypos.smallcapHeight)
x.addAnchor('#topright', position_x=xpos.outline_right, position_y=ypos.smallcapHeight)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.smallcapHeight)
x.addAnchor('topleft', suppress_auto=True)
x = ugi('i.sc')
x.addAnchor('ogonek', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('top', position_x=xpos.stem_top_center, position_y=ypos.smallcapHeight)
x = ugi('k.sc')
x.addAnchor('#bottomright', position_x=xpos.outline_right, position_y=ypos.base_line)
x.addAnchor('#topleft', position_x=xpos.outline_left, position_y=ypos.smallcapHeight)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.smallcapHeight)
x.addAnchor('topleft', suppress_auto=True)
x = ugi('napostrophe.sc')
x.addMetrics(left='quoteright', right='n.sc')
x = ugi('o.sc')
x.addAnchor('#center', position_x=xpos.outline_center, position_y=ypos.outline_middle)
x.addAnchor('center', suppress_auto=True)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.smallcapHeight)
x = ugi('oe.sc')
x.addRecipe('o.sc decompose', 'e.sc')
x.addBuildString('/O/OE/E/o.sc/oe.sc/e.sc')
x = ugi('t.sc')
x.addAnchor('bottom', position_x=xpos.outline_center, position_y=ypos.outline_bottom)
x.addAnchor('center', position_x=xpos.outline_center, position_y=ypos.outline_middle)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.outline_top)
x = ugi('tcedilla.sc')
x.addRecipe('t.sc', 'cedillacomb')
x = ugi('thorn.sc')
x.addRecipe('p.sc decompose', 'i.sc decompose')
x.addBuildString('/P/Thorn/thorn.sc/p.sc')
x = ugi('u.sc')
x.addAnchor('bottom', position_x=xpos.outline_center, position_y=ypos.base_line)
x.addAnchor('center', position_x=xpos.outline_center, position_y=ypos.outline_middle)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.outline_top)
x = ugi('y.sc')
x.addAnchor('#center', position_x=xpos.outline_center, position_y=ypos.outline_middle)
x.addAnchor('top', position_x=xpos.outline_center, position_y=ypos.smallcapHeight)
x.addAnchor('topleft', position_x=xpos.outline_left, position_y=ypos.smallcapHeight)
#
# --------------------------------
#
# Accent Marks
#
# --------------------------------
#
x = ugi('dotaccentcomb')
x.addRecipe('dieresiscomb decompose')
x = ugi('dieresisbelowcomb')
x.addRecipe('dieresiscomb accent_bottom')
x = ugi('brevebelowcomb')
x.addRecipe('brevecomb accent_bottom')
x = ugi('macronbelowcomb')
x.addRecipe('macroncomb accent_bottom')
| 26.761031
| 90
| 0.683432
| 10,623
| 68,535
| 4.344535
| 0.043114
| 0.051309
| 0.166407
| 0.041255
| 0.856886
| 0.807267
| 0.758732
| 0.725841
| 0.674402
| 0.649983
| 0
| 0.00065
| 0.078952
| 68,535
| 2,560
| 91
| 26.771484
| 0.730483
| 0.002641
| 0
| 0.450386
| 0
| 0
| 0.226858
| 0.016433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000514
| false
| 0
| 0.000514
| 0.000514
| 0.001542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
78e0db8d4e431aff800d3aa98bdef481cb8145b8
| 72
|
py
|
Python
|
python/cuXfilter/charts/core/__init__.py
|
AjayThorve/cuxfilter
|
537ff67de80439a43e0bad7373558f5e25dcb112
|
[
"Apache-2.0"
] | 2
|
2019-03-06T02:10:05.000Z
|
2020-05-06T06:33:02.000Z
|
python/cuXfilter/charts/core/__init__.py
|
AjayThorve/cuxfilter
|
537ff67de80439a43e0bad7373558f5e25dcb112
|
[
"Apache-2.0"
] | null | null | null |
python/cuXfilter/charts/core/__init__.py
|
AjayThorve/cuxfilter
|
537ff67de80439a43e0bad7373558f5e25dcb112
|
[
"Apache-2.0"
] | null | null | null |
# from .core_chart import BaseChart
from .core_widget import BaseWidget
| 24
| 35
| 0.833333
| 10
| 72
| 5.8
| 0.7
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 72
| 2
| 36
| 36
| 0.920635
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
60704c449624cf87e45044e11e5810c0b66d5d98
| 123
|
py
|
Python
|
coolPro/app/module2/print_funs.py
|
airwindow/Python-Standard-Project
|
f975350b8eb05466198ae7e548b7ad63837fbd36
|
[
"Apache-2.0"
] | null | null | null |
coolPro/app/module2/print_funs.py
|
airwindow/Python-Standard-Project
|
f975350b8eb05466198ae7e548b7ad63837fbd36
|
[
"Apache-2.0"
] | null | null | null |
coolPro/app/module2/print_funs.py
|
airwindow/Python-Standard-Project
|
f975350b8eb05466198ae7e548b7ad63837fbd36
|
[
"Apache-2.0"
] | null | null | null |
def print_sth(s):
print('print from module2: {}'.format(s))
if __name__ == "__main__":
s = 'test in main'
print_sth(s)
| 17.571429
| 42
| 0.650407
| 20
| 123
| 3.5
| 0.6
| 0.228571
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.162602
| 123
| 7
| 43
| 17.571429
| 0.669903
| 0
| 0
| 0
| 0
| 0
| 0.33871
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.2
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6070e70603f7f80316ee8ceb1586ac237d30fa16
| 8,782
|
py
|
Python
|
cakechat/dialog_model/inference/tests/sampling.py
|
sketscripter/emotional-chatbot-cakechat
|
470df58a2206a0ea38b6bed53b20cbc63bd3de24
|
[
"Apache-2.0"
] | 1,608
|
2018-01-31T15:22:29.000Z
|
2022-03-30T19:59:16.000Z
|
cakechat/dialog_model/inference/tests/sampling.py
|
GaelicThunder/cakechat
|
844507281b30d81b3fe3674895fe27826dba8438
|
[
"Apache-2.0"
] | 60
|
2018-02-01T11:45:51.000Z
|
2019-11-13T10:35:59.000Z
|
cakechat/dialog_model/inference/tests/sampling.py
|
GaelicThunder/cakechat
|
844507281b30d81b3fe3674895fe27826dba8438
|
[
"Apache-2.0"
] | 690
|
2018-01-31T17:57:19.000Z
|
2022-03-30T07:07:41.000Z
|
import os
import sys
import unittest
import keras.backend as K
import numpy as np
from scipy.stats import binom
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from cakechat.dialog_model.inference.candidates.sampling import TokenSampler
from cakechat.config import REPETITION_PENALIZE_COEFFICIENT, RANDOM_SEED
np.random.seed(seed=RANDOM_SEED)
# Type I error rate: probability that a test will fail even though everything is OK
# The lower the probability is the more inaccurate (in terms of Type II error) the test becomes.
# This is independent probability for every test in the TestCase.
_CONFIDENCE_LEVEL = 1e-6
# Number of samples for monte-carlo estimation or probabilities.
# The bigger number of sample is, the more accurate tests become
_SAMPLES_NUM = 10000
class TestSampling(unittest.TestCase):
def test_sample_list(self):
# Error rate is p(token1) + p(token2) = conf_level / 2 + conf_level / 2 = conf_level:
probs = [_CONFIDENCE_LEVEL / 2, _CONFIDENCE_LEVEL / 2, 1 - _CONFIDENCE_LEVEL]
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[],
non_penalizable_tokens_ids=range(len(probs)),
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
expected_token_ids = np.array([2])
actual_token_ids = token_sampler.sample(probs, sample_idx=0)
self.assertEqual(expected_token_ids, actual_token_ids)
def test_sample_ndarray(self):
# Error rate is p(token1) + p(token2) = conf_level / 2 + conf_level / 2 = conf_level
probs = np.array([_CONFIDENCE_LEVEL / 2, _CONFIDENCE_LEVEL / 2, 1 - _CONFIDENCE_LEVEL], dtype=K.floatx())
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[],
non_penalizable_tokens_ids=range(len(probs)),
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
expected_token_ids = np.array([2])
actual_token_ids = token_sampler.sample(probs, sample_idx=0)
self.assertEqual(expected_token_ids, actual_token_ids)
def test_sample_probs(self):
probs = [0.3, 0.6, 0.1]
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[],
non_penalizable_tokens_ids=range(len(probs)),
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
adjusted_confidence_level = _CONFIDENCE_LEVEL / len(probs) # bonferroni correction
confidence_intervals = [binom.interval(1 - adjusted_confidence_level, _SAMPLES_NUM, p) for p in probs]
est_probs_from, est_probs_to = zip(*confidence_intervals)
samples = np.array([token_sampler.sample(probs, 0) for _ in range(_SAMPLES_NUM)])
counts = {val: np.sum(samples == val) for val in np.unique(samples)}
for i, _ in enumerate(probs):
self.assertLessEqual(counts[i], est_probs_to[i])
self.assertGreaterEqual(counts[i], est_probs_from[i])
def test_sample_with_zeros(self):
probs = np.array([1.0, 0, 0], dtype=K.floatx())
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[],
non_penalizable_tokens_ids=range(len(probs)),
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
expected_token_ids = np.array([0])
actual_token_ids = token_sampler.sample(probs, sample_idx=0)
self.assertEqual(expected_token_ids, actual_token_ids)
def test_sample_banned_tokens(self):
eps = _CONFIDENCE_LEVEL * 0.3
# Here we multiply the confidence level by 0.3 so that after removal of banned token and renormalization
# the probability of an error remains equal to _CONFIDENCE_LEVEL value.
probs = np.array([0.7, 0.3 - eps, eps], dtype=K.floatx())
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[0],
non_penalizable_tokens_ids=range(len(probs)),
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
expected_token_ids = np.array([1])
actual_token_ids = token_sampler.sample(probs, sample_idx=0)
self.assertEqual(expected_token_ids, actual_token_ids)
def test_sample_banned_tokens_2(self):
eps = 1e-6
probs = np.array([1.0 - eps, eps, 0], dtype=K.floatx())
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[0],
non_penalizable_tokens_ids=range(len(probs)),
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
# Token #1 has to be returned even though its probability is really small
expected_token_ids = np.array([1])
actual_token_ids = token_sampler.sample(probs, sample_idx=0)
self.assertEqual(expected_token_ids, actual_token_ids)
def test_repetition_penalization(self):
probs = [0.5, 0.5]
actual_num_nonequal_pairs = 0
for _ in range(_SAMPLES_NUM):
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[],
non_penalizable_tokens_ids=[],
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
first_token = token_sampler.sample(probs, sample_idx=0)
second_token = token_sampler.sample(probs, sample_idx=0)
actual_num_nonequal_pairs += int(first_token != second_token)
# P(first != second) = P(first=0, second=1) + P(first=1, second=0) =
# = 0.5 * 0.5 * r / (0.5 + 0.5 * r) + 0.5 * 0.5 * r / (0.5 + 0.5 * r) = r / (1 + r)
expected_nonequal_pair_rate = REPETITION_PENALIZE_COEFFICIENT / (1 + REPETITION_PENALIZE_COEFFICIENT)
expected_nonequal_pair_rate_from, expected_nonequal_pair_rate_to = \
binom.interval(1 - _CONFIDENCE_LEVEL, _SAMPLES_NUM, expected_nonequal_pair_rate)
self.assertLessEqual(actual_num_nonequal_pairs, expected_nonequal_pair_rate_to)
self.assertGreaterEqual(actual_num_nonequal_pairs, expected_nonequal_pair_rate_from)
def test_nonpenalizable_tokens(self):
probs = [0.5, 0.5]
actual_num_nonequal_pairs = 0
samples_generated = 0
while samples_generated < _SAMPLES_NUM:
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[],
non_penalizable_tokens_ids=[0],
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
first_token = token_sampler.sample(probs, sample_idx=0)
if first_token == 0:
samples_generated += 1
second_token = token_sampler.sample(probs, sample_idx=0)
actual_num_nonequal_pairs += (first_token != second_token)
# When we don't penalize for token#0, P(first != second | first=0) = P(second=1 | first=0) = 0.5
expected_nonequal_pair_rate = 0.5
expected_nonequal_pair_rate_from, expected_nonequal_pair_rate_to = binom.interval(
1 - _CONFIDENCE_LEVEL, _SAMPLES_NUM, expected_nonequal_pair_rate)
self.assertLessEqual(actual_num_nonequal_pairs, expected_nonequal_pair_rate_to)
self.assertGreaterEqual(actual_num_nonequal_pairs, expected_nonequal_pair_rate_from)
def test_nonpenalizable_tokens_2(self):
probs = [0.5, 0.5]
actual_num_nonequal_pairs = 0
samples_generated = 0
while samples_generated < _SAMPLES_NUM:
token_sampler = TokenSampler(
batch_size=1,
banned_tokens_ids=[],
non_penalizable_tokens_ids=[1],
repetition_penalization_coefficient=REPETITION_PENALIZE_COEFFICIENT)
first_token = token_sampler.sample(probs, sample_idx=0)
if first_token == 0:
samples_generated += 1
second_token = token_sampler.sample(probs, sample_idx=0)
actual_num_nonequal_pairs += (first_token != second_token)
# When we penalize for token#0, P(first != second | first=0) = P(second=1 | first=0) = 0.5 * r / (0.5 + 0.5 * r) = r / (1 + r)
expected_nonequal_pair_rate = REPETITION_PENALIZE_COEFFICIENT / (1 + REPETITION_PENALIZE_COEFFICIENT)
expected_nonequal_pair_rate_from, expected_nonequal_pair_rate_to = binom.interval(
1 - _CONFIDENCE_LEVEL, _SAMPLES_NUM, expected_nonequal_pair_rate)
self.assertLessEqual(actual_num_nonequal_pairs, expected_nonequal_pair_rate_to)
self.assertGreaterEqual(actual_num_nonequal_pairs, expected_nonequal_pair_rate_from)
if __name__ == '__main__':
unittest.main()
| 47.47027
| 134
| 0.685151
| 1,115
| 8,782
| 5.038565
| 0.150673
| 0.044856
| 0.06408
| 0.076896
| 0.739231
| 0.734247
| 0.724101
| 0.724101
| 0.724101
| 0.707013
| 0
| 0.021314
| 0.230699
| 8,782
| 184
| 135
| 47.728261
| 0.810243
| 0.132772
| 0
| 0.642857
| 0
| 0
| 0.001053
| 0
| 0
| 0
| 0
| 0
| 0.092857
| 1
| 0.064286
| false
| 0
| 0.057143
| 0
| 0.128571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
60eabdaede4a0a3f186c4178a0edb20e19aa9d93
| 919
|
py
|
Python
|
prescience/labelling/__init__.py
|
grockious/bounded-prescience
|
cc1278fb4c077f67c611ef8ac00b00f0f2c4f433
|
[
"BSD-3-Clause"
] | 1
|
2021-01-26T12:17:12.000Z
|
2021-01-26T12:17:12.000Z
|
prescience/labelling/__init__.py
|
grockious/bounded-prescience
|
cc1278fb4c077f67c611ef8ac00b00f0f2c4f433
|
[
"BSD-3-Clause"
] | null | null | null |
prescience/labelling/__init__.py
|
grockious/bounded-prescience
|
cc1278fb4c077f67c611ef8ac00b00f0f2c4f433
|
[
"BSD-3-Clause"
] | 2
|
2021-01-26T11:19:01.000Z
|
2021-03-19T10:18:13.000Z
|
from prescience.labelling.Labeller import Labeller
from prescience.labelling.properties import get_property
from prescience.labelling.Freeway import Hit
from prescience.labelling.Death import Death
from prescience.labelling.Assault import Overheat
from prescience.labelling.Below_Reward import Below_Reward
from prescience.labelling.Bowling import No_Hit
from prescience.labelling.Bowling import No_Strike
from prescience.labelling.DoubleDunk import Out_Of_Bounds
from prescience.labelling.DoubleDunk import Shoot_Bf_Clear
from prescience.labelling.Seaquest import Early_Surface
from prescience.labelling.Seaquest import Out_Of_Oxygen
from prescience.labelling.InstantNegativeReward import Instant_Negative_Reward
from prescience.labelling.Frostbite import Freezing
from prescience.labelling.Gravitar import Fuel
from prescience.labelling.Hero import Dynamite
from prescience.labelling.KungFuMaster import Energy_Loss
| 48.368421
| 78
| 0.887922
| 117
| 919
| 6.846154
| 0.350427
| 0.297129
| 0.48814
| 0.064919
| 0.284644
| 0.094881
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075082
| 919
| 18
| 79
| 51.055556
| 0.942353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
60f34e774b0d6b2e49455d4416b4005f13149e47
| 61
|
py
|
Python
|
python/LsstPwrCtrlCore/__init__.py
|
slaclab/lsst-pwr-ctrl-core
|
e37c38e2c55f0f0ee0f4b3691a93e2e3115f14e6
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/LsstPwrCtrlCore/__init__.py
|
slaclab/lsst-pwr-ctrl-core
|
e37c38e2c55f0f0ee0f4b3691a93e2e3115f14e6
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2018-04-04T05:39:39.000Z
|
2018-07-09T19:48:49.000Z
|
python/LsstPwrCtrlCore/__init__.py
|
slaclab/lsst-pwr-ctrl-core
|
e37c38e2c55f0f0ee0f4b3691a93e2e3115f14e6
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2020-12-12T23:00:48.000Z
|
2020-12-12T23:00:48.000Z
|
#!/usr/bin/env python
from LsstPwrCtrlCore._core import *
| 20.333333
| 38
| 0.737705
| 8
| 61
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147541
| 61
| 2
| 39
| 30.5
| 0.846154
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
60fbdc350a85db1da17caf617ac7e1782a5ce36d
| 187
|
py
|
Python
|
test.py
|
Adolph-Anthony/study-reptile
|
d4ce637a25fba88bafb7f967b1813107792deab0
|
[
"MIT"
] | null | null | null |
test.py
|
Adolph-Anthony/study-reptile
|
d4ce637a25fba88bafb7f967b1813107792deab0
|
[
"MIT"
] | null | null | null |
test.py
|
Adolph-Anthony/study-reptile
|
d4ce637a25fba88bafb7f967b1813107792deab0
|
[
"MIT"
] | null | null | null |
import requests

# NOTE(review): the two bare string literals below are no-op expression
# statements left by the author as informal notes; kept byte-identical.
'https://github.com/Adolph-Anthony'
'''
这样输入用户名密码访问
'''
# (translation of the note above: "access by entering username and password
# this way", i.e. HTTP Basic auth via the `auth=` tuple.)
# SECURITY: credentials are hard-coded in source — move the username/password
# to environment variables or a secrets store before committing code like this.
r = requests.get('https://github.com/Adolph-Anthony', auth=('Adolph-Anthony', 'xujing518333'))
print(r.status_code)
| 26.714286
| 94
| 0.73262
| 24
| 187
| 5.666667
| 0.625
| 0.286765
| 0.205882
| 0.294118
| 0.397059
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 0.058824
| 187
| 7
| 95
| 26.714286
| 0.738636
| 0
| 0
| 0
| 0
| 0
| 0.544379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
88039fb2439cd25a5b0402eb23f897011f8a10b8
| 209
|
py
|
Python
|
scicopia_tools/analyzers/__init__.py
|
pikatech/Scicopia-tools
|
0e19d694adeae862e3db92779d204e4944cc47bc
|
[
"MIT"
] | null | null | null |
scicopia_tools/analyzers/__init__.py
|
pikatech/Scicopia-tools
|
0e19d694adeae862e3db92779d204e4944cc47bc
|
[
"MIT"
] | null | null | null |
scicopia_tools/analyzers/__init__.py
|
pikatech/Scicopia-tools
|
0e19d694adeae862e3db92779d204e4944cc47bc
|
[
"MIT"
] | 1
|
2021-06-18T16:00:35.000Z
|
2021-06-18T16:00:35.000Z
|
from typing import Any, Dict
class Analyzer:
    """Base class for text analyzers.

    Subclasses are expected to override :meth:`process` to return the
    extracted data and :meth:`release_resources` to free anything they hold.
    The base implementation is a stateless no-op.
    """

    def __init__(self) -> None:
        """The base analyzer keeps no state; nothing to initialize."""
        pass

    def process(self, text: str) -> Dict[str, Any]:
        """Analyze *text* and return a mapping of results (empty here)."""
        return dict()

    def release_resources(self):
        """Free any held resources; the base analyzer holds none."""
        pass
| 17.416667
| 51
| 0.593301
| 26
| 209
| 4.576923
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.301435
| 209
| 12
| 52
| 17.416667
| 0.815068
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.25
| 0.125
| 0.125
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
71878945872280fd57eabb5f850237bebed1775d
| 104
|
py
|
Python
|
autoran/oailte/epc/__init__.py
|
samiemostafavi/oai-ran-docker
|
cfaf8adbfdd1d3ed3f33388db74a43f17681f1d1
|
[
"MIT"
] | null | null | null |
autoran/oailte/epc/__init__.py
|
samiemostafavi/oai-ran-docker
|
cfaf8adbfdd1d3ed3f33388db74a43f17681f1d1
|
[
"MIT"
] | 1
|
2022-02-25T13:09:34.000Z
|
2022-02-25T13:09:34.000Z
|
autoran/oailte/epc/__init__.py
|
samiemostafavi/oai-ran-docker
|
cfaf8adbfdd1d3ed3f33388db74a43f17681f1d1
|
[
"MIT"
] | null | null | null |
from .EPC import Cassandra, HSS, MME, SPGWU, SPGWC, EvolvedPacketCore
from .EPCRouter import CoreRouter
| 34.666667
| 69
| 0.807692
| 13
| 104
| 6.461538
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 104
| 2
| 70
| 52
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
71916609d9ca18d58e2a772a7d463fce867fce22
| 117
|
py
|
Python
|
alpyca_launch/src/alpyca/launch/__init__.py
|
arturmiller/alpyca
|
207eae92ebcdd593b7953ecb6ad4816215ecb516
|
[
"MIT"
] | 3
|
2018-12-04T18:40:36.000Z
|
2019-01-13T12:01:19.000Z
|
alpyca_launch/src/alpyca/launch/__init__.py
|
alpyca/alpyca
|
207eae92ebcdd593b7953ecb6ad4816215ecb516
|
[
"MIT"
] | 4
|
2019-01-21T19:50:56.000Z
|
2019-02-02T06:32:11.000Z
|
alpyca_launch/src/alpyca/launch/__init__.py
|
alpyca/alpyca
|
207eae92ebcdd593b7953ecb6ad4816215ecb516
|
[
"MIT"
] | null | null | null |
from launch import Launch, ParsingException
from master import Master
from node import Node
from runner import Runner
| 29.25
| 43
| 0.854701
| 17
| 117
| 5.882353
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136752
| 117
| 4
| 44
| 29.25
| 0.990099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
71c1e9f99f9864670fc3b18c6ad27a1e0229f8f3
| 244
|
py
|
Python
|
src/compressario/compression_algorithms/__init__.py
|
ieaves/compressario
|
fd20ae36b283b119085f158c9fd0fb0e6f9f0242
|
[
"MIT"
] | null | null | null |
src/compressario/compression_algorithms/__init__.py
|
ieaves/compressario
|
fd20ae36b283b119085f158c9fd0fb0e6f9f0242
|
[
"MIT"
] | null | null | null |
src/compressario/compression_algorithms/__init__.py
|
ieaves/compressario
|
fd20ae36b283b119085f158c9fd0fb0e6f9f0242
|
[
"MIT"
] | null | null | null |
from compressario.compression_algorithms import type_compressions
from compressario.compression_algorithms.type_compressions import (
compress_float,
compress_integer,
compress_complex,
compress_object,
compress_datetime,
)
| 27.111111
| 67
| 0.819672
| 24
| 244
| 7.958333
| 0.541667
| 0.167539
| 0.282723
| 0.387435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139344
| 244
| 8
| 68
| 30.5
| 0.909524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e07ba6e1a7821ae410823c5384434f389e5adaa7
| 129
|
py
|
Python
|
code/playground/gopro_keepalive.py
|
manavjain99/oscar_buggy
|
b5dab0848f8667c9515bcfb078730cd0c4060000
|
[
"MIT"
] | 3
|
2020-08-27T14:25:14.000Z
|
2020-11-13T13:13:41.000Z
|
code/playground/gopro_keepalive.py
|
manavjain99/oscar_buggy
|
b5dab0848f8667c9515bcfb078730cd0c4060000
|
[
"MIT"
] | null | null | null |
code/playground/gopro_keepalive.py
|
manavjain99/oscar_buggy
|
b5dab0848f8667c9515bcfb078730cd0c4060000
|
[
"MIT"
] | null | null | null |
from goprocam import GoProCamera
# NOTE(review): `constants` appears unused in this script — confirm before removing.
from goprocam import constants

# Connect to the GoPro camera and forward its live stream to a local UDP
# endpoint (keeps the camera awake while something else consumes the feed).
gopro = GoProCamera.GoPro()
gopro.stream("udp://127.0.0.1:10000")
| 32.25
| 37
| 0.790698
| 19
| 129
| 5.368421
| 0.631579
| 0.235294
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09322
| 0.085271
| 129
| 4
| 37
| 32.25
| 0.771186
| 0
| 0
| 0
| 0
| 0
| 0.161538
| 0.161538
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e083faaec3780fd98a0dc60a6e17c56a35980f0c
| 4,288
|
py
|
Python
|
onnx/backend/test/case/model/gradient.py
|
L-Net-1992/onnx
|
acc127219b45bc27b0180b1fdc08299eac81b167
|
[
"Apache-2.0"
] | 1
|
2022-03-04T03:29:37.000Z
|
2022-03-04T03:29:37.000Z
|
onnx/backend/test/case/model/gradient.py
|
alsj213/onnx
|
35092895d9bf3592e58f4710d098f8131afef259
|
[
"Apache-2.0"
] | null | null | null |
onnx/backend/test/case/model/gradient.py
|
alsj213/onnx
|
35092895d9bf3592e58f4710d098f8131afef259
|
[
"Apache-2.0"
] | 1
|
2022-03-27T19:17:02.000Z
|
2022-03-27T19:17:02.000Z
|
# SPDX-License-Identifier: Apache-2.0
import numpy as np # type: ignore
import onnx
from onnx.defs import ONNX_DOMAIN, AI_ONNX_PREVIEW_TRAINING_DOMAIN
from ..base import Base
from . import expect
class Gradient(Base):
    """Backend test cases for the AI-ONNX preview-training ``Gradient`` operator.

    Each exporter builds a small forward graph, appends a ``Gradient`` node
    that differentiates one output with respect to the scalar inputs, and
    registers the model plus hand-computed expected gradients via ``expect``.
    """

    @staticmethod
    def export_gradient_scalar_add() -> None:
        # Forward graph: c = a + b.
        add_node = onnx.helper.make_node('Add',
                                         ['a', 'b'], ['c'], name='my_add')
        # Gradient of output 'c' with respect to inputs 'a' and 'b'.
        gradient_node = onnx.helper.make_node(
            'Gradient', ['a', 'b'],
            ['dc_da', 'dc_db'], name='my_gradient',
            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
            xs=['a', 'b'], y='c')

        a = np.array(1.0).astype(np.float32)
        b = np.array(2.0).astype(np.float32)
        c = a + b
        # dc / da = d(a+b) / da = 1
        dc_da = np.array(1).astype(np.float32)
        # dc / db = d(a+b) / db = 1
        dc_db = np.array(1).astype(np.float32)

        # All value infos are scalars (empty shape []).
        graph = onnx.helper.make_graph(
            nodes=[add_node, gradient_node],
            name='GradientOfAdd',
            inputs=[
                onnx.helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,
                                                   []),
                onnx.helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,
                                                   [])],
            outputs=[
                onnx.helper.make_tensor_value_info('c', onnx.TensorProto.FLOAT,
                                                   []),
                onnx.helper.make_tensor_value_info('dc_da',
                                                   onnx.TensorProto.FLOAT, []),
                onnx.helper.make_tensor_value_info('dc_db',
                                                   onnx.TensorProto.FLOAT, [])])
        # The model needs both the default ONNX domain and the preview
        # training domain (for the Gradient op) in its opset imports.
        opsets = [
            onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
            onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)]
        model = onnx.helper.make_model(
            graph,
            producer_name='backend-test',
            opset_imports=opsets)
        expect(model, inputs=[a, b], outputs=[c, dc_da, dc_db],
               name='test_gradient_of_add')

    @staticmethod
    def export_gradient_scalar_add_and_mul() -> None:
        # Forward graph: c = a + b, then d = c * a.
        add_node = onnx.helper.make_node('Add',
                                         ['a', 'b'], ['c'], name='my_add')
        mul_node = onnx.helper.make_node('Mul',
                                         ['c', 'a'], ['d'], name='my_mul')
        # Gradient of the final output 'd' with respect to 'a' and 'b'.
        gradient_node = onnx.helper.make_node(
            'Gradient', ['a', 'b'],
            ['dd_da', 'dd_db'], name='my_gradient',
            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
            xs=['a', 'b'], y='d')

        a = np.array(1.0).astype(np.float32)
        b = np.array(2.0).astype(np.float32)
        c = a + b
        # d = a * c = a * (a + b)
        d = a * c
        # dd / da = d(a*a+a*b) / da = 2 * a + b
        dd_da = (2 * a + b).astype(np.float32)
        # dd / db = d(a*a+a*b) / db = a
        dd_db = a

        graph = onnx.helper.make_graph(
            nodes=[add_node, mul_node, gradient_node],
            name='GradientOfTwoOperators',
            inputs=[
                onnx.helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,
                                                   []),
                onnx.helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,
                                                   [])],
            outputs=[
                onnx.helper.make_tensor_value_info('d', onnx.TensorProto.FLOAT,
                                                   []),
                onnx.helper.make_tensor_value_info('dd_da',
                                                   onnx.TensorProto.FLOAT, []),
                onnx.helper.make_tensor_value_info('dd_db',
                                                   onnx.TensorProto.FLOAT, [])])
        opsets = [
            onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
            onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)]
        model = onnx.helper.make_model(graph,
                                       producer_name='backend-test',
                                       opset_imports=opsets)
        expect(model, inputs=[a, b], outputs=[d, dd_da, dd_db],
               name='test_gradient_of_add_and_mul')
| 42.039216
| 80
| 0.485774
| 482
| 4,288
| 4.078838
| 0.147303
| 0.116989
| 0.163784
| 0.101729
| 0.837742
| 0.805188
| 0.70295
| 0.70295
| 0.666328
| 0.625636
| 0
| 0.013657
| 0.385261
| 4,288
| 101
| 81
| 42.455446
| 0.73217
| 0.044776
| 0
| 0.571429
| 0
| 0
| 0.057975
| 0.012231
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.083333
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.