hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40290e453dc415a935e77faf36f362c1b48c02aa
| 176
|
py
|
Python
|
digits/model/tasks/__init__.py
|
ZeusNightBolt/DIGITS
|
3450cc683143415418af5ecdb1b17b02da3e2c79
|
[
"BSD-3-Clause"
] | 2
|
2017-04-24T10:16:15.000Z
|
2019-02-26T09:36:27.000Z
|
digits/model/tasks/__init__.py
|
ZeusNightBolt/DIGITS
|
3450cc683143415418af5ecdb1b17b02da3e2c79
|
[
"BSD-3-Clause"
] | 1
|
2016-08-30T23:48:17.000Z
|
2016-08-30T23:48:17.000Z
|
digits/model/tasks/__init__.py
|
ZeusNightBolt/DIGITS
|
3450cc683143415418af5ecdb1b17b02da3e2c79
|
[
"BSD-3-Clause"
] | 3
|
2017-04-24T10:16:15.000Z
|
2019-02-26T09:36:49.000Z
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
from train import TrainTask
from caffe_train import CaffeTrainTask
from torch_train import TorchTrainTask
| 29.333333
| 68
| 0.829545
| 23
| 176
| 6.26087
| 0.73913
| 0.229167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052288
| 0.130682
| 176
| 5
| 69
| 35.2
| 0.888889
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
404cd1c03632e0a5e4e90710a04e06a2c0b7cd96
| 121
|
py
|
Python
|
credentials/providers/passport/exceptions.py
|
ad-m/h1-credentials-helper-python
|
2c3e0d9c57ea4a37349debbbe7bc640bc326e5e3
|
[
"MIT"
] | null | null | null |
credentials/providers/passport/exceptions.py
|
ad-m/h1-credentials-helper-python
|
2c3e0d9c57ea4a37349debbbe7bc640bc326e5e3
|
[
"MIT"
] | null | null | null |
credentials/providers/passport/exceptions.py
|
ad-m/h1-credentials-helper-python
|
2c3e0d9c57ea4a37349debbbe7bc640bc326e5e3
|
[
"MIT"
] | null | null | null |
from credentials.exceptions import CredentialsException
class InvalidPassportException(CredentialsException):
pass
| 20.166667
| 55
| 0.859504
| 9
| 121
| 11.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107438
| 121
| 5
| 56
| 24.2
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.666667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
40854f14af4e729cb21356ed6f16a85698630e53
| 1,969
|
py
|
Python
|
dfval/tests/tests_dfcheck.py
|
ashendrickson/dfval
|
99e758edfb76e4069ef0c4fe1085f1fa76b4cd23
|
[
"Apache-2.0"
] | 5
|
2020-10-12T23:24:47.000Z
|
2022-02-18T20:20:58.000Z
|
dfval/tests/tests_dfcheck.py
|
ashendrickson/dfval
|
99e758edfb76e4069ef0c4fe1085f1fa76b4cd23
|
[
"Apache-2.0"
] | null | null | null |
dfval/tests/tests_dfcheck.py
|
ashendrickson/dfval
|
99e758edfb76e4069ef0c4fe1085f1fa76b4cd23
|
[
"Apache-2.0"
] | 1
|
2020-09-28T15:04:16.000Z
|
2020-09-28T15:04:16.000Z
|
import unittest
from dfval import dups_check
from dfval import column_names_check
import pandas as pd
class TestCheck(unittest.TestCase):
def setUp(self):
self.d_dups = [[3, '2019-12-15'], [3, '2019-12-15'], [3, '2019-12-15'], [3, '2019-12-08']]
self.df_dups = pd.DataFrame(self.d_dups, columns = ['co_loc_ref_i', 'wk_beg_d'])
self.d_no_dups = [[3, '2019-12-15'], [3, '2019-12-22'], [3, '2019-12-29'], [3, '2019-12-08']]
self.df_no_dups = pd.DataFrame(self.d_no_dups, columns = ['co_loc_ref_i', 'wk_beg_d'])
self.k = ['co_loc_ref_i', 'wk_beg_d']
self.d_column_names = [[3, '2019-12-15'], [3, '2019-12-15'], [3, '2019-12-15'], [3, '2019-12-08']]
self.df_column_names = pd.DataFrame(self.d_dups, columns = ['co_loc_ref_i', 'wk_beg_d'])
self.expected_column_names_match = ['co_loc_ref_i', 'wk_beg_d']
self.expected_column_names_diff = ['co_loc_i', 'wk_beg_d']
def test_dups(self):
dups = dups_check(self.df_dups, self.k)
self.assertEqual(len(dups.index), 2)
def test_no_dups(self):
dups = dups_check(self.df_no_dups, self.k)
self.assertEqual(len(dups.index), 0)
def test_column_names_match(self):
column_names_result = column_names_check(self.df_column_names, self.expected_column_names_match)
self.assertEqual(len(column_names_result[column_names_result['column_check_pass'] == 'True'].index), 2)
self.assertEqual(len(column_names_result[column_names_result['column_check_pass'] == 'False'].index), 0)
def test_column_names_diff(self):
column_names_result = column_names_check(self.df_column_names, self.expected_column_names_diff)
self.assertEqual(len(column_names_result[column_names_result['column_check_pass'] == 'True'].index), 1)
self.assertEqual(len(column_names_result[column_names_result['column_check_pass'] == 'False'].index), 1)
if __name__ == '__main__':
unittest.main()
| 45.790698
| 112
| 0.681564
| 311
| 1,969
| 3.942122
| 0.170418
| 0.206362
| 0.068516
| 0.187602
| 0.806688
| 0.772431
| 0.729201
| 0.685155
| 0.606852
| 0.589723
| 0
| 0.069007
| 0.160995
| 1,969
| 42
| 113
| 46.880952
| 0.673123
| 0
| 0
| 0
| 0
| 0
| 0.167598
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 1
| 0.16129
| false
| 0.129032
| 0.129032
| 0
| 0.322581
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
4094ccc25cbdd7ba8433556b6c10d17aea8f54c0
| 9,739
|
py
|
Python
|
medialog/googlefonts/vocabularies.py
|
collective/medialog.googlefonts
|
4ea2fd2a71241af4b712295a2d240d89e4cc1e52
|
[
"BSD-Source-Code"
] | null | null | null |
medialog/googlefonts/vocabularies.py
|
collective/medialog.googlefonts
|
4ea2fd2a71241af4b712295a2d240d89e4cc1e52
|
[
"BSD-Source-Code"
] | null | null | null |
medialog/googlefonts/vocabularies.py
|
collective/medialog.googlefonts
|
4ea2fd2a71241af4b712295a2d240d89e4cc1e52
|
[
"BSD-Source-Code"
] | null | null | null |
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
from zope.schema.interfaces import IVocabularyFactory
from zope.interface import directlyProvides
from medialog.googlefonts import messageFactory as _
FONTS = [ "Abel", "Abril Fatface", "Aclonica", "Allan", "Allerta Stencil", "Allerta", "Amaranth", "Annie Use Your Telescope", "Anonymous Pro", "Anton", "Architects Daughter", "Arimo", "Arvo", "Astloch", "Bangers", "Bentham", "Bevan", "Buda", "Cabin", "Calligraffitti", "Candal", "Cantarell", "Cardo", "Cherry Cream Soda", "Chewy", "Coda", "Coming Soon", "Copse", "Corben", "Cousine", "Covered By Your Grace", "Crafty Girls", "Crimson Text", "Crushed", "Cuprum", "Damion", "Dancing Script", "Dawning of a New Day", "Droid Sans Mono", "Droid Sans", "Droid Serif", "EB Garamond", "Expletus Sans", "Fontdiner Swanky", "Geo", "Goudy Bookletter 1911", "Gruppo", "Homemade Apple", "IM Fell", "Inconsolata", "Indie Flower", "Irish Grover", "Josefin Sans", "Josefin Slab", "Just Another Hand", "Just Me Again Down Here", "Kenia", "Kranky", "Kreon", "Kristi", "Lato", "League Script", "Lekton", "Lobster", "Luckiest Guy", "Maiden Orange", "Meddon", "MedievalSharp", "Merriweather", "Michroma", "Miltonian", "Molengo", "Montserrat", "Neucha", "News Cycle", "Nobile", "Nova Round", "OFL Sorts Mill Goudy TT", "Old Standard TT", "Orbitron", "Oswald", "Over the Rainbow", "Pacifico by Vernon Adams ", "Pacifico", "Permanent Marker", "Philosopher", "PT Sans", "PT Serif", "Puritan", "Quattrocento Sans", "Quattrocento", "Radley by Vernon Adams", "Radley", "Raleway", "Reenie Beanie", "Rock Salt", "Schoolbell", "Six Caps", "Slackey", "Smythe", "Sniglet", "Special Elite", "Sue Ellen Francisco", "Sunshiney", "Swanky and Moo Moo", "Syncopate", "Tangerine", "Terminal Dosis Light", "The Girl Next Door", "Tinos", "Ubuntu", "UnifrakturMaguntia", "Unkempt", "Vibur", "Vollkorn", "VT323", "Waiting for the Sunrise", "Yanone Kaffeesatz", "** ----- more fonts ----- **", "ABeeZee", "Acme", "Actor", "Adamina", "Advent Pro", "Aguafina Script", "Akronim", "Aladin", "Aldrich", "Alef", "Alegreya Sans SC", "Alegreya Sans", "Alegreya SC", "Alegreya", "Alex Brush", "Alfa Slab One", "Alice", "Alike Angular", "Alike", 
"Allura", "Almendra Display", "Almendra SC", "Almendra", "Amarante", "Amatic SC", "Amethysta", "Anaheim", "Andada", "Andika", "Antic Didone", "Antic Slab", "Antic", "Arapey", "Arbutus Slab", "Arbutus", "Archivo Black", "Archivo Narrow", "Arizonia", "Armata", "Artifika", "Asap", "Asset", "Asul", "Atomic Age", "Aubrey", "Audiowide", "Autour One", "Average Sans", "Average", "Averia Gruesa Libre", "Averia Libre", "Averia Sans Libre", "Averia Serif Libre", "Bad Script", "Balthazar", "Basic", "Baumans", "Belgrano", "Belleza", "BenchNine", "Berkshire Swash", "Bigelow Rules", "Bigshot One", "Bilbo Swash Caps", "Bilbo", "Bitter", "Black Ops One", "Bonbon", "Boogaloo", "Bowlby One SC", "Bowlby One", "Brawler", "Bree Serif", "Bubblegum Sans", "Bubbler One", "Buenard", "Butcherman", "Butterfly Kids", "Cabin Condensed", "Cabin Sketch", "Caesar Dressing", "Cagliostro", "Cambo", "Cantata One", "Cantora One", "Capriola", "Carme", "Carrois Gothic SC", "Carrois Gothic", "Carter One", "Caudex", "Cedarville Cursive", "Ceviche One", "Changa One", "Chango", "Chau Philomene One", "Chela One", "Chelsea Market", "Cherry Swash", "Chicle", "Chivo", "Cinzel Decorative", "Cinzel", "Clicker Script", "Coda Caption", "Codystar", "Combo", "Comfortaa", "Concert One", "Condiment", "Contrail One", "Convergence", "Cookie", "Courgette", "Coustard", "Creepster", "Crete Round", "Croissant One", "Cutive Mono", "Cutive", "Days One", "Delius Swash Caps", "Delius Unicase", "Delius", "Della Respira", "Denk One", "Devonshire", "Didact Gothic", "Diplomata SC", "Diplomata", "Domine", "Donegal One", "Doppio One", "Dorsa", "Dosis", "Dr Sugiyama", "Duru Sans", "Dynalight", "Eagle Lake", "Eater", "Economica", "Electrolize", "Elsie Swash Caps", "Elsie", "Emblema One", "Emilys Candy", "Engagement", "Englebert", "Enriqueta", "Erica One", "Esteban", "Euphoria Script", "Ewert", "Exo 2", "Exo", "Fanwood Text", "Fascinate Inline", "Fascinate", "Faster One", "Fauna One", "Federant", "Federo", "Felipa", "Fenix", "Finger 
Paint", "Fjalla One", "Fjord One", "Flamenco", "Flavors", "Fondamento", "Forum", "Francois One", "Freckle Face", "Fredericka the Great", "Fredoka One", "Fresca", "Frijole", "Fruktur", "Fugaz One", "Gabriela", "Gafata", "Galdeano", "Galindo", "Gentium Basic", "Gentium Book Basic", "Geostar Fill", "Geostar", "Germania One", "Gilda Display", "Give You Glory", "Glass Antiqua", "Glegoo", "Gloria Hallelujah", "Goblin One", "Gochi Hand", "Gorditas", "Graduate", "Grand Hotel", "Gravitas One", "Great Vibes", "Griffy", "Gudea", "Habibi", "Hammersmith One", "Hanalei Fill", "Hanalei", "Handlee", "Happy Monkey", "Headland One", "Henny Penny", "Herr Von Muellerhoff", "Holtwood One SC", "Homenaje", "Iceberg", "Iceland", "IM Fell Double Pica SC", "IM Fell Double Pica", "IM Fell DW Pica SC", "IM Fell DW Pica", "IM Fell English SC", "IM Fell English", "IM Fell French Canon SC", "IM Fell French Canon", "IM Fell Great Primer SC", "IM Fell Great Primer", "Imprima", "Inder", "Inika", "Istok Web", "Italiana", "Italianno", "Jacques Francois Shadow", "Jacques Francois", "Jim Nightshade", "Jockey One", "Jolly Lodger", "Joti One", "Judson", "Julee", "Julius Sans One", "Junge", "Jura", "Kameron", "Karla", "Kaushan Script", "Kavoon", "Keania One", "Kelly Slab", "Kite One", "Knewave", "Kotta One", "Krona One", "La Belle Aurore", "Lancelot", "Leckerli One", "Ledger", "Lemon", "Libre Baskerville", "Life Savers", "Lilita One", "Lily Script One", "Limelight", "Linden Hill", "Lobster Two", "Londrina Outline", "Londrina Shadow", "Londrina Sketch", "Londrina Solid", "Lora", "Love Ya Like A Sister", "Loved by the King", "Lovers Quarrel", "Lusitana", "Lustria", "Macondo Swash Caps", "Macondo", "Magra", "Mako", "Marcellus SC", "Marcellus", "Marck Script", "Margarine", "Marko One", "Marmelad", "Marvel", "Mate SC", "Mate", "Maven Pro", "McLaren", "Medula One", "Megrim", "Meie Script", "Merienda One", "Merienda", "Merriweather Sans", "Metal Mania", "Metamorphous", "Metrophobic", "Milonga", "Miltonian 
Tattoo", "Miniver", "Miss Fajardose", "Modern Antiqua", "Molle", "Monda", "Monofett", "Monoton", "Monsieur La Doulaise", "Montez", "Montserrat Alternates", "Montserrat Subrayada", "Mountains of Christmas", "Mouse Memoirs", "Mr Bedfort", "Mr Dafoe", "Mr De Haviland", "Mrs Saint Delafield", "Mrs Sheppards", "Muli", "Mystery Quest", "Neuton", "New Rocker", "Niconne", "Nixie One", "Norican", "Nosifer", "Nothing You Could Do", "Noticia Text", "Noto Sans", "Noto Serif", "Nova Cut", "Nova Flat", "Nova Mono", "Nova Oval", "Nova Script", "Nova Slim", "Nova Square", "Numans", "Nunito", "Offside", "Oldenburg", "Oleo Script Swash Caps", "Oleo Script", "Open Sans Condensed", "Open Sans", "Oranienbaum", "Oregano", "Orienta", "Original Surfer", "Overlock SC", "Overlock", "Ovo", "Oxygen Mono", "Paprika", "Parisienne", "Passero One", "Passion One", "Pathway Gothic One", "Patrick Hand SC", "Patrick Hand", "Patua One", "Paytone One", "Peralta", "Petit Formal Script", "Petrona", "Piedra", "Pinyon Script", "Pirata One", "Plaster", "Play", "Playball", "Playfair Display SC", "Playfair Display", "Podkova", "Poiret One", "Poller One", "Poly", "Pompiere", "Pontano Sans", "Port Lligat Sans", "Port Lligat Slab", "Prata", "Press Start 2P", "Princess Sofia", "Prociono", "Prosto One", "PT Mono", "PT Sans Caption", "PT Sans Narrow", "PT Serif Caption", "Purple Purse", "Quando", "Quantico", "Questrial", "Quicksand", "Quintessential", "Qwigley", "Racing Sans One", "Raleway Dots", "Rambla", "Rammetto One", "Ranchers", "Rancho", "Rationale", "Redressed", "Revalia", "Ribeye Marrow", "Ribeye", "Righteous", "Risque", "Roboto Condensed", "Roboto Slab", "Roboto", "Rochester", "Rokkitt", "Romanesco", "Ropa Sans", "Rosario", "Rosarivo", "Rouge Script", "Ruda", "Rufina", "Ruge Boogie", "Ruluko", "Rum Raisin", "Ruslan Display", "Russo One", "Ruthie", "Rye", "Sacramento", "Sail", "Salsa", "Sanchez", "Sancreek", "Sansita One", "Sarina", "Satisfy", "Scada", "Seaweed Script", "Sevillana", "Seymour One", "Shadows 
Into Light Two", "Shadows Into Light", "Shanti", "Share Tech Mono", "Share Tech", "Share", "Shojumaru", "Short Stack", "Sigmar One", "Signika Negative", "Signika", "Simonetta", "Sintony", "Sirin Stencil", "Skranji", "Smokum", "Snippet", "Snowburst One", "Sofadi One", "Sofia", "Sonsie One", "Sorts Mill Goudy", "Source Code Pro", "Source Sans Pro", "Spicy Rice", "Spinnaker", "Spirax", "Squada One", "Stalemate", "Stalinist One", "Stardos Stencil", "Stint Ultra Condensed", "Stint Ultra Expanded", "Stoke", "Strait", "Supermercado One", "Tauri", "Telex", "Tenor Sans", "Text Me One", "Tienne", "Titan One", "Titillium Web", "Trade Winds", "Trocchi", "Trochut", "Trykker", "Tulpen One", "Ubuntu Condensed", "Ubuntu Mono", "Ultra", "Uncial Antiqua", "Underdog", "Unica One", "UnifrakturCook", "Unlock", "Unna", "Vampiro One", "Varela Round", "Varela", "Vast Shadow", "Vidaloka", "Viga", "Voces", "Volkhov", "Voltaire", "Wallpoet", "Walter Turncoat", "Warnes", "Wellfleet", "Wendy One", "Wire One", "Yellowtail", "Yeseva One", "Yesteryear", "Zeyada",
]
def fonts(self):
return FONTS
def format_font(font):
return font.replace(" ", "+")
def FontsVocabulary(context):
terms = [SimpleTerm(value=format_font(pair),
token=format_font(pair),
title=pair) for pair in FONTS]
return SimpleVocabulary(terms)
directlyProvides(FontsVocabulary, IVocabularyFactory)
| 389.56
| 9,141
| 0.662388
| 1,131
| 9,739
| 5.700265
| 0.65252
| 0.010237
| 0.006204
| 0.004964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001069
| 0.135435
| 9,739
| 24
| 9,142
| 405.791667
| 0.764608
| 0
| 0
| 0
| 0
| 0
| 0.66475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0.0625
| 0.25
| 0.125
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
90b68d0d2296b4302d30ad34ff123181f7c2bf4b
| 2,285
|
py
|
Python
|
tests/test_problem_testset.py
|
feimeng93/probabilistic-bvp-solver
|
d6b38d4ff7b3ab6cf3003de30eb2f6eeb42c0beb
|
[
"MIT"
] | null | null | null |
tests/test_problem_testset.py
|
feimeng93/probabilistic-bvp-solver
|
d6b38d4ff7b3ab6cf3003de30eb2f6eeb42c0beb
|
[
"MIT"
] | null | null | null |
tests/test_problem_testset.py
|
feimeng93/probabilistic-bvp-solver
|
d6b38d4ff7b3ab6cf3003de30eb2f6eeb42c0beb
|
[
"MIT"
] | null | null | null |
"""Assert that the jacobians are implemented correctly."""
import sys
sys.path.append("..")
import numpy as np
import pytest
from bvps.problem_testset import testset_firstorder
@pytest.fixture
def dt():
return 1e-6
@pytest.fixture
def rtol():
return 1e-6
@pytest.fixture
def bvp1st():
return testset_firstorder()
@all_first_order_bvps
def test_jacobians_1st(bvp1st, dt, rtol):
bvp_dim = len(bvp1st.R.T)
random_direction = 1 + 0.1 * np.random.rand(bvp_dim)
random_point = 1 + np.random.rand(bvp_dim)
f1 = bvp1st.f(bvp1st.t0, random_point + dt * random_direction)
f2 = bvp1st.f(bvp1st.t0, random_point - dt * random_direction)
fd_approx = (f1 - f2) / (2 * dt)
true_df = bvp1st.df(bvp1st.t0, random_point)
assert f1.ndim == 1
assert f2.ndim == 1
assert true_df.ndim == 2
np.testing.assert_allclose(
true_df @ random_direction,
fd_approx,
rtol=rtol,
)
@all_second_order_bvps
def test_jacobians_2nd_dy(bvp2nd, dt, rtol):
bvp_dim = len(bvp2nd.R.T) // 2
random_direction = 1 + 0.1 * np.random.rand(bvp_dim)
random_point = 1 + np.random.rand(bvp_dim)
f1 = bvp2nd.f(bvp2nd.t0, random_point + dt * random_direction, random_point)
f2 = bvp2nd.f(bvp2nd.t0, random_point - dt * random_direction, random_point)
fd_approx = (f1 - f2) / (2 * dt)
true_df = bvp2nd.df_dy(bvp2nd.t0, random_point, random_point)
assert f1.ndim == 1
assert f2.ndim == 1
assert true_df.ndim == 2
np.testing.assert_allclose(
true_df @ random_direction,
fd_approx,
rtol=rtol,
)
@all_second_order_bvps
def test_jacobians_2nd_ddy(bvp2nd, dt, rtol):
bvp_dim = len(bvp2nd.R.T) // 2
random_direction = 1 + 0.1 * np.random.rand(bvp_dim)
random_point = 1 + np.random.rand(bvp_dim)
f1 = bvp2nd.f(bvp2nd.t0, random_point, random_point + dt * random_direction)
f2 = bvp2nd.f(bvp2nd.t0, random_point, random_point - dt * random_direction)
fd_approx = (f1 - f2) / (2 * dt)
true_df = bvp2nd.df_ddy(bvp2nd.t0, random_point, random_point)
assert f1.ndim == 1
assert f2.ndim == 1
assert true_df.ndim == 2
np.testing.assert_allclose(
true_df @ random_direction,
fd_approx,
rtol=rtol,
)
| 23.802083
| 80
| 0.66302
| 346
| 2,285
| 4.150289
| 0.176301
| 0.137883
| 0.081476
| 0.054318
| 0.832173
| 0.804318
| 0.768106
| 0.76532
| 0.76532
| 0.72493
| 0
| 0.04778
| 0.221444
| 2,285
| 95
| 81
| 24.052632
| 0.759415
| 0.022757
| 0
| 0.6
| 0
| 0
| 0.000898
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 1
| 0.092308
| false
| 0
| 0.061538
| 0.046154
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
90dfa742d90067a058b0d5b90a5bc28fdd06c1b1
| 89
|
py
|
Python
|
backend/src/pox/ext/gini/core/openflow_keepalive.py
|
anrl/gini4
|
d26649c8c02a1737159e48732cf1ee15ba2a604d
|
[
"MIT"
] | 11
|
2019-03-02T20:39:34.000Z
|
2021-09-02T19:47:38.000Z
|
backend/src/pox/ext/gini/core/openflow_keepalive.py
|
anrl/gini4
|
d26649c8c02a1737159e48732cf1ee15ba2a604d
|
[
"MIT"
] | 29
|
2019-01-17T15:44:48.000Z
|
2021-06-02T00:19:40.000Z
|
backend/src/pox/ext/gini/core/openflow_keepalive.py
|
anrl/gini4
|
d26649c8c02a1737159e48732cf1ee15ba2a604d
|
[
"MIT"
] | 11
|
2019-01-28T05:00:55.000Z
|
2021-11-12T03:08:32.000Z
|
#!/usr/bin/python2
from openflow import keepalive
def launch():
keepalive.launch()
| 12.714286
| 30
| 0.719101
| 11
| 89
| 5.818182
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013333
| 0.157303
| 89
| 6
| 31
| 14.833333
| 0.84
| 0.191011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
291482db80f61adab0c7117dffb7661d6fbb170b
| 81
|
py
|
Python
|
juno/server/http/handler/base/html_handler.py
|
DSciLab/juno
|
1d572c8d3fd06a6c1fcc51b42a6539dd3ae0927e
|
[
"MIT"
] | null | null | null |
juno/server/http/handler/base/html_handler.py
|
DSciLab/juno
|
1d572c8d3fd06a6c1fcc51b42a6539dd3ae0927e
|
[
"MIT"
] | null | null | null |
juno/server/http/handler/base/html_handler.py
|
DSciLab/juno
|
1d572c8d3fd06a6c1fcc51b42a6539dd3ae0927e
|
[
"MIT"
] | null | null | null |
import tornado.web
class HTMLBaseHandler(tornado.web.RequestHandler):
pass
| 13.5
| 50
| 0.790123
| 9
| 81
| 7.111111
| 0.777778
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135802
| 81
| 5
| 51
| 16.2
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
4644c5e162ff3a683854049f6be8b74fc72d0421
| 228
|
py
|
Python
|
generate_tfrecord.py
|
xiekai301/211011-CropGAN
|
6ea23b14fb8e5687313b65f98c42cc74cb9b5097
|
[
"Apache-2.0"
] | null | null | null |
generate_tfrecord.py
|
xiekai301/211011-CropGAN
|
6ea23b14fb8e5687313b65f98c42cc74cb9b5097
|
[
"Apache-2.0"
] | null | null | null |
generate_tfrecord.py
|
xiekai301/211011-CropGAN
|
6ea23b14fb8e5687313b65f98c42cc74cb9b5097
|
[
"Apache-2.0"
] | null | null | null |
from TFRecord_utils import create_tfrdataset
# import tensorflow as tf
create_tfrdataset(PATH='dataset/train', tfrecord_file='dataset/train.tfrecord')
create_tfrdataset(PATH='dataset/test', tfrecord_file='dataset/test.tfrecord')
| 57
| 79
| 0.837719
| 30
| 228
| 6.166667
| 0.466667
| 0.259459
| 0.216216
| 0.291892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 228
| 4
| 80
| 57
| 0.856481
| 0.100877
| 0
| 0
| 0
| 0
| 0.333333
| 0.210784
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d3b5b6b6c0c3358b29f7c54fd0b70e0e340aad92
| 154
|
py
|
Python
|
enumfields/__init__.py
|
druids/django-enumfields
|
0afc244666f7b6ca4dd0f915f6ec2e8ab21220c1
|
[
"MIT"
] | 2
|
2019-05-21T12:30:02.000Z
|
2020-02-18T15:13:26.000Z
|
enumfields/__init__.py
|
druids/django-enumfields
|
0afc244666f7b6ca4dd0f915f6ec2e8ab21220c1
|
[
"MIT"
] | 2
|
2019-05-24T07:34:41.000Z
|
2020-06-29T14:30:18.000Z
|
enumfields/__init__.py
|
druids/django-enumfields
|
0afc244666f7b6ca4dd0f915f6ec2e8ab21220c1
|
[
"MIT"
] | 3
|
2019-05-21T12:30:06.000Z
|
2021-11-08T19:54:30.000Z
|
from .enums import IntegerChoicesEnum, TextChoicesEnum, Choice
from .fields import CharEnumField, CharEnumSubField, IntegerEnumField, IntegerEnumSubField
| 51.333333
| 90
| 0.87013
| 13
| 154
| 10.307692
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084416
| 154
| 2
| 91
| 77
| 0.950355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d3ca77f418be45140135f96542be48477643b573
| 60
|
py
|
Python
|
mkdocsthemebootstrap4/__init__.py
|
LukeCarrier/mkdocs-bootstrap4
|
19354c9d5516d56342ecfb7f18b6b59ca85ec935
|
[
"MIT"
] | 3
|
2019-11-04T13:39:31.000Z
|
2020-11-28T05:42:22.000Z
|
mkdocsthemebootstrap4/__init__.py
|
LukeCarrier/mkdocs-bootstrap4
|
19354c9d5516d56342ecfb7f18b6b59ca85ec935
|
[
"MIT"
] | 8
|
2019-12-22T13:33:38.000Z
|
2021-07-15T03:12:38.000Z
|
mkdocsthemebootstrap4/__init__.py
|
LukeCarrier/mkdocs-theme-bootstrap4
|
19354c9d5516d56342ecfb7f18b6b59ca85ec935
|
[
"MIT"
] | null | null | null |
from .plugin import Bootstrap4Blockquotes, Bootstrap4Tables
| 30
| 59
| 0.883333
| 5
| 60
| 10.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.083333
| 60
| 1
| 60
| 60
| 0.927273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d3e7d66280cfebfb75607cb4f2fd3d873e12486a
| 229
|
py
|
Python
|
diameter/node/AVP_FailedAVP.py
|
tj8000rpm/PythonDiameter-0.7
|
539c4fb7658fc880ddb4ba175cdcce852d1e604d
|
[
"Zlib"
] | null | null | null |
diameter/node/AVP_FailedAVP.py
|
tj8000rpm/PythonDiameter-0.7
|
539c4fb7658fc880ddb4ba175cdcce852d1e604d
|
[
"Zlib"
] | null | null | null |
diameter/node/AVP_FailedAVP.py
|
tj8000rpm/PythonDiameter-0.7
|
539c4fb7658fc880ddb4ba175cdcce852d1e604d
|
[
"Zlib"
] | null | null | null |
from diameter.AVP_Grouped import AVP_Grouped
import diameter.ProtocolConstants
class AVP_FailedAVP(AVP_Grouped):
def __init__(self,a,vendor_id=0):
AVP_Grouped.__init__(self,[a],vendor_id)
def _unittest():
pass
| 20.818182
| 48
| 0.764192
| 32
| 229
| 4.96875
| 0.53125
| 0.251572
| 0.201258
| 0.188679
| 0.213836
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005102
| 0.144105
| 229
| 10
| 49
| 22.9
| 0.806122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.285714
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
31070dc2621413c13a281bb6d25f983595110ba6
| 217
|
py
|
Python
|
djangopj/geo/admin.py
|
hikarine3/docker-django
|
ddcb15fa741993c3098a4f6bebbdaa498c27ac20
|
[
"MIT"
] | 25
|
2019-11-19T07:16:20.000Z
|
2021-11-09T11:04:15.000Z
|
djangopj/geo/admin.py
|
hikarine3/docker-django
|
ddcb15fa741993c3098a4f6bebbdaa498c27ac20
|
[
"MIT"
] | 6
|
2020-05-16T10:51:09.000Z
|
2021-09-22T19:01:51.000Z
|
djangopj/geo/admin.py
|
hikarine3/docker-django
|
ddcb15fa741993c3098a4f6bebbdaa498c27ac20
|
[
"MIT"
] | 1
|
2019-03-26T05:33:01.000Z
|
2019-03-26T05:33:01.000Z
|
from django.contrib import admin
# Register your models here.
from django.contrib import admin
from .models import Country
from .models import Prefecture
admin.site.register(Country)
admin.site.register(Prefecture)
| 21.7
| 32
| 0.820276
| 30
| 217
| 5.933333
| 0.4
| 0.11236
| 0.191011
| 0.258427
| 0.314607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115207
| 217
| 9
| 33
| 24.111111
| 0.927083
| 0.119816
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
312de341942ad2a8115d0c49f1bbeaa787705ac1
| 43
|
py
|
Python
|
bin/__init__.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 7
|
2020-10-28T13:46:08.000Z
|
2021-05-27T06:41:56.000Z
|
bin/__init__.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 2
|
2020-10-27T19:15:12.000Z
|
2020-10-27T19:15:25.000Z
|
bin/__init__.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 2
|
2021-04-15T05:54:30.000Z
|
2022-02-08T00:10:10.000Z
|
from sp_calc import SinglePointCalculation
| 21.5
| 42
| 0.906977
| 5
| 43
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
315261c9e9bb0262caaa014f65570cfc6becbdce
| 38
|
py
|
Python
|
obs/clis/__init__.py
|
meongbego/neo-obs
|
23c85642d3533e16855a1158fb939cd4b47fc7d6
|
[
"MIT"
] | 1
|
2018-09-08T12:59:11.000Z
|
2018-09-08T12:59:11.000Z
|
obs/clis/__init__.py
|
meongbego/neo-obs
|
23c85642d3533e16855a1158fb939cd4b47fc7d6
|
[
"MIT"
] | null | null | null |
obs/clis/__init__.py
|
meongbego/neo-obs
|
23c85642d3533e16855a1158fb939cd4b47fc7d6
|
[
"MIT"
] | null | null | null |
from .login import *
from .ls import *
| 19
| 20
| 0.710526
| 6
| 38
| 4.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 2
| 21
| 19
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3161c9f50918b093b69569a65a18627f0eb7a8ea
| 45,276
|
py
|
Python
|
animal/arenas/utils/create_arenas.py
|
compsciencelab/ppo_D
|
1870c908f498ceb29295e5625ff5598bed82cbb3
|
[
"MIT"
] | 4
|
2021-08-18T07:47:38.000Z
|
2022-01-06T17:27:21.000Z
|
animal/arenas/utils/create_arenas.py
|
compsciencelab/ppo_D
|
1870c908f498ceb29295e5625ff5598bed82cbb3
|
[
"MIT"
] | null | null | null |
animal/arenas/utils/create_arenas.py
|
compsciencelab/ppo_D
|
1870c908f498ceb29295e5625ff5598bed82cbb3
|
[
"MIT"
] | 1
|
2022-02-16T11:03:12.000Z
|
2022-02-16T11:03:12.000Z
|
""" Create sets C1 to C10 of test arenas with known max reward for testing. """
import random
import numpy as np
from .edit_arenas import (add_object, write_arena, create_wall)
from .sample_features import (random_size, random_pos, random_rotation)
from .edit_arenas import (add_ramp_scenario, add_walled, add_choice,
cross_test, ramp_test_1, ramp_test_2, ramp_test_3,
tunnel_test_1, tunnel_test_2, push_test_1,
push_test_2, narrow_spaces_1, narrow_spaces_2,
preference_test_1, blackout_test_1, reasoning_step_1,
reasoning_step_2, reasoning_step_3)
# Catalogue of spawnable AnimalAI object names, grouped by how they interact
# with the agent.  The create_* helpers below sample from these groups when
# populating arenas.
objects_dict = {
    # Balls that grant (Good*) or remove (Bad*) reward; *Bounce variants move.
    'reward_objects': [
        'GoodGoal',
        'GoodGoalBounce',
        'BadGoal',
        'BadGoalBounce',
        'GoodGoalMulti',
        'GoodGoalMultiBounce'
    ],
    # Static obstacles the agent cannot push.
    'immovable_objects': [
        'Wall',
        'Ramp',
        'CylinderTunnel',
        'WallTransparent',
        'CylinderTunnelTransparent'],
    # Obstacles the agent can push around.
    'movable_objects': [
        'Cardbox1',
        'Cardbox2',
        'UObject',
        'LObject',
        'LObject2'
    ],
    # Floor zones that hurt (DeathZone) or penalize (HotZone) the agent.
    'zone_objects': [
        'DeathZone',
        'HotZone'
    ],
}
def create_c1_arena(target_path, arena_name, max_reward=5, time=250,
                    max_num_good_goals=1, is_train=False):
    """
    Create .yaml file for C1-type arena.
        - Only goals.
        - Fixed random size for all goals.
        - At least one green ball.
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        max_reward (float): set max reward for arena. Relates to arena complexity.
        time (int): episode length.
        max_num_good_goals: goal limit.
        is_train (bool): when True, positions/rotations are passed as None so
            the environment randomizes them at reset.
    """
    allowed_objects = objects_dict['reward_objects']
    # One shared ball size for the whole arena, clipped so a single goal
    # never exceeds the reward budget.
    size_goal = (
        np.clip(random_size('GoodGoal')[0], 1.0, max_reward), 0.0, 0.0)
    position_goal = random_pos() if not is_train else None
    reward = float(max_reward)
    # The arena always starts with one guaranteed green ball.
    arena = add_object('', 'GoodGoal', size=size_goal, pos=position_goal)
    num_goals = 1
    worst_goal = 0.0
    min_reward = 0.0
    best_goal = size_goal[0]
    # Keep adding reward objects while the remaining budget still leaves
    # room for another ball of the shared size.
    while reward - best_goal > size_goal[0]:
        category = allowed_objects[np.random.randint(0, len(allowed_objects))]
        position_goal = random_pos() if not is_train else None
        if category in ['GoodGoalMulti', 'GoodGoalMultiBounce']:
            # Multi goals consume part of the reward budget.
            reward -= size_goal[0]
        if category in ['BadGoal', 'BadGoalBounce']:
            worst_goal = min(worst_goal, size_goal[0])
        if category in ['GoodGoal', 'GoodGoalBounce']:
            best_goal = max(best_goal, size_goal[0])
            # Cap the number of (stationary/bouncing) green goals; `continue`
            # skips the add_object call below.
            if num_goals >= max_num_good_goals:
                continue
            num_goals += 1
        arena = add_object(arena, category, size=size_goal, pos=position_goal)
    # NOTE(review): min_reward is computed but never returned or written.
    min_reward -= worst_goal
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'c1', position_agent, rotation_agent
def create_c1_arena_weird(target_path, arena_name, time=250, is_train=False):
    """
    Create .yaml file for a degenerate C1-type arena.
        - A single small red (negative) ball and the agent; no positive
          reward is present.
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        is_train (bool): when True, positions/rotations are passed as None so
            the environment randomizes them at reset.
    """
    position_goal = random_pos() if not is_train else None
    # Fixed-size 0.5 BadGoal is the only object besides the agent.
    arena = add_object('', 'BadGoal', size=(0.5, 0.5, 0.5), pos=position_goal)
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'c1_weird', position_agent, rotation_agent
def create_c2_arena(target_path, arena_name, max_reward=5, time=250,
                    max_num_good_goals=1, is_train=False):
    """
    Create .yaml file for C2-type arena.
        - Only goals.
        - Different random size for all goals.
        - At least one green and one red ball.
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        max_reward (float): set max reward for arena. Relates to arena complexity.
        time (int): episode length.
        max_num_good_goals: goal limit.
        is_train (bool): when True, positions/rotations are passed as None so
            the environment randomizes them at reset.
    """
    allowed_objects = objects_dict['reward_objects']
    reward = float(max_reward)
    # Mandatory green ball, size clipped to the reward budget.
    size_goal = (
        np.clip(random_size('GoodGoal')[0], 1.0, max_reward), 0.0, 0.0)
    position_goal = random_pos() if not is_train else None
    arena = add_object('', 'GoodGoal', size=size_goal, pos=position_goal)
    best_goal = size_goal[0]
    # Mandatory red ball.
    size_goal = random_size('BadGoal')
    position_goal = random_pos() if not is_train else None
    arena = add_object(arena, 'BadGoal', size=size_goal, pos=position_goal)
    worst_goal = size_goal[0]
    num_goals = 1
    min_reward = 0.0
    # Keep adding goals while the budget allows; each goal gets its own
    # random size (unlike C1, where all goals share one size).
    while reward - best_goal > size_goal[0]:
        category = allowed_objects[np.random.randint(0, len(allowed_objects))]
        size_goal = random_size(category)
        position_goal = random_pos() if not is_train else None
        if category in ['GoodGoalMulti', 'GoodGoalMultiBounce']:
            # Multi goals consume part of the reward budget.
            reward -= size_goal[0]
        if category in ['BadGoal', 'BadGoalBounce']:
            worst_goal = min(worst_goal, size_goal[0])
        if category in ['GoodGoal', 'GoodGoalBounce']:
            # Cap the number of green goals; `continue` skips the add below.
            if num_goals >= max_num_good_goals:
                continue
            best_goal = max(best_goal, size_goal[0])
            num_goals += 1
        arena = add_object(arena, category, size=size_goal, pos=position_goal)
    # NOTE(review): min_reward is tracked but never used or returned.
    min_reward -= worst_goal
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'c2', position_agent, rotation_agent
def create_c3_arena_basic(target_path, arena_name, time=250, num_walls=1,
                          is_train=False):
    """Create a .yaml file for a basic C3-type arena.

    One randomly chosen positive goal plus ``num_walls`` walls placed by
    ``add_walled``; the agent pose is randomized unless ``is_train``.
    """
    goal_kind = random.choice(
        ['GoodGoal', 'GoodGoalBounce', 'GoodGoalMulti', 'GoodGoalMultiBounce'])
    goal_size = random_size(goal_kind)
    goal_pos = random_pos() if not is_train else None
    layout = add_object('', goal_kind, size=goal_size, pos=goal_pos)
    layout = add_walled(layout, num_walls=num_walls)
    agent_pos = random_pos() if not is_train else None
    agent_rot = random_rotation() if not is_train else None
    layout = add_object(layout, "Agent", pos=agent_pos, rot=agent_rot)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'c3', agent_pos, agent_rot
def create_c3_arena(target_path, arena_name, time=250, max_movable=3,
                    max_immovable=3, is_train=False):
    """
    Create .yaml file for C3-type arena.
        - One random positive reward ball, random sized
        - With probability 0.5 add red ball, random sized
        - If specified randomly add multiple movable and immovable objects
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        max_movable (int): set a limit to number of movable objects.
        max_immovable (int): set a limit to number of immovable.
        is_train (bool): when True, sizes/positions are passed as None so
            the environment randomizes them at reset.
    """
    # Mandatory positive goal of a random kind.
    category = random.choice(
        ['GoodGoal', 'GoodGoalBounce', 'GoodGoalMulti', 'GoodGoalMultiBounce'])
    size_goal = random_size(category)
    position_goal = random_pos() if not is_train else None
    arena = add_object('', category, size=size_goal, pos=position_goal)
    # Optional red (negative) ball with probability 0.5.
    if random.random() > 0.5:
        category = random.choice(['BadGoal', 'BadGoalBounce'])
        size_goal = random_size(category)
        position_goal = random_pos() if not is_train else None
        arena = add_object(arena, category, size=size_goal, pos=position_goal)
    # Up to max_movable movable obstacles, each spawned with probability 0.9.
    for _ in range(max_movable):
        if random.random() > 0.1:
            category = random.choice(objects_dict['movable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    # Up to max_immovable immovable obstacles, same spawn probability.
    for _ in range(max_immovable):
        if random.random() > 0.1:
            category = random.choice(objects_dict['immovable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'c3', position_agent, rotation_agent
def create_c4_arena(target_path, arena_name, time=250, num_red_zones=2,
                    max_orange_zones=1, max_movable=1, max_immovable=1,
                    is_train=False):
    """
    Create .yaml file for C4-type arena.
        - 1 green food (stationary) and some red zones
        - add GoodGoalMulti with probability 0.5
        - add orange zone with probability 0.5
        - add movable object with probability 0.2
        - add immovable object with probability 0.2
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        num_red_zones (int): fixed number of red zones.
        max_orange_zones (int): set a limit to number of orange zones.
        max_movable (int): set a limit to number of movable objects.
        max_immovable (int): set a limit to number of immovable.
        is_train (bool): when True, sizes/positions are passed as None so
            the environment randomizes them at reset.
    """
    # Mandatory stationary green goal.
    size_goal = random_size('GoodGoal')
    position_goal = random_pos() if not is_train else None
    arena = add_object('', 'GoodGoal', size=size_goal, pos=position_goal)
    # Optional multi goal with probability 0.5.
    if random.random() > 0.5:
        size_goal = random_size('GoodGoalMulti')
        position_goal = random_pos() if not is_train else None
        arena = add_object(arena, 'GoodGoalMulti', size=size_goal,
                           pos=position_goal)
    # Fixed number of red (death) zones.
    for _ in range(num_red_zones):
        size_object = random_size('DeathZone')
        pos_object = random_pos() if not is_train else None
        arena = add_object(arena, 'DeathZone', size=size_object,
                           pos=pos_object)
    # Orange (hot) zones, each spawned with probability 0.5.
    for _ in range(max_orange_zones):
        if random.random() > 0.5:
            size_object = random_size('HotZone')
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, 'HotZone', size=size_object,
                               pos=pos_object)
    # Movable obstacles, each spawned with probability 0.2.
    for _ in range(max_movable):
        if random.random() > 0.8:
            category = random.choice(objects_dict['movable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    # Immovable obstacles, each spawned with probability 0.2.
    for _ in range(max_immovable):
        if random.random() > 0.8:
            category = random.choice(objects_dict['immovable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'c4', position_agent, rotation_agent
def create_c5_arena(target_path, arena_name, time=250, max_movable=1,
                    max_immovable=1, is_train=False):
    """
    Create .yaml file for C5-type arena.
        - from 1 to 2 platforms accessible by ramps with a goal on top.
        - if specified, add multiple movable and immovable objects.
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        max_movable (int): set a limit to number of movable objects.
        max_immovable (int): set a limit to number of immovable.
        is_train (bool): when True, sizes/positions are passed as None so
            the environment randomizes them at reset.
    """
    # Two ramp/platform scenarios; add_ramp_scenario may add 0 or 1 each
    # call, giving 1-2 platforms total — TODO confirm against edit_arenas.
    arena = add_ramp_scenario('')
    arena = add_ramp_scenario(arena)
    # Movable obstacles, each spawned with probability 0.8.
    for _ in range(max_movable):
        if random.random() < 0.8:
            category = random.choice(objects_dict['movable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    # Immovable obstacles (opaque subset only), probability 0.8 each.
    for _ in range(max_immovable):
        if random.random() < 0.8:
            category = random.choice(['Wall', 'Ramp', 'CylinderTunnel'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'c5', position_agent, rotation_agent
def create_c6_arena_basic(target_path, arena_name, time=250, num_walls=1,
                          is_train=False):
    """Create a .yaml file for a basic C6-type arena.

    Same layout as the basic C3 arena, but the walls are given random
    colors (``random_rgb=True``).
    """
    goal_kind = random.choice(
        ['GoodGoal', 'GoodGoalBounce', 'GoodGoalMulti', 'GoodGoalMultiBounce'])
    goal_size = random_size(goal_kind)
    goal_pos = random_pos() if not is_train else None
    layout = add_object('', goal_kind, size=goal_size, pos=goal_pos)
    layout = add_walled(layout, num_walls=num_walls, random_rgb=True)
    agent_pos = random_pos() if not is_train else None
    agent_rot = random_rotation() if not is_train else None
    layout = add_object(layout, "Agent", pos=agent_pos, rot=agent_rot)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'c6', agent_pos, agent_rot
def create_c6_arena(target_path, arena_name, time=250, max_movable=3,
                    max_immovable=3, is_train=False):
    """
    Create .yaml file for C6-type arena.
        - One random positive reward ball, random sized
        - add a second positive reward , with probability 0.5
        - add up to 2 red balls , with probability 0.5 each
        - If specified, add also extra multiple movable and immovable objects
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        max_movable (int): set a limit to number of movable objects.
        max_immovable (int): set a limit to number of immovable.
        is_train (bool): when True, sizes/positions are passed as None so
            the environment randomizes them at reset.
    """
    # Mandatory positive goal of a random kind.
    category = random.choice(
        ['GoodGoal', 'GoodGoalBounce', 'GoodGoalMulti', 'GoodGoalMultiBounce'])
    size_goal = random_size(category)
    position_goal = random_pos() if not is_train else None
    arena = add_object('', category, size=size_goal, pos=position_goal)
    # Optional second positive goal with probability 0.5.
    if random.random() > 0.5:
        category = random.choice(
            ['GoodGoal', 'GoodGoalBounce', 'GoodGoalMulti',
             'GoodGoalMultiBounce'])
        size_goal = random_size(category)
        position_goal = random_pos() if not is_train else None
        arena = add_object(arena, category, size=size_goal, pos=position_goal)
    # Up to two red balls, probability 0.5 each.
    for _ in range(2):
        if random.random() > 0.5:
            category = random.choice(['BadGoal', 'BadGoalBounce'])
            size_goal = random_size(category)
            position_goal = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_goal,
                               pos=position_goal)
    # Movable obstacles, probability 0.8 each.
    for _ in range(max_movable):
        if random.random() < 0.8:
            category = random.choice(objects_dict['movable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    # Immovable obstacles (opaque subset only), probability 0.8 each.
    for _ in range(max_immovable):
        if random.random() < 0.8:
            category = random.choice(['Wall', 'Ramp', 'CylinderTunnel'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'c6', position_agent, rotation_agent
def create_c7_arena(target_path, arena_name, time=250, max_movable=3,
                    max_immovable=3, is_train=False):
    """
    Create .yaml file for C7-type arena.
        - One random positive reward ball, random sized
        - add a second positive reward , with probability 0.5
        - add up to 2 red balls , with probability 0.5 each
        - Add multiple movable and immovable objects
        - random blackouts
    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        max_movable (int): set a limit to number of movable objects.
        max_immovable (int): set a limit to number of immovable.
        is_train (bool): when True, sizes/positions are passed as None so
            the environment randomizes them at reset.
    """
    # Candidate blackout schedules passed to write_arena; a single negative
    # value presumably means a periodic/permanent blackout — confirm against
    # write_arena's handling.
    blackout_options = [[-20], [-40], [-60], [25, 30, 50, 55, 75],
                        [50, 55, 75, 80, 100, 105, 125]]
    # Mandatory positive goal of a random kind.
    category = random.choice(
        ['GoodGoal', 'GoodGoalBounce', 'GoodGoalMulti', 'GoodGoalMultiBounce'])
    size_goal = random_size(category)
    position_goal = random_pos() if not is_train else None
    arena = add_object('', category, size=size_goal, pos=position_goal)
    # Optional second positive goal with probability 0.5.
    if random.random() > 0.5:
        category = random.choice(
            ['GoodGoal', 'GoodGoalBounce', 'GoodGoalMulti',
             'GoodGoalMultiBounce'])
        size_goal = random_size(category)
        position_goal = random_pos() if not is_train else None
        arena = add_object(arena, category, size=size_goal, pos=position_goal)
    # Up to two red balls, probability 0.5 each.
    for _ in range(2):
        if random.random() > 0.5:
            category = random.choice(['BadGoal', 'BadGoalBounce'])
            size_goal = random_size(category)
            position_goal = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_goal,
                               pos=position_goal)
    # Movable obstacles, probability 0.8 each.
    for _ in range(max_movable):
        if random.random() < 0.8:
            category = random.choice(objects_dict['movable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    # Immovable obstacles, probability 0.8 each.
    for _ in range(max_immovable):
        if random.random() < 0.8:
            category = random.choice(objects_dict['immovable_objects'])
            size_object = random_size(category) if not is_train else None
            pos_object = random_pos() if not is_train else None
            arena = add_object(arena, category, size=size_object,
                               pos=pos_object)
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena,
                blackouts=random.choice(blackout_options))
    return 'c7', position_agent, rotation_agent
def create_maze(target_path, arena_name, time=250, num_cells=3, obj=None,
                is_train=False):
    """
    Create .yaml file for a grid maze arena.

    The 40x40 floor is divided into num_cells x num_cells cells; each cell
    boundary becomes a wall with a door-sized gap (and optionally an extra
    object in the gap), then one GoodGoal and the agent are placed.

    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        num_cells (int): cells per side of the maze grid.
        obj: object type placed in each wall gap; 'door' leaves the gap
            open. NOTE(review): the default None is passed straight to
            create_wall/add_object — confirm it is a valid value there.
        is_train (bool): when True, positions/rotations are passed as None
            so the environment randomizes them at reset.
    """
    wall_type = random.choice(['Wall', 'WallTransparent'])
    arena = ''
    # Tunnels need a wider opening than plain doors.
    if obj == 'CylinderTunnel':
        gap = 3
    else:
        gap = 2
    num_cells_x = num_cells
    num_cells_y = num_cells
    side_wall_len_x = int(40 / num_cells_x)
    side_wall_len_y = int(40 / num_cells_y)
    # Interior grid lines (drop 0), then close the far edge at 40.
    location_pillars_x = list(range(0, 40, side_wall_len_x))[1:]
    location_pillars_y = list(range(0, 40, side_wall_len_y))[1:]
    walls_loc_x = location_pillars_x
    walls_loc_x.append(40)
    walls_loc_y = location_pillars_y
    walls_loc_y.append(40)
    prev_y = 0
    prev_x = 0
    # Wall height: either crawl-under low (0.5) or full height (10).
    z_size = random.choice([0.5, 10])
    for y in walls_loc_y:
        for x in walls_loc_x:
            # Horizontal wall segment from (prev_x, y) to (x, y), split in
            # two pieces around a door-sized gap.
            size_1, pos_1, size_2, pos_2 = create_wall(
                (prev_x, y), (x, y), z_size, obj='door', gap=gap)
            arena = add_object(arena, wall_type, size=size_1, pos=pos_1, rot=0)
            arena = add_object(arena, wall_type, size=size_2, pos=pos_2, rot=0)
            if obj != 'door':
                # Fill the gap with the requested object.
                size, pos = create_wall(
                    (prev_x, y), (x, y), z_size, obj=obj, gap=gap)
                arena = add_object(arena, obj, size=size, pos=pos, rot=0)
            # Vertical wall segment from (x, prev_y) to (x, y), same scheme;
            # the gap object is rotated 90 degrees.
            size_1, pos_1, size_2, pos_2 = create_wall(
                (x, prev_y), (x, y), z_size, obj='door', gap=gap)
            arena = add_object(arena, wall_type, size=size_1, pos=pos_1, rot=0)
            arena = add_object(arena, wall_type, size=size_2, pos=pos_2, rot=0)
            if obj != 'door':
                size, pos = create_wall(
                    (x, prev_y), (x, y), z_size, obj=obj, gap=gap)
                arena = add_object(arena, obj, size=size, pos=pos, rot=90)
            prev_x = x
        prev_x = 0
        prev_y = y
    size_goal = random_size('GoodGoal')
    position_goal = random_pos() if not is_train else None
    arena = add_object(arena, 'GoodGoal', size=size_goal, pos=position_goal)
    position_agent = random_pos() if not is_train else None
    rotation_agent = random_rotation() if not is_train else None
    arena = add_object(arena, "Agent", pos=position_agent, rot=rotation_agent)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'maze', position_agent, rotation_agent
def create_arena_choice(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the choice arena.

    Builds the choice layout via ``add_choice`` and places one GoodGoal and
    one GoodGoalMulti on it.

    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        is_train (bool): when True, goal positions are passed as None so
            the environment randomizes them at reset.

    Returns:
        tuple: ('choice', agent position, agent rotation) — the agent pose
        is fixed by the choice layout.
    """
    arena = add_choice('')
    size_goal = random_size('GoodGoal')
    position_goal = random_pos() if not is_train else None
    arena = add_object(arena, 'GoodGoal', size=size_goal, pos=position_goal)
    size_goal = random_size('GoodGoalMulti')
    position_goal = random_pos() if not is_train else None
    arena = add_object(arena, 'GoodGoalMulti', size=size_goal,
                       pos=position_goal)
    # BUG FIX: is_train was mistakenly passed as a keyword to str.format,
    # where it was silently ignored.
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'choice', (20, 0, 20), 0
def create_arena_cross(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the cross test arena (layout built by
    ``cross_test``)."""
    layout, agent_pos, agent_rot = cross_test("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'cross', agent_pos, agent_rot
def create_arena_push1(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the first push test arena (layout built by
    ``push_test_1``)."""
    layout, agent_pos, agent_rot = push_test_1("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'push1', agent_pos, agent_rot
def create_arena_push2(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the second push test arena (layout built by
    ``push_test_2``)."""
    layout, agent_pos, agent_rot = push_test_2("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'push2', agent_pos, agent_rot
def create_arena_tunnel1(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the first tunnel test arena (layout built by
    ``tunnel_test_1``)."""
    layout, agent_pos, agent_rot = tunnel_test_1("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'tunnel1', agent_pos, agent_rot
def create_arena_tunnel2(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the second tunnel test arena (layout built by
    ``tunnel_test_2``)."""
    layout, agent_pos, agent_rot = tunnel_test_2("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'tunnel2', agent_pos, agent_rot
def create_arena_ramp1(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the first ramp test arena (layout built by
    ``ramp_test_1``)."""
    layout, agent_pos, agent_rot = ramp_test_1("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'ramp1', agent_pos, agent_rot
def create_arena_ramp2(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the second ramp test arena (layout built by
    ``ramp_test_2``)."""
    layout, agent_pos, agent_rot = ramp_test_2("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'ramp2', agent_pos, agent_rot
def create_arena_ramp3(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the third ramp test arena (layout built by
    ``ramp_test_3``)."""
    layout, agent_pos, agent_rot = ramp_test_3("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'ramp3', agent_pos, agent_rot
def create_arena_narrow_spaces_1(target_path, arena_name, time=250,
                                 is_train=False):
    """Create a .yaml file for the first narrow-spaces test arena (layout
    built by ``narrow_spaces_1``)."""
    layout, agent_pos, agent_rot = narrow_spaces_1("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'narrow1', agent_pos, agent_rot
def create_arena_narrow_spaces_2(target_path, arena_name, time=250,
                                 is_train=False):
    """Create a .yaml file for the second narrow-spaces test arena (layout
    built by ``narrow_spaces_2``)."""
    layout, agent_pos, agent_rot = narrow_spaces_2("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'narrow2', agent_pos, agent_rot
def create_arena_pref1(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the preference test arena (layout built by
    ``preference_test_1``)."""
    layout, agent_pos, agent_rot = preference_test_1("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'pref', agent_pos, agent_rot
def create_blackout_test_1(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the blackout test arena (layout built by
    ``blackout_test_1``, written with a fixed [10, 1000] blackout schedule)."""
    layout, agent_pos, agent_rot = blackout_test_1("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout,
                blackouts=[10, 1000])
    return 'blackout', agent_pos, agent_rot
def create_reasoning_step_1(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the first reasoning-step arena (layout built
    by ``reasoning_step_1``)."""
    layout, agent_pos, agent_rot = reasoning_step_1("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'reasoning1', agent_pos, agent_rot
def create_reasoning_step_2(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the second reasoning-step arena (layout built
    by ``reasoning_step_2``)."""
    layout, agent_pos, agent_rot = reasoning_step_2("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'reasoning2', agent_pos, agent_rot
def create_reasoning_step_3(target_path, arena_name, time=250, is_train=False):
    """Create a .yaml file for the third reasoning-step arena (layout built
    by ``reasoning_step_3``)."""
    layout, agent_pos, agent_rot = reasoning_step_3("", is_train=is_train)
    write_arena('{}/{}'.format(target_path, arena_name), time, layout)
    return 'reasoning3', agent_pos, agent_rot
def create_front_back(target_path, arena_name, time=1000, is_train=False,
                      rew_range=(0.5, 5)):
    """Create a .yaml arena with two green goals, one in front of and one
    behind the agent, which starts between them at (20, 0, 20).

    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        is_train (bool): unused here; kept for interface parity with the
            other ``create_*`` helpers.
        rew_range (sequence of 2 floats): inclusive goal-size range sampled
            on a 0.1 grid. (Default changed from a mutable list to a tuple;
            values are unchanged.)
    """
    arena = ''
    # BUG FIX: np.linspace requires an integer sample count; the original
    # passed the float expression (hi - lo) / 0.1 + 1, which modern numpy
    # rejects with TypeError.
    num_sizes = int(round((rew_range[1] - rew_range[0]) / 0.1)) + 1
    range_list = [round(v, 2)
                  for v in np.linspace(rew_range[0], rew_range[1], num_sizes)]
    # Two distinct sizes: front goal at z=30, back goal at z=10.
    for size, position_goal in zip(random.sample(range_list, 2),
                                   [(20, 0, 30), (20, 0, 10)]):
        arena = add_object(arena, 'GoodGoal', size=(size, size, size),
                           pos=position_goal)
    arena = add_object(arena, "Agent", pos=(20, 0, 20), rot=0)
    save_name = '{}/{}'.format(target_path, arena_name)
    # Randomly chosen blackout schedule.
    blackout_options_2 = [[-10], [1000, 1000]]
    write_arena(save_name, time, arena,
                blackouts=random.choice(blackout_options_2))
    return 'front_back', (20, 0, 20), 0
def create_left_right(target_path, arena_name, time=1000, is_train=False,
                      rew_range=(0.5, 5)):
    """Create a .yaml arena with two green goals, one left and one right of
    the agent, which starts between them at (20, 0, 20).

    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        is_train (bool): unused here; kept for interface parity with the
            other ``create_*`` helpers.
        rew_range (sequence of 2 floats): inclusive goal-size range sampled
            on a 0.1 grid. (Default changed from a mutable list to a tuple;
            values are unchanged.)
    """
    arena = ''
    # BUG FIX: np.linspace requires an integer sample count; the original
    # passed a float expression, which modern numpy rejects with TypeError.
    num_sizes = int(round((rew_range[1] - rew_range[0]) / 0.1)) + 1
    range_list = [round(v, 2)
                  for v in np.linspace(rew_range[0], rew_range[1], num_sizes)]
    # Two distinct sizes: left goal at x=10, right goal at x=30.
    for size, position_goal in zip(random.sample(range_list, 2),
                                   [(10, 0, 20), (30, 0, 20)]):
        arena = add_object(arena, 'GoodGoal', size=(size, size, size),
                           pos=position_goal)
    arena = add_object(arena, "Agent", pos=(20, 0, 20), rot=0)
    save_name = '{}/{}'.format(target_path, arena_name)
    # Randomly chosen blackout schedule.
    blackout_options_2 = [[-10], [1000, 1000]]
    write_arena(save_name, time, arena,
                blackouts=random.choice(blackout_options_2))
    return 'left_right', (20, 0, 20), 0
def create_corners_green(target_path, arena_name, time=1000, is_train=False,
                         rew_range=(0.5, 5)):
    """Create a .yaml arena with four green goals of distinct sizes, one in
    each corner, and the agent at the center (no fixed rotation).

    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        is_train (bool): unused here; kept for interface parity with the
            other ``create_*`` helpers.
        rew_range (sequence of 2 floats): inclusive goal-size range sampled
            on a 0.1 grid. (Default changed from a mutable list to a tuple;
            values are unchanged.)
    """
    arena = ''
    # BUG FIX: np.linspace requires an integer sample count; the original
    # passed a float expression, which modern numpy rejects with TypeError.
    num_sizes = int(round((rew_range[1] - rew_range[0]) / 0.1)) + 1
    range_list = [round(v, 2)
                  for v in np.linspace(rew_range[0], rew_range[1], num_sizes)]
    # Four distinct sizes, one per corner (same corner order as before).
    corner_positions = [(37, 0, 37), (3, 0, 37), (37, 0, 3), (3, 0, 3)]
    for size, position_goal in zip(random.sample(range_list, 4),
                                   corner_positions):
        arena = add_object(arena, 'GoodGoal', size=(size, size, size),
                           pos=position_goal)
    arena = add_object(arena, "Agent", pos=(20, 0, 20))
    save_name = '{}/{}'.format(target_path, arena_name)
    # Randomly chosen blackout schedule.
    blackout_options_2 = [[-10], [1000, 1000]]
    write_arena(save_name, time, arena,
                blackouts=random.choice(blackout_options_2))
    return 'corners_green', (20, 0, 20), 0
def create_cross_green(target_path, arena_name, time=1000, is_train=False,
                       rew_range=(0.5, 5)):
    """Create a .yaml arena with four green goals of distinct sizes arranged
    in a cross around the agent, which starts at the center (no fixed
    rotation).

    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        is_train (bool): unused here; kept for interface parity with the
            other ``create_*`` helpers.
        rew_range (sequence of 2 floats): inclusive goal-size range sampled
            on a 0.1 grid. (Default changed from a mutable list to a tuple;
            values are unchanged.)
    """
    arena = ''
    # BUG FIX: np.linspace requires an integer sample count; the original
    # passed a float expression, which modern numpy rejects with TypeError.
    num_sizes = int(round((rew_range[1] - rew_range[0]) / 0.1)) + 1
    range_list = [round(v, 2)
                  for v in np.linspace(rew_range[0], rew_range[1], num_sizes)]
    # Four distinct sizes on the cross arms (same order as before).
    arm_positions = [(30, 0, 20), (10, 0, 20), (20, 0, 10), (20, 0, 30)]
    for size, position_goal in zip(random.sample(range_list, 4),
                                   arm_positions):
        arena = add_object(arena, 'GoodGoal', size=(size, size, size),
                           pos=position_goal)
    arena = add_object(arena, "Agent", pos=(20, 0, 20))
    save_name = '{}/{}'.format(target_path, arena_name)
    # Randomly chosen blackout schedule.
    blackout_options_2 = [[-10], [1000, 1000]]
    write_arena(save_name, time, arena,
                blackouts=random.choice(blackout_options_2))
    return 'cross_green', (20, 0, 20), 0
def create_in_front(target_path, arena_name, time=1000, is_train=False,
                    rew_range=(0.5, 5)):
    """Create a .yaml arena with two green goals of distinct sizes placed
    ahead of the agent, which starts near the wall at (20, 0, 3) facing
    rotation 0.

    Parameters:
        target_path (str): save dir path.
        arena_name (str): save name arena.
        time (int): episode length.
        is_train (bool): unused here; kept for interface parity with the
            other ``create_*`` helpers.
        rew_range (sequence of 2 floats): inclusive goal-size range sampled
            on a 0.1 grid. (Default changed from a mutable list to a tuple;
            values are unchanged.)
    """
    arena = ''
    # BUG FIX: np.linspace requires an integer sample count; the original
    # passed a float expression, which modern numpy rejects with TypeError.
    num_sizes = int(round((rew_range[1] - rew_range[0]) / 0.1)) + 1
    range_list = [round(v, 2)
                  for v in np.linspace(rew_range[0], rew_range[1], num_sizes)]
    # Two distinct sizes at the far wall.
    for size, position_goal in zip(random.sample(range_list, 2),
                                   [(30, 0, 35), (10, 0, 35)]):
        arena = add_object(arena, 'GoodGoal', size=(size, size, size),
                           pos=position_goal)
    arena = add_object(arena, "Agent", pos=(20, 0, 3), rot=0)
    save_name = '{}/{}'.format(target_path, arena_name)
    # Randomly chosen blackout schedule.
    blackout_options_2 = [[-10], [1000, 1000]]
    write_arena(save_name, time, arena,
                blackouts=random.choice(blackout_options_2))
    # NOTE(review): returns (20, 0, 20) although the agent is placed at
    # (20, 0, 3); preserved for caller compatibility.
    return 'in_front', (20, 0, 20), 0
def create_make_fall_1(target_path, arena_name, time=1000, is_train=False,
                       rew_range=[5, 50]):
    """
    The reward starts on top of a cardboard box; the agent has to push the
    box over so the reward falls down within reach.

    Returns (task_name, agent_position, agent_rotation).
    """
    arena = ''
    # A box of random footprint and height placed away from the arena walls.
    box_height = random.randint(2, 7)
    box_pos = (random.randint(5, 35), 0, random.randint(5, 35))
    box_size = (random.randint(2, 4), box_height, random.randint(2, 4))
    box_kind = random.choice(['Cardbox1', 'Cardbox2'])
    arena = add_object(arena, box_kind, size=box_size, pos=box_pos, rot=0)
    # The reward sphere is balanced centred on the box top.
    reward_size = random.randint(1, 5)
    reward_pos = (box_pos[0], box_size[1] + reward_size / 2, box_pos[2])
    arena = add_object(arena, random.choice(["GoodGoal", "GoodGoalMulti"]),
                       size=(reward_size, reward_size, reward_size),
                       pos=reward_pos)
    agent_rot = random_rotation()
    agent_pos = random_pos()
    arena = add_object(arena, "Agent", pos=agent_pos, rot=agent_rot)
    write_arena('{}/{}'.format(target_path, arena_name), time, arena)
    return 'make_fall', agent_pos, agent_rot
def create_arena_choice_2(target_path, arena_name, time=250, is_train=False,
                          rew_range=[0.5, 5]):
    """Create a choice arena: four 'GoodGoal's of distinct sizes, one per quadrant.

    Goal sizes are sampled without replacement from ``rew_range`` on a 0.1
    grid, so the agent must pick the largest reward.  Starts from the base
    arena returned by add_choice('').

    Returns (task_name, agent_position, agent_rotation).
    """
    arena = add_choice('')
    # np.linspace requires an integer sample count; the original float
    # expression raises TypeError on NumPy >= 1.18.
    n_points = int(round((rew_range[1] - rew_range[0]) / 0.1)) + 1
    range_list = [round(elem, 2)
                  for elem in np.linspace(rew_range[0], rew_range[1], n_points)]
    list_siz = random.sample(range_list, 4)
    # One goal per quadrant (original also had a duplicated `size = size =`
    # assignment for the fourth goal; the loop removes that redundancy).
    goal_positions = [(30, 0, 30), (10, 0, 30), (10, 0, 10), (30, 0, 10)]
    for size, position_goal in zip(list_siz, goal_positions):
        arena = add_object(arena, 'GoodGoal', size=(size, size, size),
                           pos=position_goal)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'choice2', (20, 0, 20), 0
def create_box_reasoning(target_path, arena_name, time=1000, is_train=True):
    """The reward sits on a transparent platform; the agent must push the box
    into the right position to reach the platform (via the ramp)."""
    # Fixed layout: two transparent platforms, one pushable box, one ramp.
    # All are placed with rot=90.
    layout = [
        ('WallTransparent', (4, 2, 4), (13.5, 0, 20)),
        ('WallTransparent', (4, 2, 4), (22, 0, 20)),
        ('Cardbox1', (4, 2, 4), (17.75, 0, 9)),
        ('Ramp', (8, 2, 8), (28, 0, 20)),
    ]
    arena = ''
    for category, obj_size, obj_pos in layout:
        arena = add_object(arena, category, size=obj_size, pos=obj_pos, rot=90)
    # Goal on top of the first platform.
    arena = add_object(arena, 'GoodGoal', size=(3, 3, 3), pos=(13.5, 2.25, 20), rot=0)
    agent_rotation = random_rotation()
    # Resample the agent spawn until it overlaps none of the placed objects.
    while True:
        agent_pos = random_pos()
        if not any(pos_on_obj(agent_pos, obj_pos, obj_size)
                   for _, obj_size, obj_pos in layout):
            break
    arena = add_object(arena, "Agent", pos=agent_pos, rot=agent_rotation)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'box_reasoning', (20, 0, 3), 0
def create_box_reasoning_hard(target_path, arena_name, time=1000, is_train=True):
    """Harder box-reasoning variant: the pushable box starts in a random
    corner, so the agent must first fetch it before building the path to the
    reward platform."""
    corner_choices = [(10, 0, 10), (10, 0, 30), (30, 0, 10), (30, 0, 30)]
    pos_box = random.choice(corner_choices)
    # Same fixed platforms/ramp as create_box_reasoning; only the box moves.
    layout = [
        ('WallTransparent', (4, 2, 4), (13.5, 0, 20)),
        ('WallTransparent', (4, 2, 4), (22, 0, 20)),
        ('Cardbox1', (4, 2, 4), pos_box),
        ('Ramp', (8, 2, 8), (28, 0, 20)),
    ]
    arena = ''
    for category, obj_size, obj_pos in layout:
        arena = add_object(arena, category, size=obj_size, pos=obj_pos, rot=90)
    arena = add_object(arena, 'GoodGoal', size=(3, 3, 3), pos=(13.5, 2.25, 20), rot=0)
    agent_rotation = random_rotation()
    # Resample the agent spawn until it overlaps none of the placed objects.
    while True:
        agent_pos = random_pos()
        if not any(pos_on_obj(agent_pos, obj_pos, obj_size)
                   for _, obj_size, obj_pos in layout):
            break
    arena = add_object(arena, "Agent", pos=agent_pos, rot=agent_rotation)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'box_reasoning', (20, 0, 3), 0
def create_box_reasoning_2(target_path, arena_name, time=1000, is_train=True):
    """Two-box variant: both pushable boxes start in random (distinct) corners
    and the goal platform sits further from the ramp, so two boxes are needed
    to bridge the gap."""
    corner_choices = [(10, 0, 10), (10, 0, 30), (30, 0, 10), (30, 0, 30)]
    pos_box1, pos_box2 = random.sample(corner_choices, k=2)
    layout = [
        ('WallTransparent', (4, 2, 4), (7.5, 0, 20)),
        ('WallTransparent', (4, 2, 4), (22, 0, 20)),
        ('Cardbox1', (4, 2, 4), pos_box1),
        ('Cardbox1', (4, 2, 4), pos_box2),
        ('Ramp', (8, 2, 8), (28, 0, 20)),
    ]
    arena = ''
    for category, obj_size, obj_pos in layout:
        arena = add_object(arena, category, size=obj_size, pos=obj_pos, rot=90)
    arena = add_object(arena, 'GoodGoal', size=(3, 3, 3), pos=(7.5, 2.25, 20), rot=0)
    agent_rotation = random_rotation()
    # Resample the agent spawn until it overlaps none of the placed objects.
    while True:
        agent_pos = random_pos()
        if not any(pos_on_obj(agent_pos, obj_pos, obj_size)
                   for _, obj_size, obj_pos in layout):
            break
    arena = add_object(arena, "Agent", pos=agent_pos, rot=agent_rotation)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'box_reasoning', (20, 0, 3), 0
def create_push_down(target_path, arena_name, time=1000, is_train=True):
    """The agent starts on a raised blue platform; a real (pushable) cardbox
    and an identical-looking immovable grey decoy sit on opposite halves of
    the platform.  The agent must push the real box down toward the goal,
    which sits on a small grey pedestal in the far strip of the arena.

    Returns ('push_down_decieve' [sic], (20, 0, 3), 0).
    """
    # Coin flip decides which side (left/right half) holds the real box and
    # which holds the decoy.
    coin = random.choice([0,1])
    if coin == 1:
        box_pos = (random.randint(25, 140) / 10., 1., random.randint(25, 200) / 10.)
        fake_box_pos = (random.randint(260, 380) / 10., 1., random.randint(25, 200) / 10.)
    else:
        fake_box_pos = (random.randint(25, 140) / 10., 1., random.randint(25, 200) / 10.)
        box_pos = (random.randint(260, 380) / 10., 1., random.randint(25, 200) / 10.)
    # Goal somewhere in the far strip (z in [25, 39]), raised on a pedestal.
    pos_goal = (random.randint(10, 390) / 10., 1., random.randint(250, 390) / 10.)
    goal_wal_pos = (pos_goal[0], 0, pos_goal[2])
    arena = ''
    # Small grey pedestal directly under the goal.
    arena = add_object(arena, 'Wall', size=(0.3, 1, 0.3), pos=goal_wal_pos, rot=0, RGB= (153, 153, 153))
    # Large blue raised floor covering the near half of the arena, plus the
    # blue starting block the agent stands on.
    arena = add_object(arena, 'Wall', size=(40, 1, 20), pos=(20, 0, 10), rot=0, RGB= (0, 0, 255))
    arena = add_object(arena, 'Wall', size=(11, 2, 5), pos=(20, 1, 2.5), rot=0, RGB= (0, 0, 255))
    # Thin grey divider splitting the platform into two halves.
    arena = add_object(arena, 'Wall', size=(1, .5, 15), pos=(20, 1, 12.5), rot=0, RGB=(153, 153, 153))
    # Real pushable box vs. immovable grey Wall decoy of the same size.
    arena = add_object(arena, 'Cardbox2', size=(1.5, 1.5, 1.5), pos=box_pos, rot=0)
    arena = add_object(arena, 'Wall', size=(1.5, 1.5, 1.5), pos=fake_box_pos, rot=0, RGB=(153, 153, 153))
    arena = add_object(arena, 'GoodGoal', size=(1,1,1), pos=pos_goal, rot=0)
    # Agent spawns on top of the starting block, fixed position/rotation.
    arena = add_object(arena, "Agent", pos= (20, 3, 2.5), rot=2)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'push_down_decieve', (20, 0, 3), 0
def create_push_red(target_path, arena_name, time=1000, is_train=True):
    """The goal sits inside a rotated DeathZone; the agent must push a flat
    cardbox over/into the zone to reach the reward safely."""
    arena = ''
    arena = add_object(arena, 'DeathZone', size=(10, 0, 10), pos=(20, 0, 20), rot=45)
    arena = add_object(arena, 'GoodGoal', size=(1, 1, 1), pos=(20, 0, 20), rot=0)
    # Resample agent and box placements until: the box is clear of the zone
    # (with a generous 15x15 footprint), the agent is clear of the zone, and
    # the agent does not overlap the box.
    while True:
        agent_pos = random_pos()
        pos_box1 = (random.randint(65, 335) / 10., 0., random.randint(65, 335) / 10.)
        if (not pos_on_obj(pos_box1, (20, 0, 20), (15, 0, 15))
                and not pos_on_obj(agent_pos, (20, 0, 20), (15, 0, 15))
                and not pos_on_obj(agent_pos, pos_box1, (6, .2, 2))):
            break
    agent_rotation = random_rotation()
    box_rotation = random_rotation()
    arena = add_object(arena, "Agent", pos=agent_pos, rot=agent_rotation)
    arena = add_object(arena, 'Cardbox2', size=(6, .2, 2), pos=pos_box1, rot=box_rotation)
    save_name = '{}/{}'.format(target_path, arena_name)
    write_arena(save_name, time, arena)
    return 'push_red', (20, 0, 3), 0
def pos_on_obj(agent_pos, obj_pos, obs_size):
    """Return True when agent_pos overlaps the object's footprint on the x
    axis OR on the z axis (ground-plane check; y is ignored).

    NOTE(review): this also flags positions merely *aligned* with the object
    on a single axis — deliberately conservative for spawn rejection, but
    stricter than the name suggests; confirm intent before reusing.
    """
    half_x = obs_size[0] / 2
    half_z = obs_size[2] / 2
    within_x = obj_pos[0] - half_x <= agent_pos[0] <= obj_pos[0] + half_x
    within_z = obj_pos[2] - half_z <= agent_pos[2] <= obj_pos[2] + half_z
    return within_x or within_z
| 38.079058
| 125
| 0.634243
| 6,426
| 45,276
| 4.213041
| 0.043417
| 0.035164
| 0.055332
| 0.068777
| 0.888191
| 0.87168
| 0.849924
| 0.829387
| 0.812507
| 0.803088
| 0
| 0.039784
| 0.24386
| 45,276
| 1,188
| 126
| 38.111111
| 0.751015
| 0.112908
| 0
| 0.684
| 0
| 0
| 0.056631
| 0.000632
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.006667
| 0
| 0.116
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3173549b78f25e85fad530fb2464104ba94a5011
| 86
|
py
|
Python
|
Level1/Lessons12903/wowo0709.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons12903/wowo0709.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons12903/wowo0709.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | 1
|
2021-04-05T07:35:59.000Z
|
2021-04-05T07:35:59.000Z
|
def solution(s):
    """Return the middle character of s, or the middle two characters when
    the length is even."""
    mid, odd = divmod(len(s), 2)
    return s[mid] if odd else s[mid - 1:mid + 1]
| 28.666667
| 54
| 0.488372
| 22
| 86
| 1.909091
| 0.454545
| 0.190476
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 0.255814
| 86
| 3
| 54
| 28.666667
| 0.546875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
3185b8f83ea18b9e532fcc72f94a943c356cf5dc
| 43
|
py
|
Python
|
init_db.py
|
notnamed/social-graph
|
1120889bcf72901c69a07797fdbf689b36853e6f
|
[
"MIT"
] | null | null | null |
init_db.py
|
notnamed/social-graph
|
1120889bcf72901c69a07797fdbf689b36853e6f
|
[
"MIT"
] | null | null | null |
init_db.py
|
notnamed/social-graph
|
1120889bcf72901c69a07797fdbf689b36853e6f
|
[
"MIT"
] | null | null | null |
# Bootstrap script: initialise the social-graph database schema.
import social_graph
social_graph.init_db()
| 14.333333
| 22
| 0.860465
| 7
| 43
| 4.857143
| 0.714286
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 43
| 2
| 23
| 21.5
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
319b781e5814130abde4cc96fd6e91ef3e46db80
| 49
|
py
|
Python
|
src/compgen2/gov/__init__.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | 1
|
2022-02-02T12:41:06.000Z
|
2022-02-02T12:41:06.000Z
|
src/compgen2/gov/__init__.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | null | null | null |
src/compgen2/gov/__init__.py
|
CorrelAid/compgen-ii-cgv
|
810a044d6bbe1ce058a359115e3e5fc71a358549
|
[
"MIT"
] | null | null | null |
from .gov import Gov
from .matcher import Matcher
| 24.5
| 28
| 0.816327
| 8
| 49
| 5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 2
| 28
| 24.5
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
31a22c249c33ef768a09aa92564829e135db5a00
| 229
|
py
|
Python
|
msc/tests/test_WNVJungle.py
|
WNVJungle/git-github-travis-lab
|
f8127748ee7a31be457d428a09fe7b66c88c54b2
|
[
"Apache-2.0"
] | null | null | null |
msc/tests/test_WNVJungle.py
|
WNVJungle/git-github-travis-lab
|
f8127748ee7a31be457d428a09fe7b66c88c54b2
|
[
"Apache-2.0"
] | 3
|
2019-10-23T10:40:14.000Z
|
2019-10-23T10:54:11.000Z
|
msc/tests/test_WNVJungle.py
|
WNVJungle/git-github-travis-lab
|
f8127748ee7a31be457d428a09fe7b66c88c54b2
|
[
"Apache-2.0"
] | 11
|
2019-10-23T09:50:57.000Z
|
2019-10-23T10:24:18.000Z
|
from msc.rot13 import rot13
from msc.rot13 import rot13_char
def test_rot13_char_a():
    """'a' rotated by 13 places is 'n'."""
    assert "n" == rot13_char("a"), "Unexpected character"
def test_rot13_abcdef():
    """rot13 is self-inverse: rot13('nopqrs') round-trips to 'abcdef'."""
    assert "abcdef" == rot13("nopqrs"), "Unexpected character"
| 22.9
| 59
| 0.742358
| 33
| 229
| 4.939394
| 0.424242
| 0.165644
| 0.147239
| 0.220859
| 0.282209
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.126638
| 229
| 9
| 60
| 25.444444
| 0.735
| 0
| 0
| 0
| 0
| 0
| 0.235808
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
730e223c864123dc0dc8d6f805e3f412e178069a
| 197
|
py
|
Python
|
web_elements/content.py
|
tronze/PythonCustomCalendar
|
bd7ffbbf28616ccf65e545605ede5f3dda251c9a
|
[
"BSD-3-Clause"
] | 8
|
2019-03-05T12:23:07.000Z
|
2021-01-10T09:49:27.000Z
|
web_elements/content.py
|
tronze/PythonCustomCalendar
|
bd7ffbbf28616ccf65e545605ede5f3dda251c9a
|
[
"BSD-3-Clause"
] | null | null | null |
web_elements/content.py
|
tronze/PythonCustomCalendar
|
bd7ffbbf28616ccf65e545605ede5f3dda251c9a
|
[
"BSD-3-Clause"
] | 2
|
2019-02-18T06:34:34.000Z
|
2019-03-05T12:17:22.000Z
|
from .node import Node
class Content(Node):
    """Leaf node that renders a fixed piece of content verbatim."""
    def __init__(self, content):
        # content: the value returned as-is by create_element().
        super().__init__()
        self.content = content
    def create_element(self):
        """Return the stored content unchanged."""
        return self.content
| 16.416667
| 32
| 0.634518
| 23
| 197
| 5.043478
| 0.521739
| 0.284483
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269036
| 197
| 11
| 33
| 17.909091
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
733a0470c3524cff14b8d3a54298cb7055db5599
| 44
|
py
|
Python
|
tests/test_suite.py
|
kasium/alembic
|
af7963889abffe2ab8dc640d4fdcb8cea6d53942
|
[
"MIT"
] | 1,324
|
2018-11-27T05:44:41.000Z
|
2022-03-30T19:49:20.000Z
|
tests/test_suite.py
|
kasium/alembic
|
af7963889abffe2ab8dc640d4fdcb8cea6d53942
|
[
"MIT"
] | 452
|
2018-11-27T22:43:38.000Z
|
2022-03-28T04:33:43.000Z
|
tests/test_suite.py
|
kasium/alembic
|
af7963889abffe2ab8dc640d4fdcb8cea6d53942
|
[
"MIT"
] | 159
|
2018-11-29T18:46:15.000Z
|
2022-03-28T16:34:19.000Z
|
from alembic.testing.suite import * # noqa
| 22
| 43
| 0.75
| 6
| 44
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 1
| 44
| 44
| 0.891892
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b410a1c2a7ffaab5e3100a670f9684254bc4b69c
| 2,990
|
py
|
Python
|
src/mutator/test_uoi_strategy.py
|
AAU-PSix/canary
|
93b07d23cd9380adc03a6aa1291a13eaa3b3008c
|
[
"MIT"
] | null | null | null |
src/mutator/test_uoi_strategy.py
|
AAU-PSix/canary
|
93b07d23cd9380adc03a6aa1291a13eaa3b3008c
|
[
"MIT"
] | null | null | null |
src/mutator/test_uoi_strategy.py
|
AAU-PSix/canary
|
93b07d23cd9380adc03a6aa1291a13eaa3b3008c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from .uoi_strategy import UoiStrategy
from ts import (
LanguageLibrary,
Parser,
CSyntax,
)
class TestAbsStrategu(TestCase):
    """Tests for UoiStrategy on update ('--a'), arithmetic ('-a') and logical
    ('!a') unary expressions: capture count, mutation count, and mutated text.

    NOTE(review): the class name looks like a typo (presumably
    'TestUoiStrategy'); kept unchanged for test-discovery compatibility.
    """

    def setUp(self) -> None:
        LanguageLibrary.build()
        self._language = LanguageLibrary.c()
        self._parser = Parser.create_with_language(self._language)
        self._syntax = CSyntax()

    def _tree_and_strategy(self, program):
        # Parse `program` and pair the tree with a fresh UoiStrategy.
        tree = self._parser.parse(program)
        return tree, UoiStrategy(self._parser)

    def test_capture_update_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("--a;")
        self.assertEqual(len(strategy.capture(tree.root)), 1)

    def test_mutations_update_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("--a;")
        mutations = strategy.mutations(self._parser, tree, tree.root)
        self.assertEqual(len(mutations), 1)

    def test_mutate_update_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("--a;")
        mutation = strategy.mutate(tree, tree.root)
        self.assertEqual(mutation.text, "++a;")

    def test_capture_arithmetic_unary_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("-a;")
        self.assertEqual(len(strategy.capture(tree.root)), 1)

    def test_mutations_arithmetic_unary_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("-a;")
        mutations = strategy.mutations(self._parser, tree, tree.root)
        self.assertEqual(len(mutations), 1)

    def test_mutate_arithmetic_unary_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("-a;")
        mutation = strategy.mutate(tree, tree.root)
        self.assertEqual(mutation.text, "+a;")

    def test_capture_logical_unary_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("!a;")
        self.assertEqual(len(strategy.capture(tree.root)), 1)

    def test_mutations_logical_unary_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("!a;")
        mutations = strategy.mutations(self._parser, tree, tree.root)
        self.assertEqual(len(mutations), 1)

    def test_mutate_logical_unary_expression(self) -> None:
        tree, strategy = self._tree_and_strategy("!a;")
        mutation = strategy.mutate(tree, tree.root)
        self.assertEqual(mutation.text, "a;")
| 27.685185
| 66
| 0.619732
| 309
| 2,990
| 5.799353
| 0.142395
| 0.122768
| 0.090402
| 0.125558
| 0.851004
| 0.851004
| 0.851004
| 0.851004
| 0.851004
| 0.851004
| 0
| 0.002754
| 0.271237
| 2,990
| 108
| 67
| 27.685185
| 0.819642
| 0
| 0
| 0.607595
| 0
| 0
| 0.013039
| 0
| 0
| 0
| 0
| 0
| 0.113924
| 1
| 0.126582
| false
| 0
| 0.037975
| 0
| 0.177215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b479ea6f9d3acc9f01d13fa6538cc476a4e77ace
| 124
|
py
|
Python
|
python/testData/hierarchy/call/Static/ArgumentList/main.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/hierarchy/call/Static/ArgumentList/main.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/hierarchy/call/Static/ArgumentList/main.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from file_1 import *
target_<caret>func()
func1(target_func)
func1(target_func())
func2(target_func)
func2(target_func())
| 13.777778
| 20
| 0.774194
| 19
| 124
| 4.736842
| 0.473684
| 0.444444
| 0.333333
| 0.422222
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044248
| 0.08871
| 124
| 8
| 21
| 15.5
| 0.752212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b48a5e0433c477b5bbc3ef3fba7800faa2b49177
| 32
|
py
|
Python
|
lim/genetics/heritability/__init__.py
|
glimix/glimix
|
22c9b94732918bce31f64cb33ce368ea85ead478
|
[
"MIT"
] | 2
|
2016-12-16T14:14:59.000Z
|
2017-01-31T16:50:08.000Z
|
lim/genetics/heritability/__init__.py
|
glimix/glimix
|
22c9b94732918bce31f64cb33ce368ea85ead478
|
[
"MIT"
] | null | null | null |
lim/genetics/heritability/__init__.py
|
glimix/glimix
|
22c9b94732918bce31f64cb33ce368ea85ead478
|
[
"MIT"
] | 2
|
2017-02-13T14:34:37.000Z
|
2017-02-15T14:27:32.000Z
|
from ._estimate import estimate
| 16
| 31
| 0.84375
| 4
| 32
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
81fd37ba1f5605285358f7bfabbd32206b6341ee
| 7,284
|
py
|
Python
|
tests/test_path_zip.py
|
jhermann/dephell_archive
|
582e7e38d7dd702a267b6436da42bc372c8e4d44
|
[
"MIT"
] | null | null | null |
tests/test_path_zip.py
|
jhermann/dephell_archive
|
582e7e38d7dd702a267b6436da42bc372c8e4d44
|
[
"MIT"
] | 19
|
2019-12-17T12:29:36.000Z
|
2020-06-03T07:56:22.000Z
|
tests/test_path_zip.py
|
jhermann/dephell_archive
|
582e7e38d7dd702a267b6436da42bc372c8e4d44
|
[
"MIT"
] | 6
|
2019-09-04T05:30:51.000Z
|
2021-09-28T02:43:22.000Z
|
# built-in
from pathlib import Path
# project
from dephell_archive import ArchivePath
# Shared fixture: a prebuilt wheel archive checked into tests/requirements.
wheel_path = Path(__file__).parent / 'requirements' / 'wheel.whl'
def test_open_zip(tmpdir):
    """A member file inside a wheel can be opened and read as text."""
    path = ArchivePath(
        archive_path=wheel_path,
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'dephell' / '__init__.py'
    with subpath.open() as stream:
        content = stream.read()
    assert 'from .controllers' in content
def test_glob_zip(tmpdir):
    """glob() matches archive members by pattern and exposes member_path."""
    path = ArchivePath(
        archive_path=wheel_path,
        cache_path=Path(str(tmpdir)),
    )
    paths = list(path.glob('*/__init__.py'))
    assert len(paths) == 1
    assert paths[0].member_path.as_posix() == 'dephell/__init__.py'
def test_exists(tmpdir):
    """exists() is True for real archive members, False for missing ones."""
    path = ArchivePath(
        archive_path=wheel_path,
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'dephell' / '__init__.py'
    assert subpath.exists() is True
    subpath = path / 'dephell' / 'some_junk.py'
    assert subpath.exists() is False
def test_is_file(tmpdir):
    """is_file() is True for file members and False for directories."""
    path = ArchivePath(
        archive_path=wheel_path,
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'dephell' / '__init__.py'
    assert subpath.is_file() is True
    subpath = path / 'dephell'
    assert subpath.is_file() is False
def test_is_dir(tmpdir):
    """is_dir() is False for file members and True for directories."""
    path = ArchivePath(
        archive_path=wheel_path,
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'dephell' / '__init__.py'
    assert subpath.is_dir() is False
    subpath = path / 'dephell'
    assert subpath.exists() is True
    assert subpath.is_dir() is True
def test_is_dir_explicit_entry(tmpdir):
    """Directories stored as explicit zip entries are detected at each level."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'graphviz-0.13.2'
    assert subpath.is_dir() is True
    subpath = subpath / 'graphviz'
    assert subpath.exists() is True
    assert subpath.is_dir() is True
    subpath = subpath / '__init__.py'
    assert subpath.is_dir() is False
def test_iterdir_non_recursive(tmpdir):
    """Non-recursive iterdir() at the root yields only the top-level entry."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
        cache_path=Path(str(tmpdir)),
    )
    paths = [str(subpath) for subpath in path.iterdir(_recursive=False)]
    assert paths == ['dnspython-1.16.0']
def test_iterdir_recursive(tmpdir):
    """Recursive iterdir() yields every file and directory exactly once."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
        cache_path=Path(str(tmpdir)),
    )
    paths = [str(subpath) for subpath in path.iterdir(_recursive=True)]
    assert 'dnspython-1.16.0' in paths
    assert str(Path('dnspython-1.16.0', 'setup.py')) in paths
    assert str(Path('dnspython-1.16.0', 'dns', '__init__.py')) in paths
    assert str(Path('dnspython-1.16.0', 'dns', 'rdtypes')) in paths
    assert str(Path('dnspython-1.16.0', 'dns', 'rdtypes', 'ANY')) in paths
    # Loop variable renamed from `path`, which shadowed the ArchivePath above.
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
def test_iterdir_subpath_non_recursive(tmpdir):
    """Non-recursive iterdir() on subdirectories lists only direct children."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'dnspython-1.16.0'
    paths = [str(item) for item in subpath.iterdir(_recursive=False)]
    # Loop variable renamed from `path`, which shadowed the ArchivePath above.
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
    assert 'dns' in paths
    assert 'dnspython.egg-info' in paths
    assert 'setup.py' in paths
    subpath = subpath / 'dns'
    paths = [str(item) for item in subpath.iterdir(_recursive=False)]
    assert 'rdtypes' in paths
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
def test_iterdir_subpath_recursive(tmpdir):
    """Recursive iterdir() on a subdirectory yields paths relative to it."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'dnspython-1.16.0'
    paths = [str(item) for item in subpath.iterdir(_recursive=True)]
    assert 'setup.py' in paths
    assert Path('dnspython-1.16.0', 'dns') not in paths
    assert 'dns' in paths
    assert str(Path('dns', '__init__.py')) in paths
    # Loop variable renamed from `path`, which shadowed the ArchivePath above.
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
def test_iterdir_non_recursive_with_dirs(tmpdir):
    """Non-recursive root listing of a zip containing explicit dir entries."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
        cache_path=Path(str(tmpdir)),
    )
    paths = [str(subpath) for subpath in path.iterdir(_recursive=False)]
    assert paths == ['graphviz-0.13.2']
def test_iterdir_recursive_with_dirs(tmpdir):
    """Recursive listing of a zip with explicit dir entries, no duplicates."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
        cache_path=Path(str(tmpdir)),
    )
    paths = [str(subpath) for subpath in path.iterdir(_recursive=True)]
    assert 'graphviz-0.13.2' in paths
    assert str(Path('graphviz-0.13.2', 'setup.py')) in paths
    assert str(Path('graphviz-0.13.2', 'graphviz', '__init__.py')) in paths
    # Loop variable renamed from `path`, which shadowed the ArchivePath above.
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
def test_iterdir_subpath_non_recursive_with_dirs(tmpdir):
    """Non-recursive subdir listing; egg-info contents must match exactly."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'graphviz-0.13.2'
    paths = [str(item) for item in subpath.iterdir(_recursive=False)]
    assert 'graphviz' in paths
    assert 'graphviz.egg-info' in paths
    assert 'setup.py' in paths
    # Loop variable renamed from `path`, which shadowed the ArchivePath above.
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
    subpath = subpath / 'graphviz.egg-info'
    paths = [str(item) for item in subpath.iterdir(_recursive=False)]
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
    assert set(paths) == {
        'dependency_links.txt',
        'PKG-INFO',
        'requires.txt',
        'SOURCES.txt',
        'top_level.txt',
    }
def test_iterdir_subpath_recursive_with_dirs(tmpdir):
    """Recursive subdir listing of a zip with explicit dir entries."""
    path = ArchivePath(
        archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
        cache_path=Path(str(tmpdir)),
    )
    subpath = path / 'graphviz-0.13.2'
    paths = [str(item) for item in subpath.iterdir(_recursive=True)]
    assert 'graphviz' in paths
    assert str(Path('graphviz', '__init__.py')) in paths
    # Loop variable renamed from `path`, which shadowed the ArchivePath above.
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
def test_iterdir_non_recursive_wheel(tmpdir):
    """Non-recursive root listing of a wheel: package dir plus dist-info."""
    path = ArchivePath(
        archive_path=wheel_path,
        cache_path=Path(str(tmpdir)),
    )
    paths = [str(subpath) for subpath in path.iterdir(_recursive=False)]
    assert len(paths) == 2
    assert 'dephell' in paths
    assert 'dephell-0.2.0.dist-info' in paths
def test_iterdir_recursive_wheel(tmpdir):
    """Recursive listing of a wheel: package files and dist-info, no dupes."""
    path = ArchivePath(
        archive_path=wheel_path,
        cache_path=Path(str(tmpdir)),
    )
    paths = [str(subpath) for subpath in path.iterdir(_recursive=True)]
    assert 'dephell' in paths
    assert str(Path('dephell', '__init__.py')) in paths
    assert 'dephell-0.2.0.dist-info' in paths
    assert str(Path('dephell-0.2.0.dist-info', 'WHEEL')) in paths
    # Loop variable renamed from `path`, which shadowed the ArchivePath above.
    for entry in paths:
        assert paths.count(entry) == 1, 'duplicate dir: ' + entry
| 29.852459
| 75
| 0.647996
| 983
| 7,284
| 4.629705
| 0.081384
| 0.055372
| 0.077126
| 0.09844
| 0.871017
| 0.801143
| 0.779609
| 0.776972
| 0.756537
| 0.722259
| 0
| 0.021183
| 0.215815
| 7,284
| 243
| 76
| 29.975309
| 0.77556
| 0.002197
| 0
| 0.590164
| 0
| 0
| 0.177839
| 0.009498
| 0
| 0
| 0
| 0
| 0.295082
| 1
| 0.087432
| false
| 0
| 0.010929
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c326b4e481a46d1f175ab5f0dd8013ca9af26194
| 18,975
|
py
|
Python
|
tests/unit/test_nhl_boxscore.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | null | null | null |
tests/unit/test_nhl_boxscore.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | null | null | null |
tests/unit/test_nhl_boxscore.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | 1
|
2020-07-08T16:05:25.000Z
|
2020-07-08T16:05:25.000Z
|
from flexmock import flexmock
from mock import patch, PropertyMock
from pyquery import PyQuery as pq
from sportsreference import utils
from sportsreference.constants import AWAY, HOME
from sportsreference.nhl.boxscore import Boxscore, Boxscores
class MockField:
    """PyQuery-field stand-in: stores a value and echoes it via text()."""
    def __init__(self, field):
        self._field = field
    def text(self):
        return self._field
class MockBoxscoreData:
    """Parsed-boxscore stand-in: calling it returns itself (emulating PyQuery
    lookup) and items() yields the stored fields as a one-element list."""
    def __init__(self, fields):
        self._fields = fields
    def __call__(self, field):
        # The lookup argument is ignored; chained calls keep returning self.
        return self
    def items(self):
        return [self._fields]
class MockName:
    """Team-name stand-in exposing the name via a PyQuery-style text() method."""
    def __init__(self, name):
        self._name = name
    def text(self):
        return self._name
def mock_pyquery(url):
    """requests.get replacement: returns canned boxscore HTML wrapped in a
    minimal response-like object (status_code is deliberately 404).

    NOTE(review): relies on module-level `read_file` and `BOXSCORE` defined
    elsewhere in this file — confirm they exist before reuse.
    """
    class MockPQ:
        def __init__(self, html_contents):
            self.status_code = 404
            self.html_contents = html_contents
            self.text = html_contents
    boxscore = read_file('%s.html' % BOXSCORE)
    return MockPQ(boxscore)
class TestNHLBoxscore:
@patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
flexmock(Boxscore) \
.should_receive('_parse_game_data') \
.and_return(None)
self.boxscore = Boxscore(None)
def test_away_team_wins(self):
fake_away_goals = PropertyMock(return_value=4)
fake_home_goals = PropertyMock(return_value=3)
type(self.boxscore)._away_goals = fake_away_goals
type(self.boxscore)._home_goals = fake_home_goals
assert self.boxscore.winner == AWAY
def test_home_team_wins(self):
fake_away_goals = PropertyMock(return_value=3)
fake_home_goals = PropertyMock(return_value=4)
type(self.boxscore)._away_goals = fake_away_goals
type(self.boxscore)._home_goals = fake_home_goals
assert self.boxscore.winner == HOME
def test_winning_name_is_home(self):
expected_name = 'Home Name'
fake_winner = PropertyMock(return_value=HOME)
fake_home_name = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._home_name = fake_home_name
assert self.boxscore.winning_name == expected_name
def test_winning_name_is_away(self):
expected_name = 'Away Name'
fake_winner = PropertyMock(return_value=AWAY)
fake_away_name = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._away_name = fake_away_name
assert self.boxscore.winning_name == expected_name
def test_winning_abbr_is_home(self):
expected_name = 'HOME'
flexmock(utils) \
.should_receive('_parse_abbreviation') \
.and_return(expected_name)
fake_winner = PropertyMock(return_value=HOME)
fake_home_abbr = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._home_abbr = fake_home_abbr
assert self.boxscore.winning_abbr == expected_name
def test_winning_abbr_is_away(self):
expected_name = 'AWAY'
flexmock(utils) \
.should_receive('_parse_abbreviation') \
.and_return(expected_name)
fake_winner = PropertyMock(return_value=AWAY)
fake_away_abbr = PropertyMock(return_value=MockName(expected_name))
type(self.boxscore).winner = fake_winner
type(self.boxscore)._away_abbr = fake_away_abbr
assert self.boxscore.winning_abbr == expected_name
def test_losing_name_is_home(self):
    """losing_name is the home team's name when AWAY wins."""
    expected_name = 'Home Name'
    fake_winner = PropertyMock(return_value=AWAY)
    fake_home_name = PropertyMock(return_value=MockName(expected_name))
    type(self.boxscore).winner = fake_winner
    type(self.boxscore)._home_name = fake_home_name
    assert self.boxscore.losing_name == expected_name
def test_losing_name_is_away(self):
    """losing_name is the away team's name when HOME wins."""
    expected_name = 'Away Name'
    fake_winner = PropertyMock(return_value=HOME)
    fake_away_name = PropertyMock(return_value=MockName(expected_name))
    type(self.boxscore).winner = fake_winner
    type(self.boxscore)._away_name = fake_away_name
    assert self.boxscore.losing_name == expected_name
def test_losing_abbr_is_home(self):
    """losing_abbr yields the home abbreviation when AWAY wins."""
    expected_name = 'HOME'
    flexmock(utils) \
        .should_receive('_parse_abbreviation') \
        .and_return(expected_name)
    fake_winner = PropertyMock(return_value=AWAY)
    fake_home_abbr = PropertyMock(return_value=MockName(expected_name))
    type(self.boxscore).winner = fake_winner
    type(self.boxscore)._home_abbr = fake_home_abbr
    assert self.boxscore.losing_abbr == expected_name
def test_losing_abbr_is_away(self):
    """losing_abbr yields the away abbreviation when HOME wins."""
    expected_name = 'AWAY'
    flexmock(utils) \
        .should_receive('_parse_abbreviation') \
        .and_return(expected_name)
    fake_winner = PropertyMock(return_value=HOME)
    fake_away_abbr = PropertyMock(return_value=MockName(expected_name))
    type(self.boxscore).winner = fake_winner
    type(self.boxscore)._away_abbr = fake_away_abbr
    assert self.boxscore.losing_abbr == expected_name
def test_invalid_away_game_winning_goals_returns_default(self):
    """A non-numeric entry in the parsed list is skipped: 0 + 1 == 1."""
    goals = ['0', '1', 'bad']
    fake_goals = PropertyMock(return_value=goals)
    fake_num_skaters = PropertyMock(return_value=3)
    fake_num_goalies = PropertyMock(return_value=0)
    type(self.boxscore)._away_game_winning_goals = fake_goals
    type(self.boxscore)._away_skaters = fake_num_skaters
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_game_winning_goals == 1
def test_invalid_away_even_strength_assists_returns_default(self):
    """A non-numeric away even-strength assist entry is skipped: 0 + 1 == 1."""
    assists = ['0', '1', 'bad']
    fake_assists = PropertyMock(return_value=assists)
    fake_num_skaters = PropertyMock(return_value=3)
    fake_num_goalies = PropertyMock(return_value=0)
    type(self.boxscore)._away_even_strength_assists = fake_assists
    type(self.boxscore)._away_skaters = fake_num_skaters
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_even_strength_assists == 1
def test_invalid_home_even_strength_assists_returns_default(self):
    """A non-numeric home even-strength assist entry is skipped: 0 + 1 == 1."""
    assists = ['0', '1', 'bad']
    fake_assists = PropertyMock(return_value=assists)
    fake_num_skaters = PropertyMock(return_value=0)
    fake_num_goalies = PropertyMock(return_value=0)
    type(self.boxscore)._home_even_strength_assists = fake_assists
    # NOTE(review): the away counters are patched here even though the
    # property under test is home-side — presumably home stats are read
    # after offsetting past the away players' rows; confirm in Boxscore.
    type(self.boxscore)._away_skaters = fake_num_skaters
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_even_strength_assists == 1
def test_invalid_away_power_play_assists_returns_default(self):
    """A non-numeric away power-play assist entry is skipped: 0 + 1 == 1."""
    assists = ['0', '1', 'bad']
    fake_assists = PropertyMock(return_value=assists)
    fake_num_skaters = PropertyMock(return_value=3)
    fake_num_goalies = PropertyMock(return_value=0)
    type(self.boxscore)._away_power_play_assists = fake_assists
    type(self.boxscore)._away_skaters = fake_num_skaters
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_power_play_assists == 1
def test_invalid_home_power_play_assists_returns_default(self):
    """A non-numeric home power-play assist entry is skipped: 0 + 1 == 1.

    Renamed from the original 'assits' typo so the test name matches the
    property under test.
    """
    assists = ['0', '1', 'bad']
    fake_assists = PropertyMock(return_value=assists)
    fake_num_skaters = PropertyMock(return_value=0)
    fake_num_goalies = PropertyMock(return_value=0)
    type(self.boxscore)._home_power_play_assists = fake_assists
    # Away counters are patched because home stats are read past the
    # away players' rows.
    type(self.boxscore)._away_skaters = fake_num_skaters
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_power_play_assists == 1
def test_invalid_away_short_handed_assists_returns_default(self):
    """A non-numeric away short-handed assist entry is skipped: 0 + 1 == 1."""
    assists = ['0', '1', 'bad']
    fake_assists = PropertyMock(return_value=assists)
    fake_num_skaters = PropertyMock(return_value=3)
    fake_num_goalies = PropertyMock(return_value=0)
    type(self.boxscore)._away_short_handed_assists = fake_assists
    type(self.boxscore)._away_skaters = fake_num_skaters
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_short_handed_assists == 1
def test_invalid_home_short_handed_assists_returns_default(self):
    """A non-numeric home short-handed assist entry is skipped: 0 + 1 == 1.

    Renamed from the original 'assits' typo so the test name matches the
    property under test.
    """
    assists = ['0', '1', 'bad']
    fake_assists = PropertyMock(return_value=assists)
    fake_num_skaters = PropertyMock(return_value=0)
    fake_num_goalies = PropertyMock(return_value=0)
    type(self.boxscore)._home_short_handed_assists = fake_assists
    # Away counters are patched because home stats are read past the
    # away players' rows.
    type(self.boxscore)._away_skaters = fake_num_skaters
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_short_handed_assists == 1
def test_invalid_url_returns_none(self):
    """An empty/invalid boxscore URL yields no HTML page."""
    result = Boxscore(None)._retrieve_html_page('')
    assert result is None
def test_regular_season_information(self):
    """A regular-season header parses date/time/attendance/arena/duration
    and leaves playoff_round unset."""
    fields = {
        'date': 'October 5, 2017',
        'playoff_round': None,
        'time': '7:00 PM',
        'attendance': 17565,
        'arena': 'TD Garden',
        'duration': '2:39'
    }
    mock_field = """October 5, 2017, 7:00 PM
Attendance: 17,565
Arena: TD Garden
Game Duration: 2:39
Logos via Sports Logos.net / About logos
"""
    m = MockBoxscoreData(MockField(mock_field))
    self.boxscore._parse_game_date_and_location(m)
    for field, value in fields.items():
        assert getattr(self.boxscore, field) == value
def test_playoffs_information(self):
    """A playoff header additionally parses the round name."""
    fields = {
        'date': 'June 7, 2018',
        'playoff_round': 'Stanley Cup Final',
        'time': '8:00 PM',
        'attendance': 18529,
        'arena': 'T-Mobile Arena',
        'duration': '2:45'
    }
    mock_field = """June 7, 2018, 8:00 PM
Stanley Cup Final
Attendance: 18,529
Arena: T-Mobile Arena
Game Duration: 2:45
Logos via Sports Logos.net / About logos
"""
    m = MockBoxscoreData(MockField(mock_field))
    self.boxscore._parse_game_date_and_location(m)
    for field, value in fields.items():
        assert getattr(self.boxscore, field) == value
def test_no_game_information(self):
    """An effectively empty header leaves every parsed field blank/None."""
    fields = {
        'date': '',
        'playoff_round': None,
        'time': None,
        'attendance': None,
        'arena': None,
        'duration': None
    }
    mock_field = '\n'
    m = MockBoxscoreData(MockField(mock_field))
    self.boxscore._parse_game_date_and_location(m)
    for field, value in fields.items():
        assert getattr(self.boxscore, field) == value
def test_limited_game_information(self):
    """Missing time/attendance/duration lines parse as None, others still fill."""
    fields = {
        'date': 'June 7, 2018',
        'playoff_round': 'Stanley Cup Final',
        'time': None,
        'attendance': None,
        'arena': 'T-Mobile Arena',
        'duration': None
    }
    mock_field = """June 7, 2018
Stanley Cup Final
Arena: T-Mobile Arena
Logos via Sports Logos.net / About logos
"""
    m = MockBoxscoreData(MockField(mock_field))
    self.boxscore._parse_game_date_and_location(m)
    for field, value in fields.items():
        assert getattr(self.boxscore, field) == value
def test_away_shutout_single_goalies(self):
    """With one away goalie, only the first shutout entry counts."""
    shutout = ['1', '0']
    fake_shutout = PropertyMock(return_value=shutout)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._away_shutout = fake_shutout
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_shutout == 1
def test_away_shutout_multiple_goalies(self):
    """With two away goalies, the first two entries are summed: 0 + 1 == 1."""
    shutout = ['0', '1', '0']
    fake_shutout = PropertyMock(return_value=shutout)
    fake_num_goalies = PropertyMock(return_value=2)
    type(self.boxscore)._away_shutout = fake_shutout
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_shutout == 1
def test_away_shutout_multiple_goalies_empty_field(self):
    """An empty string in the shutout column is ignored: '' + 1 == 1."""
    shutout = ['', '1', '0']
    fake_shutout = PropertyMock(return_value=shutout)
    fake_num_goalies = PropertyMock(return_value=2)
    type(self.boxscore)._away_shutout = fake_shutout
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_shutout == 1
def test_home_shutout_single_goalies(self):
    """Home shutouts start after the away goalies' entries (index 1 here)."""
    shutout = ['0', '1']
    fake_shutout = PropertyMock(return_value=shutout)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._home_shutout = fake_shutout
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_shutout == 1
def test_home_shutout_multiple_goalies(self):
    """Multiple home goalie entries after the away goalie sum to 0 + 1 == 1."""
    shutout = ['0', '0', '1']
    fake_shutout = PropertyMock(return_value=shutout)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._home_shutout = fake_shutout
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_shutout == 1
def test_home_shutout_multiple_goalies_empty_field(self):
    """An empty string among the home entries is ignored: '' + 1 == 1."""
    shutout = ['0', '', '1']
    fake_shutout = PropertyMock(return_value=shutout)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._home_shutout = fake_shutout
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_shutout == 1
def test_away_saves_single_goalies(self):
    """With one away goalie, only the first saves entry counts."""
    saves = ['29', '30']
    fake_saves = PropertyMock(return_value=saves)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._away_saves = fake_saves
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_saves == 29
def test_away_saves_multiple_goalies(self):
    """With two away goalies, their saves are summed: 29 + 3 == 32.

    Renamed: the original name duplicated
    test_away_saves_multiple_goalies_empty_field, so this definition was
    shadowed by the later one and never ran; the data here also contains
    no empty field.
    """
    saves = ['29', '3', '30']
    fake_saves = PropertyMock(return_value=saves)
    fake_num_goalies = PropertyMock(return_value=2)
    type(self.boxscore)._away_saves = fake_saves
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_saves == 32
def test_away_saves_multiple_goalies_empty_field(self):
    """An empty saves entry contributes nothing: 29 + '' == 29."""
    saves = ['29', '', '30']
    fake_saves = PropertyMock(return_value=saves)
    fake_num_goalies = PropertyMock(return_value=2)
    type(self.boxscore)._away_saves = fake_saves
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.away_saves == 29
def test_home_saves_single_goalies(self):
    """Home saves start after the single away goalie's entry."""
    saves = ['29', '30']
    fake_saves = PropertyMock(return_value=saves)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._home_saves = fake_saves
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_saves == 30
def test_home_saves_multiple_goalies(self):
    """Two home goalies after the away goalie sum to 3 + 30 == 33.

    Renamed: the original name duplicated
    test_home_saves_multiple_goalies_empty_field, so this definition was
    shadowed by the later one and never ran; the data here also contains
    no empty field.
    """
    saves = ['29', '3', '30']
    fake_saves = PropertyMock(return_value=saves)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._home_saves = fake_saves
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_saves == 33
def test_home_saves_multiple_goalies_empty_field(self):
    """An empty trailing saves entry contributes nothing: 30 + '' == 30."""
    saves = ['29', '30', '']
    fake_saves = PropertyMock(return_value=saves)
    fake_num_goalies = PropertyMock(return_value=1)
    type(self.boxscore)._home_saves = fake_saves
    type(self.boxscore)._away_goalies = fake_num_goalies
    assert self.boxscore.home_saves == 30
def test_away_save_percentage(self):
    """away_save_percentage = away saves / home shots: 30/33 rounds to 0.909."""
    fake_saves = PropertyMock(return_value=30)
    fake_shots_on_goal = PropertyMock(return_value=33)
    type(self.boxscore).away_saves = fake_saves
    type(self.boxscore).home_shots_on_goal = fake_shots_on_goal
    assert self.boxscore.away_save_percentage == 0.909
def test_away_save_percentage_zero_shots(self):
    """Zero shots must not divide by zero; percentage defaults to 0.0."""
    fake_saves = PropertyMock(return_value=0)
    fake_shots_on_goal = PropertyMock(return_value=0)
    type(self.boxscore).away_saves = fake_saves
    type(self.boxscore).home_shots_on_goal = fake_shots_on_goal
    assert self.boxscore.away_save_percentage == 0.0
def test_home_save_percentage(self):
    """home_save_percentage = home saves / away shots: 30/33 rounds to 0.909."""
    fake_saves = PropertyMock(return_value=30)
    fake_shots_on_goal = PropertyMock(return_value=33)
    type(self.boxscore).home_saves = fake_saves
    type(self.boxscore).away_shots_on_goal = fake_shots_on_goal
    assert self.boxscore.home_save_percentage == 0.909
def test_home_save_percentage_zero_shots(self):
    """Zero shots must not divide by zero; percentage defaults to 0.0."""
    fake_saves = PropertyMock(return_value=0)
    fake_shots_on_goal = PropertyMock(return_value=0)
    type(self.boxscore).home_saves = fake_saves
    type(self.boxscore).away_shots_on_goal = fake_shots_on_goal
    assert self.boxscore.home_save_percentage == 0.0
def test_no_class_information_returns_dataframe_of_none(self):
    """With no goal data parsed, the dataframe property yields None."""
    mock_goals = PropertyMock(return_value=None)
    type(self.boxscore)._away_goals = mock_goals
    type(self.boxscore)._home_goals = mock_goals
    assert self.boxscore.dataframe is None
class TestMLBBoxscores:
    # NOTE(review): despite the "MLB" name, the mocked HTML below uses NHL
    # teams (Red Wings, Kings) and NHL-style boxscore links — presumably a
    # copy-paste slip in the class name; confirm against the rest of file.
    @patch('requests.get', side_effect=mock_pyquery)
    def setup_method(self, *args, **kwargs):
        """Build a Boxscores object with the network and parsing stubbed out."""
        flexmock(Boxscores) \
            .should_receive('_get_team_details') \
            .and_return((None, None, None, None, None, None))
        flexmock(Boxscores) \
            .should_receive('_find_games') \
            .and_return(None)
        self.boxscores = Boxscores(None)

    def test_improper_loser_boxscore_format_skips_game(self):
        """A loser row missing the team link causes the game to be dropped."""
        mock_html = pq("""<table class="teams">
<tbody>
<tr class="loser">
<td class="right">1</td>
<td class="right gamelink">
</td>
</tr>
<tr class="winner">
<td><a href="/teams/DET/2019.html">Detroit Red Wings</a></td>
<td class="right">3</td>
<td class="right">
</td>
</tr>
</tbody>
</table>""")
        games = self.boxscores._extract_game_info([mock_html])
        assert len(games) == 0

    def test_improper_winner_boxscore_format_skips_game(self):
        """A winner row missing the team link causes the game to be dropped."""
        mock_html = pq("""<table class="teams">
<tbody>
<tr class="loser">
<td><a href="/teams/LAK/2019.html">Los Angeles Kings</a></td>
<td class="right">1</td>
<td class="right gamelink">
<a href="/boxscores/201812100DET.html">Final</a>
</td>
</tr>
<tr class="winner">
<td class="right">3</td>
<td class="right">
</td>
</tr>
</tbody>
</table>""")
        games = self.boxscores._extract_game_info([mock_html])
        assert len(games) == 0
| 34.189189
| 75
| 0.677523
| 2,358
| 18,975
| 5.091179
| 0.081001
| 0.117951
| 0.099958
| 0.078301
| 0.857226
| 0.827988
| 0.806247
| 0.786672
| 0.786672
| 0.771678
| 0
| 0.015833
| 0.224453
| 18,975
| 554
| 76
| 34.250903
| 0.799946
| 0
| 0
| 0.655422
| 0
| 0.004819
| 0.084796
| 0.00585
| 0
| 0
| 0
| 0
| 0.098795
| 1
| 0.125301
| false
| 0
| 0.014458
| 0.009639
| 0.166265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c365664b9c836dcf8ce6404c28cf198414c006e5
| 144
|
py
|
Python
|
jupyterlab_nvdashboard/tests/test_utils.py
|
vidosits/jupyterlab-nvdashboard
|
50cf834b22dc3c6a6ce44997cbfd01aae2bed7e2
|
[
"BSD-3-Clause"
] | 368
|
2019-10-07T15:32:50.000Z
|
2022-03-27T03:42:29.000Z
|
jupyterlab_nvdashboard/tests/test_utils.py
|
vidosits/jupyterlab-nvdashboard
|
50cf834b22dc3c6a6ce44997cbfd01aae2bed7e2
|
[
"BSD-3-Clause"
] | 82
|
2019-10-03T02:05:39.000Z
|
2022-03-17T20:27:16.000Z
|
jupyterlab_nvdashboard/tests/test_utils.py
|
vidosits/jupyterlab-nvdashboard
|
50cf834b22dc3c6a6ce44997cbfd01aae2bed7e2
|
[
"BSD-3-Clause"
] | 42
|
2019-10-03T09:02:46.000Z
|
2021-12-08T05:32:24.000Z
|
import pytest
def test_format_bytes():
    """format_bytes renders 1e13 bytes as a two-decimal TB string."""
    from jupyterlab_nvdashboard.utils import format_bytes
    assert format_bytes(1e13) == "10.00 TB"
| 18
| 57
| 0.756944
| 20
| 144
| 5.2
| 0.75
| 0.317308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058333
| 0.166667
| 144
| 7
| 58
| 20.571429
| 0.808333
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c370c3f931c787eed137e054e44a6d964aaf49e0
| 13,362
|
py
|
Python
|
src/tests/functional/api/test_api_views.py
|
s-light/pretalx
|
5abed452688d8c0b3e44e71b7ce3ab9b6d80bd95
|
[
"Apache-2.0"
] | null | null | null |
src/tests/functional/api/test_api_views.py
|
s-light/pretalx
|
5abed452688d8c0b3e44e71b7ce3ab9b6d80bd95
|
[
"Apache-2.0"
] | null | null | null |
src/tests/functional/api/test_api_views.py
|
s-light/pretalx
|
5abed452688d8c0b3e44e71b7ce3ab9b6d80bd95
|
[
"Apache-2.0"
] | null | null | null |
import json
import pytest
@pytest.mark.django_db
def test_api_user_endpoint(orga_client, room):
    """/api/me returns exactly the expected profile fields."""
    response = orga_client.get('/api/me', follow=True)
    assert response.status_code == 200
    content = json.loads(response.content.decode())
    assert set(content.keys()) == {'name', 'email', 'locale', 'timezone'}
@pytest.mark.django_db
def test_can_only_see_public_events(client, event, other_event):
    """Anonymous users only see public events in /api/events."""
    other_event.is_public = False
    other_event.save()
    assert event.is_public
    assert not other_event.is_public
    response = client.get('/api/events', follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content) == 1, content
    assert content[0]['name']['en'] == event.name
@pytest.mark.django_db
def test_orga_can_see_nonpublic_events(orga_client, event, other_event):
    """Organisers see non-public events in /api/events too."""
    event.is_public = False
    event.save()
    assert not event.is_public
    assert other_event.is_public
    response = orga_client.get('/api/events', follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content) == 2, content
    assert content[0]['name']['en'] == event.name
@pytest.mark.django_db
def test_can_only_see_public_submissions(
    client, slot, accepted_submission, rejected_submission, submission
):
    """Anonymous users only see the scheduled (slotted) submission."""
    response = client.get(submission.event.api_urls.submissions, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 1
    assert content['results'][0]['title'] == slot.submission.title
@pytest.mark.django_db
def test_can_only_see_public_submissions_if_public_schedule(
    client, slot, accepted_submission, rejected_submission, submission, answer
):
    """With a hidden schedule, anonymous users see no submissions or answers."""
    submission.event.settings.set('show_schedule', False)
    response = client.get(submission.event.api_urls.submissions, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 0
    assert all(submission['answers'] == [] for submission in content['results'])
@pytest.mark.django_db
def test_orga_can_see_all_submissions(
    orga_client, slot, accepted_submission, rejected_submission, submission, answer
):
    """Organisers see every submission, including per-submission answers."""
    response = orga_client.get(submission.event.api_urls.submissions, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 4
    assert content['results'][0]['title'] == slot.submission.title
    assert any(submission['answers'] == [] for submission in content['results'])
    assert any(submission['answers'] != [] for submission in content['results'])
@pytest.mark.django_db
def test_orga_can_see_all_submissions_even_nonpublic(
    orga_client, slot, accepted_submission, rejected_submission, submission
):
    """Organisers still see every submission when the schedule is hidden."""
    submission.event.settings.set('show_schedule', False)
    response = orga_client.get(submission.event.api_urls.submissions, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 4
    assert content['results'][0]['title'] == slot.submission.title
@pytest.mark.django_db
def test_only_see_talks_when_a_release_exists(
    orga_client, confirmed_submission, rejected_submission, submission
):
    """No schedule release (no slot fixture) means the talks list is empty."""
    response = orga_client.get(submission.event.api_urls.talks, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 0
@pytest.mark.django_db
def test_can_only_see_public_talks(
    client, slot, accepted_submission, rejected_submission, submission
):
    """Anonymous users see only the scheduled talk."""
    response = client.get(submission.event.api_urls.talks, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 1
    assert content['results'][0]['title'] == slot.submission.title
@pytest.mark.django_db
def test_can_only_see_public_talks_if_public_schedule(
    client, slot, accepted_submission, rejected_submission, submission
):
    """A hidden schedule hides all talks from anonymous users."""
    submission.event.settings.set('show_schedule', False)
    response = client.get(submission.event.api_urls.talks, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 0
@pytest.mark.django_db
def test_orga_can_see_all_talks(
    orga_client, slot, accepted_submission, rejected_submission, submission
):
    """Organisers see the scheduled talk in the talks list."""
    response = orga_client.get(submission.event.api_urls.talks, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 1
    assert content['results'][0]['title'] == slot.submission.title
@pytest.mark.django_db
def test_orga_can_see_all_talks_even_nonpublic(
    orga_client, slot, accepted_submission, rejected_submission, submission
):
    """Organisers still see scheduled talks when the schedule is hidden."""
    submission.event.settings.set('show_schedule', False)
    response = orga_client.get(submission.event.api_urls.talks, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 1
    assert content['results'][0]['title'] == slot.submission.title
@pytest.mark.django_db
def test_user_can_see_schedule(client, slot):
    """Anonymous users see only the released schedule (1 of 2)."""
    assert slot.submission.event.schedules.count() == 2
    response = client.get(slot.submission.event.api_urls.schedules, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 1
@pytest.mark.django_db
def test_user_cannot_see_wip_schedule(client, slot):
    """The WIP schedule endpoint 404s for anonymous users."""
    assert slot.submission.event.schedules.count() == 2
    response = client.get(slot.submission.event.api_urls.schedules + 'wip', follow=True)
    json.loads(response.content.decode())
    assert response.status_code == 404
@pytest.mark.django_db
def test_user_cannot_see_schedule_if_not_public(client, slot):
    """A hidden schedule is invisible to anonymous users."""
    slot.submission.event.settings.set('show_schedule', False)
    assert slot.submission.event.schedules.count() == 2
    response = client.get(slot.submission.event.api_urls.schedules, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 0
@pytest.mark.django_db
def test_orga_can_see_schedule(orga_client, slot):
    """Organisers see both schedule versions."""
    assert slot.submission.event.schedules.count() == 2
    response = orga_client.get(slot.submission.event.api_urls.schedules, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 2
@pytest.mark.django_db
def test_orga_can_see_wip_schedule(orga_client, slot):
    """The WIP schedule endpoint is accessible to organisers."""
    assert slot.submission.event.schedules.count() == 2
    response = orga_client.get(
        slot.submission.event.api_urls.schedules + 'wip', follow=True
    )
    json.loads(response.content.decode())
    assert response.status_code == 200
@pytest.mark.django_db
def test_orga_can_see_current_schedule(orga_client, slot):
    """The 'latest' schedule endpoint includes the slotted talk's title."""
    assert slot.submission.event.schedules.count() == 2
    response = orga_client.get(
        slot.submission.event.api_urls.schedules + 'latest', follow=True
    )
    json.loads(response.content.decode())
    assert response.status_code == 200
    assert slot.submission.title in response.content.decode()
@pytest.mark.django_db
def test_orga_can_see_schedule_even_if_not_public(orga_client, slot):
    """Organisers still see both schedules when show_schedule is off.

    Renamed from test_orga_cannot_see_schedule_even_if_not_public: the
    assertions (count == 2) verify the organiser CAN see the schedules,
    so the old 'cannot' wording contradicted the behavior under test.
    """
    slot.submission.event.settings.set('show_schedule', False)
    assert slot.submission.event.schedules.count() == 2
    response = orga_client.get(slot.submission.event.api_urls.schedules, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 2
@pytest.mark.django_db
def test_can_only_see_public_speakers(
    client,
    slot,
    accepted_submission,
    rejected_submission,
    submission,
    impersonal_answer,
):
    """Anonymous users see one speaker, without email or answers fields."""
    response = client.get(submission.event.api_urls.speakers, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 1
    assert content['results'][0]['name'] == accepted_submission.speakers.first().name
    assert set(content['results'][0].keys()) == {
        'name',
        'code',
        'biography',
        'submissions',
        'avatar',
    }
@pytest.mark.django_db
def test_can_only_see_public_speakers_if_public_schedule(
    client, slot, accepted_submission, rejected_submission, submission
):
    """A hidden schedule hides all speakers from anonymous users.

    Renamed from the original 'speakerss' typo to match its siblings.
    """
    submission.event.settings.set('show_schedule', False)
    response = client.get(submission.event.api_urls.speakers, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 0
@pytest.mark.django_db
def test_orga_can_see_all_speakers(
    orga_client,
    slot,
    accepted_submission,
    rejected_submission,
    submission,
    impersonal_answer,
):
    """Organisers see every speaker with the full field set, including
    email and serialized answers."""
    response = orga_client.get(submission.event.api_urls.speakers, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 2
    assert set(content['results'][0].keys()) == {
        'name',
        'code',
        'email',
        'biography',
        'submissions',
        'answers',
        'avatar',
    }
    assert set(content['results'][0]['answers'][0].keys()) == {
        'answer',
        'answer_file',
        'person',
        'question',
        'submission',
        'options',
        'id',
    }
@pytest.mark.django_db
def test_reviewer_cannot_see_speakers(
    review_client,
    slot,
    accepted_submission,
    rejected_submission,
    submission,
    impersonal_answer,
):
    """With hidden speaker names, reviewers see only the slotted speaker."""
    submission.event.settings.review_hide_speaker_names = True
    response = review_client.get(submission.event.api_urls.speakers, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 1  # can see the slot's speaker, but not the other submissions'
@pytest.mark.django_db
def test_orga_can_see_all_speakers_even_nonpublic(
    orga_client, slot, accepted_submission, rejected_submission, submission
):
    """Organisers still see all speakers when the schedule is hidden."""
    submission.event.settings.set('show_schedule', False)
    response = orga_client.get(submission.event.api_urls.speakers, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 2
@pytest.mark.django_db
def test_orga_speakers_with_multiple_talks_are_not_duplicated(
    client, speaker, slot, other_slot, accepted_submission, other_accepted_submission
):
    """A speaker on two talks appears once; total distinct speakers is 2."""
    other_accepted_submission.speakers.add(accepted_submission.speakers.first())
    response = client.get(accepted_submission.event.api_urls.speakers, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert content['count'] == 2
@pytest.mark.django_db
def test_anon_cannot_see_reviews(client, event, review):
    """Anonymous users get an empty review list."""
    response = client.get(event.api_urls.reviews, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content['results']) == 0, content
@pytest.mark.django_db
def test_orga_can_see_reviews(orga_client, event, review):
    """Organisers see the single existing review."""
    response = orga_client.get(event.api_urls.reviews, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content['results']) == 1
@pytest.mark.django_db
def test_reviewer_can_see_reviews(review_client, event, review, other_review):
    """Reviewers see both reviews."""
    response = review_client.get(event.api_urls.reviews, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content['results']) == 2, content
@pytest.mark.django_db
def test_reviewer_can_filter_by_submission(review_client, event, review, other_review):
    """Filtering by submission code narrows the list to one review."""
    response = review_client.get(
        event.api_urls.reviews + f'?submission__code={review.submission.code}',
        follow=True,
    )
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content['results']) == 1, content
@pytest.mark.django_db
def test_reviewer_cannot_see_review_to_own_talk(
    review_user, review_client, event, review, other_review
):
    """A review of the reviewer's own talk is excluded from the list."""
    other_review.submission.speakers.add(review_user)
    response = review_client.get(event.api_urls.reviews, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content['results']) == 1, content
@pytest.mark.django_db
def test_everybody_can_see_rooms(client, room):
    """Anonymous users see rooms, minus the internal speaker_info field."""
    response = client.get(room.event.api_urls.rooms, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content['results']) == 1, content
    assert 'speaker_info' not in content['results'][0]
@pytest.mark.django_db
def test_orga_can_see_room_speaker_info(orga_client, room):
    """Organisers additionally see the room's speaker_info field."""
    response = orga_client.get(room.event.api_urls.rooms, follow=True)
    content = json.loads(response.content.decode())
    assert response.status_code == 200
    assert len(content['results']) == 1, content
    assert 'speaker_info' in content['results'][0]
| 33.074257
| 94
| 0.729906
| 1,724
| 13,362
| 5.434455
| 0.067865
| 0.060839
| 0.073967
| 0.061479
| 0.871705
| 0.853133
| 0.846835
| 0.831465
| 0.805529
| 0.766784
| 0
| 0.012848
| 0.149529
| 13,362
| 403
| 95
| 33.156328
| 0.811598
| 0.004341
| 0
| 0.684887
| 0
| 0
| 0.051646
| 0.003157
| 0
| 0
| 0
| 0
| 0.289389
| 1
| 0.102894
| false
| 0
| 0.006431
| 0
| 0.109325
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f00237d5ff01654dba32165ed49a88a7a15a299
| 43,461
|
py
|
Python
|
action-server/tests/actions/test_daily_ci_enroll_form.py
|
dialoguemd/covidflow
|
b159b76dc68462f272614db4cbf716844872ebca
|
[
"MIT"
] | 7
|
2020-05-23T07:07:26.000Z
|
2021-11-29T05:58:51.000Z
|
action-server/tests/actions/test_daily_ci_enroll_form.py
|
dialoguemd/covidflow
|
b159b76dc68462f272614db4cbf716844872ebca
|
[
"MIT"
] | 210
|
2020-04-13T17:21:55.000Z
|
2021-04-20T15:46:26.000Z
|
action-server/tests/actions/test_daily_ci_enroll_form.py
|
dialoguemd/covidflow
|
b159b76dc68462f272614db4cbf716844872ebca
|
[
"MIT"
] | 3
|
2020-04-09T14:38:09.000Z
|
2020-07-29T15:06:11.000Z
|
from unittest.mock import MagicMock, patch
import pytest
from rasa_sdk.events import Form, SlotSet
from rasa_sdk.forms import REQUESTED_SLOT
from covidflow.actions.daily_ci_enroll_form import (
CODE_TRY_COUNTER_SLOT,
DO_ENROLL_SLOT,
FORM_NAME,
JUST_SENT_CODE_SLOT,
NO_CODE_SOLUTION_SLOT,
PHONE_TO_CHANGE_SLOT,
PHONE_TRY_COUNTER_SLOT,
PRECONDITIONS_WITH_EXAMPLES_SLOT,
VALIDATION_CODE_REFERENCE_SLOT,
VALIDATION_CODE_SLOT,
WANTS_CANCEL_SLOT,
DailyCiEnrollForm,
)
from covidflow.constants import (
FIRST_NAME_SLOT,
HAS_DIALOGUE_SLOT,
PHONE_NUMBER_SLOT,
PRECONDITIONS_SLOT,
)
from .form_test_helper import FormTestCase
# Canned fixture values shared by the enroll-form tests below.
FIRST_NAME = "John"
PHONE_NUMBER = "15141234567"
VALIDATION_CODE = "4567"

# Minimal Rasa domain: only the error templates dispatched on validation
# retries need to resolve; their text content is irrelevant to the tests.
DOMAIN = {
    "responses": {
        "utter_ask_daily_ci_enroll__wants_cancel_error": [{"text": ""}],
        "utter_ask_daily_ci_enroll__no_code_solution_error": [{"text": ""}],
        "utter_ask_preconditions_error": [{"text": ""}],
        "utter_ask_daily_ci_enroll__preconditions_examples_error": [{"text": ""}],
    }
}
def AsyncMock(*args, **kwargs):
    """Return an awaitable stand-in backed by a MagicMock.

    Shim for pre-3.8 Pythons without unittest.mock.AsyncMock: awaiting the
    returned coroutine function forwards the call to the inner MagicMock
    (exposed as the ``.mock`` attribute for call assertions) and returns
    its result.
    """
    inner = MagicMock(*args, **kwargs)

    async def _forward(*call_args, **call_kwargs):
        return inner(*call_args, **call_kwargs)

    _forward.mock = inner
    return _forward
class TestDailyCiEnrollForm(FormTestCase):
def setUp(self):
    """Create a fresh DailyCiEnrollForm for every test."""
    super().setUp()
    self.form = DailyCiEnrollForm()
def test_validate_first_name(self):
    """The first name slot maps from free text and is taken verbatim."""
    slot_mapping = self.form.slot_mappings()[FIRST_NAME_SLOT]
    self.assertEqual(slot_mapping, self.form.from_text())
    self._validate_first_name("john", "john")
    self._validate_first_name("John", "John")
    self._validate_first_name("john john", "john john")
    # At the moment, we can't extract the name
    self._validate_first_name("it's John!", "it's John!")
def _validate_first_name(self, text: str, expected_name: str):
    """Assert validate_first_name maps raw *text* to *expected_name*."""
    slot_values = self.form.validate_first_name(text, self.dispatcher, None, None)
    self.assertEqual({FIRST_NAME_SLOT: expected_name}, slot_values)
@pytest.mark.asyncio
async def test_validate_phone_number(self):
    """Phone numbers normalise to an 11-digit '1'-prefixed form; strings
    that do not contain a valid number validate to None."""
    slot_mapping = self.form.slot_mappings()[PHONE_NUMBER_SLOT]
    self.assertEqual(slot_mapping[-1], self.form.from_text())
    await self._validate_phone_number("5145554567", "15145554567")
    await self._validate_phone_number("15145554567", "15145554567")
    await self._validate_phone_number("514-555-4567", "15145554567")
    await self._validate_phone_number("1 (514)-555-4567", "15145554567")
    await self._validate_phone_number("it's 514-555-4567!", "15145554567")
    await self._validate_phone_number("it's 1 514 555 4567", "15145554567")
    await self._validate_phone_number("145554567", None)
    await self._validate_phone_number("25145554567", None)
async def _validate_phone_number(self, text: str, expected_phone_number: str):
    """Run the phone-number validator and compare the resulting slot value."""
    tracker = self.create_tracker(
        slots={VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE}
    )
    slot_values = await self.form.validate_phone_number(
        text, self.dispatcher, tracker, None
    )
    self.assertEqual(
        expected_phone_number, slot_values.get(PHONE_NUMBER_SLOT, None)
    )
@pytest.mark.asyncio
async def test_validate_validation_code(self):
    """Only the exact 4-digit reference code (even embedded in text)
    validates; wrong length or wrong digits yield None."""
    slot_mapping = self.form.slot_mappings()[VALIDATION_CODE_SLOT][-1]
    self.assertEqual(slot_mapping, self.form.from_text())
    await self._validate_validation_code("its 4567", "4567")
    await self._validate_validation_code("4567", "4567")
    await self._validate_validation_code("45678", None)
    await self._validate_validation_code("514", None)
    await self._validate_validation_code("4325", None)
async def _validate_validation_code(self, text: str, expected_validation_code: str):
    """Run the validation-code validator on `text`; `expected_validation_code` is
    None when the code should be rejected (slot absent from returned values)."""
    tracker = self.create_tracker(
        slots={VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE}
    )
    slot_values = await self.form.validate_daily_ci_enroll__validation_code(
        text, self.dispatcher, tracker, None
    )
    self.assertEqual(
        expected_validation_code, slot_values.get(VALIDATION_CODE_SLOT, None)
    )
def test_form_activation(self):
    """Activating the form offers the check-in, explains it, and asks do_enroll."""
    tracker = self.create_tracker(active_form=False)
    self.run_form(tracker, DOMAIN)
    expected_events = [
        SlotSet(PRECONDITIONS_WITH_EXAMPLES_SLOT, "N/A"),
        Form(FORM_NAME),
        SlotSet(REQUESTED_SLOT, DO_ENROLL_SLOT),
    ]
    self.assert_events(expected_events)
    expected_templates = [
        "utter_daily_ci_enroll__offer_checkin",
        "utter_daily_ci_enroll__explain_checkin_1",
        "utter_daily_ci_enroll__explain_checkin_2",
        "utter_ask_daily_ci_enroll__do_enroll",
    ]
    self.assert_templates(expected_templates)
def test_provide_do_enroll_checkin_affirm(self):
    """Affirming enrollment sets do_enroll and moves on to asking the first name."""
    initial_slots = {
        REQUESTED_SLOT: DO_ENROLL_SLOT,
        PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
    }
    tracker = self.create_tracker(slots=initial_slots, intent="affirm")
    self.run_form(tracker, DOMAIN)
    expected_events = [
        SlotSet(DO_ENROLL_SLOT, True),
        SlotSet(REQUESTED_SLOT, FIRST_NAME_SLOT),
    ]
    self.assert_events(expected_events)
    self.assert_templates(
        ["utter_daily_ci_enroll__start_enroll", "utter_ask_first_name"],
    )
def test_provide_do_enroll_checkin_deny(self):
    """Denying enrollment deactivates the form without uttering anything further."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: DO_ENROLL_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
        },
        intent="deny",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates([])
def test_provide_first_name(self):
    """Providing a first name stores it and asks for the phone number."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: FIRST_NAME_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
        },
        text=FIRST_NAME,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(FIRST_NAME_SLOT, FIRST_NAME),
            SlotSet(REQUESTED_SLOT, PHONE_NUMBER_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__thanks_first_name",
            "utter_daily_ci_enroll__text_message_checkin",
            "utter_ask_phone_number",
        ],
    )
def test_provide_invalid_first_name(self):
    """A blank first name is rejected and the slot is asked again."""
    initial_slots = {
        REQUESTED_SLOT: FIRST_NAME_SLOT,
        PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
        DO_ENROLL_SLOT: True,
    }
    tracker = self.create_tracker(slots=initial_slots, text=" ")
    self.run_form(tracker, DOMAIN)
    expected_events = [
        SlotSet(FIRST_NAME_SLOT, None),
        SlotSet(REQUESTED_SLOT, FIRST_NAME_SLOT),
    ]
    self.assert_events(expected_events)
    self.assert_templates(["utter_ask_first_name"])
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=VALIDATION_CODE),
)
def test_provide_phone_number(self):
    """A valid phone number triggers an SMS code (mocked) and asks for the validation code."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
        },
        text=PHONE_NUMBER,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, PHONE_NUMBER),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(VALIDATION_CODE_REFERENCE_SLOT, VALIDATION_CODE),
            SlotSet(JUST_SENT_CODE_SLOT, True),
            SlotSet(REQUESTED_SLOT, VALIDATION_CODE_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__acknowledge",
            "utter_ask_daily_ci_enroll__validation_code",
        ]
    )
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=VALIDATION_CODE),
)
def test_provide_phone_number_after_change(self):
    """Providing a phone number after a change request behaves like the first time."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            JUST_SENT_CODE_SLOT: True,
            CODE_TRY_COUNTER_SLOT: 1,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
        },
        text=PHONE_NUMBER,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, PHONE_NUMBER),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(VALIDATION_CODE_REFERENCE_SLOT, VALIDATION_CODE),
            SlotSet(JUST_SENT_CODE_SLOT, True),
            SlotSet(REQUESTED_SLOT, VALIDATION_CODE_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__acknowledge",
            "utter_ask_daily_ci_enroll__validation_code",
        ]
    )
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=None),
)
def test_provide_phone_number_sms_error(self):
    """If the SMS cannot be sent (mocked None), enrollment is abandoned and the form ends."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
        },
        text=PHONE_NUMBER,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, PHONE_NUMBER),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__acknowledge",
            "utter_daily_ci_enroll__validation_code_not_sent_1",
            "utter_daily_ci_enroll__validation_code_not_sent_2",
            "utter_daily_ci_enroll__continue",
        ]
    )
def test_provide_first_invalid_phone_number(self):
    """First invalid phone number bumps the try counter to 1 and re-asks."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
        },
        text=" ",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, None),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(PHONE_TRY_COUNTER_SLOT, 1),
            SlotSet(REQUESTED_SLOT, PHONE_NUMBER_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__invalid_phone_number",
            "utter_ask_phone_number_error",
        ]
    )
def test_provide_second_invalid_phone_number(self):
    """Second invalid phone number bumps the try counter to 2 and re-asks."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_TRY_COUNTER_SLOT: 1,
        },
        text=" ",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, None),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(PHONE_TRY_COUNTER_SLOT, 2),
            SlotSet(REQUESTED_SLOT, PHONE_NUMBER_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__invalid_phone_number",
            "utter_ask_phone_number_error",
        ]
    )
def test_provide_third_invalid_phone_number(self):
    """Third invalid phone number gives up on enrollment and ends the form."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_TRY_COUNTER_SLOT: 2,
        },
        text=" ",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, None),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(["utter_daily_ci_enroll__invalid_phone_no_checkin"])
def test_provide_no_phone_number(self):
    """A no_phone intent cancels enrollment and ends the form."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
        },
        intent="no_phone",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, None),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__no_phone_no_checkin",
            "utter_daily_ci_enroll__continue",
        ]
    )
def test_provide_phone_number_cancel(self):
    """A cancel intent at the phone step asks the wants_cancel confirmation question."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PHONE_NUMBER_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
        },
        intent="cancel",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PHONE_NUMBER_SLOT, None),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(WANTS_CANCEL_SLOT, None),
            SlotSet(REQUESTED_SLOT, WANTS_CANCEL_SLOT),
        ],
    )
    self.assert_templates(["utter_ask_daily_ci_enroll__wants_cancel"])
def test_provide_wants_cancel_affirm(self):
    """Confirming the cancellation ends the form without enrolling."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: WANTS_CANCEL_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            WANTS_CANCEL_SLOT: None,
        },
        intent="affirm",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(WANTS_CANCEL_SLOT, True),
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(["utter_daily_ci_enroll__no_problem_continue"])
def test_provide_wants_cancel_deny(self):
    """Declining the cancellation resumes the phone-number question (counter bumped)."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: WANTS_CANCEL_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            WANTS_CANCEL_SLOT: None,
        },
        intent="deny",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(WANTS_CANCEL_SLOT, False),
            SlotSet(PHONE_TRY_COUNTER_SLOT, 1),
            SlotSet(REQUESTED_SLOT, PHONE_NUMBER_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__ok_continue", "utter_ask_phone_number_error"]
    )
def test_provide_wants_cancel_error(self):
    """An unrelated intent re-asks wants_cancel with the error template."""
    initial_slots = {
        REQUESTED_SLOT: WANTS_CANCEL_SLOT,
        PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
        DO_ENROLL_SLOT: True,
        FIRST_NAME_SLOT: FIRST_NAME,
        WANTS_CANCEL_SLOT: None,
    }
    tracker = self.create_tracker(slots=initial_slots, intent="something_else")
    self.run_form(tracker, DOMAIN)
    expected_events = [
        SlotSet(WANTS_CANCEL_SLOT, None),
        SlotSet(REQUESTED_SLOT, WANTS_CANCEL_SLOT),
    ]
    self.assert_events(expected_events)
    self.assert_templates(["utter_ask_daily_ci_enroll__wants_cancel_error"])
def test_provide_validation_code(self):
    """A matching validation code is accepted and the preconditions question follows."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        text=VALIDATION_CODE,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, VALIDATION_CODE),
            SlotSet(JUST_SENT_CODE_SLOT, False),
            SlotSet(REQUESTED_SLOT, PRECONDITIONS_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__thanks", "utter_ask_preconditions"]
    )
def test_provide_first_invalid_validation_code(self):
    """First invalid code bumps the code try counter to 1 and re-asks."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        text=" ",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(CODE_TRY_COUNTER_SLOT, 1),
            SlotSet(JUST_SENT_CODE_SLOT, False),
            SlotSet(REQUESTED_SLOT, VALIDATION_CODE_SLOT),
        ],
    )
    self.assert_templates(["utter_ask_daily_ci_enroll__validation_code_error"])
def test_provide_second_invalid_validation_code(self):
    """Second invalid code bumps the code try counter to 2 and re-asks."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
            CODE_TRY_COUNTER_SLOT: 1,
        },
        text=" ",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(CODE_TRY_COUNTER_SLOT, 2),
            SlotSet(JUST_SENT_CODE_SLOT, False),
            SlotSet(REQUESTED_SLOT, VALIDATION_CODE_SLOT),
        ],
    )
    self.assert_templates(["utter_ask_daily_ci_enroll__validation_code_error"])
def test_provide_third_invalid_validation_code(self):
    """Third invalid code gives up on enrollment and ends the form."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
            CODE_TRY_COUNTER_SLOT: 2,
        },
        text=" ",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(["utter_daily_ci_enroll__invalid_phone_no_checkin"])
def test_provide_validation_code_change_phone(self):
    """A change_phone intent at the code step clears the number and asks for a new one."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="change_phone",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(PHONE_NUMBER_SLOT, None),
            SlotSet(PHONE_TO_CHANGE_SLOT, True),
            SlotSet(REQUESTED_SLOT, PHONE_NUMBER_SLOT),
        ],
    )
    self.assert_templates(["utter_ask_phone_number_new"])
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=VALIDATION_CODE),
)
def test_provide_validation_code_phone_number(self):
    """A phone number given at the code step is treated as a new number; a new code is sent."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        text=PHONE_NUMBER,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(PHONE_NUMBER_SLOT, PHONE_NUMBER),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(VALIDATION_CODE_REFERENCE_SLOT, VALIDATION_CODE),
            SlotSet(JUST_SENT_CODE_SLOT, True),
            SlotSet(REQUESTED_SLOT, VALIDATION_CODE_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__acknowledge_new_phone_number",
            "utter_ask_daily_ci_enroll__validation_code",
        ]
    )
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=VALIDATION_CODE),
)
def test_provide_validation_code_change_phone_with_new(self):
    """change_phone intent plus a number in the same message updates it and re-sends the code."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="change_phone",
        text=PHONE_NUMBER,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(PHONE_NUMBER_SLOT, PHONE_NUMBER),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(VALIDATION_CODE_REFERENCE_SLOT, VALIDATION_CODE),
            SlotSet(JUST_SENT_CODE_SLOT, True),
            SlotSet(REQUESTED_SLOT, VALIDATION_CODE_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__acknowledge_new_phone_number",
            "utter_ask_daily_ci_enroll__validation_code",
        ]
    )
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=None),
)
def test_provide_validation_code_phone_number_sms_error(self):
    """A new number at the code step whose SMS fails (mocked None) aborts enrollment."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        text=PHONE_NUMBER,
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(PHONE_NUMBER_SLOT, PHONE_NUMBER),
            SlotSet(PHONE_TO_CHANGE_SLOT, False),
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__acknowledge_new_phone_number",
            "utter_daily_ci_enroll__validation_code_not_sent_1",
            "utter_daily_ci_enroll__validation_code_not_sent_2",
            "utter_daily_ci_enroll__continue",
        ]
    )
def test_provide_validation_code_did_not_get_code_first_time(self):
    """First did_not_get_code intent bumps the counter and asks for a solution."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="did_not_get_code",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(CODE_TRY_COUNTER_SLOT, 1),
            SlotSet(JUST_SENT_CODE_SLOT, False),
            SlotSet(NO_CODE_SOLUTION_SLOT, None),
            SlotSet(REQUESTED_SLOT, NO_CODE_SOLUTION_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_ask_daily_ci_enroll__no_code_solution",]
    )
def test_provide_validation_code_did_not_get_code_second_time(self):
    """Second did_not_get_code intent bumps the counter to 2 and asks again for a solution."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            CODE_TRY_COUNTER_SLOT: 1,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="did_not_get_code",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(CODE_TRY_COUNTER_SLOT, 2),
            SlotSet(JUST_SENT_CODE_SLOT, False),
            SlotSet(NO_CODE_SOLUTION_SLOT, None),
            SlotSet(REQUESTED_SLOT, NO_CODE_SOLUTION_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_ask_daily_ci_enroll__no_code_solution",]
    )
def test_provide_validation_code_did_not_get_code_third_time(self):
    """Third did_not_get_code intent abandons enrollment and ends the form."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: VALIDATION_CODE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            CODE_TRY_COUNTER_SLOT: 2,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="did_not_get_code",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(VALIDATION_CODE_SLOT, None),
            SlotSet(DO_ENROLL_SLOT, False),
            SlotSet(NO_CODE_SOLUTION_SLOT, None),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__invalid_phone_no_checkin",]
    )
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=VALIDATION_CODE),
)
def test_provide_no_code_solution_new_code(self):
    """Choosing new_code re-sends the SMS code (mocked) and asks for the code again."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: NO_CODE_SOLUTION_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            CODE_TRY_COUNTER_SLOT: 1,  # set when received did_not_get_code intent
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="new_code",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(NO_CODE_SOLUTION_SLOT, "new_code"),
            SlotSet(VALIDATION_CODE_REFERENCE_SLOT, VALIDATION_CODE),
            SlotSet(JUST_SENT_CODE_SLOT, True),
            SlotSet(REQUESTED_SLOT, VALIDATION_CODE_SLOT),
        ],
    )
    self.assert_templates(["utter_ask_daily_ci_enroll__validation_code"])
@patch(
    "covidflow.actions.daily_ci_enroll_form.send_validation_code",
    new=AsyncMock(return_value=None),
)
def test_provide_no_code_solution_new_code_sms_error(self):
    """Choosing new_code when the SMS fails (mocked None) aborts enrollment."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: NO_CODE_SOLUTION_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            CODE_TRY_COUNTER_SLOT: 1,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="new_code",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(NO_CODE_SOLUTION_SLOT, "new_code"),
            SlotSet(DO_ENROLL_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__validation_code_not_sent_1",
            "utter_daily_ci_enroll__validation_code_not_sent_2",
            "utter_daily_ci_enroll__continue",
        ]
    )
def test_provide_no_code_solution_change_phone(self):
    """Choosing change_phone clears the number and asks for a new one."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: NO_CODE_SOLUTION_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
        },
        intent="change_phone",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(NO_CODE_SOLUTION_SLOT, "change_phone"),
            SlotSet(PHONE_NUMBER_SLOT, None),
            SlotSet(PHONE_TO_CHANGE_SLOT, True),
            SlotSet(REQUESTED_SLOT, PHONE_NUMBER_SLOT),
        ],
    )
    self.assert_templates(["utter_ask_phone_number_new"])
def test_provide_no_code_solution_error(self):
    """An unrecognized intent re-asks no_code_solution with the error template."""
    initial_slots = {
        REQUESTED_SLOT: NO_CODE_SOLUTION_SLOT,
        PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
        DO_ENROLL_SLOT: True,
        FIRST_NAME_SLOT: FIRST_NAME,
        PHONE_NUMBER_SLOT: PHONE_NUMBER,
        VALIDATION_CODE_REFERENCE_SLOT: VALIDATION_CODE,
    }
    tracker = self.create_tracker(slots=initial_slots, intent="anything")
    self.run_form(tracker, DOMAIN)
    expected_events = [
        SlotSet(NO_CODE_SOLUTION_SLOT, None),
        SlotSet(REQUESTED_SLOT, NO_CODE_SOLUTION_SLOT),
    ]
    self.assert_events(expected_events)
    self.assert_templates(["utter_ask_daily_ci_enroll__no_code_solution_error"])
def test_provide_preconditions_affirm(self):
    """Affirming preconditions stores True and asks the has_dialogue question."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="affirm",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_SLOT, True),
            SlotSet(REQUESTED_SLOT, HAS_DIALOGUE_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__acknowledge", "utter_ask_has_dialogue"]
    )
def test_provide_preconditions_deny(self):
    """Denying preconditions stores False and asks the has_dialogue question."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="deny",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_SLOT, False),
            SlotSet(REQUESTED_SLOT, HAS_DIALOGUE_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__acknowledge", "utter_ask_has_dialogue"],
    )
def test_provide_preconditions_error(self):
    """An unrelated intent re-asks preconditions with the error template."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="other",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_SLOT, None),
            SlotSet(REQUESTED_SLOT, PRECONDITIONS_SLOT),
        ],
    )
    self.assert_templates(["utter_ask_preconditions_error"],)
def test_provide_preconditions_dont_know(self):
    """A dont_know answer triggers the explanation and the with-examples follow-up question."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="dont_know",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_SLOT, None),
            SlotSet(PRECONDITIONS_WITH_EXAMPLES_SLOT, None),
            SlotSet(REQUESTED_SLOT, PRECONDITIONS_WITH_EXAMPLES_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__explain_preconditions",
            "utter_ask_daily_ci_enroll__preconditions_examples",
        ],
    )
def test_provide_preconditions_help_preconditions(self):
    """A help_preconditions intent behaves like dont_know: explain and ask with examples."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="help_preconditions",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_SLOT, None),
            SlotSet(PRECONDITIONS_WITH_EXAMPLES_SLOT, None),
            SlotSet(REQUESTED_SLOT, PRECONDITIONS_WITH_EXAMPLES_SLOT),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__explain_preconditions",
            "utter_ask_daily_ci_enroll__preconditions_examples",
        ],
    )
def test_provide_preconditions_with_examples_affirm(self):
    """Affirming after the examples sets both precondition slots True and continues."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_WITH_EXAMPLES_SLOT,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="affirm",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_WITH_EXAMPLES_SLOT, True),
            SlotSet(PRECONDITIONS_SLOT, True),
            SlotSet(REQUESTED_SLOT, HAS_DIALOGUE_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__acknowledge", "utter_ask_has_dialogue"]
    )
def test_provide_preconditions_with_examples_deny(self):
    """Denying after the examples sets both precondition slots False and continues."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_WITH_EXAMPLES_SLOT,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="deny",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_WITH_EXAMPLES_SLOT, False),
            SlotSet(PRECONDITIONS_SLOT, False),
            SlotSet(REQUESTED_SLOT, HAS_DIALOGUE_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__acknowledge", "utter_ask_has_dialogue"],
    )
def test_provide_preconditions_with_examples_dont_know(self):
    """Still unsure after examples: preconditions defaults to True and is noted to the user."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_WITH_EXAMPLES_SLOT,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="dont_know",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_WITH_EXAMPLES_SLOT, "dont_know"),
            SlotSet(PRECONDITIONS_SLOT, True),
            SlotSet(REQUESTED_SLOT, HAS_DIALOGUE_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_daily_ci_enroll__note_preconditions", "utter_ask_has_dialogue"],
    )
def test_provide_preconditions_with_examples_error(self):
    """An unrelated intent re-asks the examples question with the error template."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: PRECONDITIONS_WITH_EXAMPLES_SLOT,
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
        },
        intent="other",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(PRECONDITIONS_WITH_EXAMPLES_SLOT, None),
            SlotSet(REQUESTED_SLOT, PRECONDITIONS_WITH_EXAMPLES_SLOT),
        ],
    )
    self.assert_templates(
        ["utter_ask_daily_ci_enroll__preconditions_examples_error"],
    )
@patch("covidflow.actions.daily_ci_enroll_form.ci_enroll")
def test_provide_has_dialogue_affirm(self, mock_ci_enroll):
    """Affirming has_dialogue completes enrollment (ci_enroll mocked) and ends the form."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: HAS_DIALOGUE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
            PRECONDITIONS_SLOT: True,
        },
        intent="affirm",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(HAS_DIALOGUE_SLOT, True),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__enroll_done_1",
            "utter_daily_ci_enroll__enroll_done_2",
            "utter_daily_ci_enroll__enroll_done_3",
        ]
    )
@patch("covidflow.actions.daily_ci_enroll_form.ci_enroll")
def test_provide_has_dialogue_deny(self, mock_ci_enroll):
    """Denying has_dialogue still completes enrollment (ci_enroll mocked) and ends the form."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: HAS_DIALOGUE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
            PRECONDITIONS_SLOT: True,
        },
        intent="deny",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(HAS_DIALOGUE_SLOT, False),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__enroll_done_1",
            "utter_daily_ci_enroll__enroll_done_2",
            "utter_daily_ci_enroll__enroll_done_3",
        ],
    )
@patch("covidflow.actions.daily_ci_enroll_form.ci_enroll", side_effect=Exception)
def test_provide_has_dialogue_enrollment_failed(self, mock_ci_enroll):
    """If ci_enroll raises, the form still ends but utters the failure messages."""
    tracker = self.create_tracker(
        slots={
            REQUESTED_SLOT: HAS_DIALOGUE_SLOT,
            PRECONDITIONS_WITH_EXAMPLES_SLOT: "N/A",
            DO_ENROLL_SLOT: True,
            FIRST_NAME_SLOT: FIRST_NAME,
            PHONE_NUMBER_SLOT: PHONE_NUMBER,
            VALIDATION_CODE_SLOT: VALIDATION_CODE,
            PRECONDITIONS_SLOT: True,
        },
        intent="affirm",
    )
    self.run_form(tracker, DOMAIN)
    self.assert_events(
        [
            SlotSet(HAS_DIALOGUE_SLOT, True),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
    )
    self.assert_templates(
        [
            "utter_daily_ci_enroll__enroll_fail_1",
            "utter_daily_ci_enroll__enroll_fail_2",
            "utter_daily_ci_enroll__enroll_fail_3",
        ]
    )
    mock_ci_enroll.assert_called()
| 32.289004
| 88
| 0.575228
| 4,312
| 43,461
| 5.286642
| 0.036874
| 0.092736
| 0.045052
| 0.067424
| 0.9206
| 0.897833
| 0.878137
| 0.860414
| 0.837779
| 0.823127
| 0
| 0.00803
| 0.349555
| 43,461
| 1,345
| 89
| 32.313011
| 0.798366
| 0.001887
| 0
| 0.63549
| 0
| 0
| 0.102338
| 0.085255
| 0
| 0
| 0
| 0
| 0.081294
| 1
| 0.041084
| false
| 0
| 0.006119
| 0
| 0.049825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f0921ee7873a4f3165dfb865e16b2ca70b1351a
| 14,452
|
py
|
Python
|
castoredc_api/tests/test_import/test_async_import/test_import_report_async.py
|
reiniervlinschoten/castoredc_api
|
54a71606fa681a05e795e42a37d4b4f58b97e787
|
[
"MIT"
] | 1
|
2022-02-07T17:49:31.000Z
|
2022-02-07T17:49:31.000Z
|
castoredc_api/tests/test_import/test_async_import/test_import_report_async.py
|
reiniervlinschoten/castoredc_api
|
54a71606fa681a05e795e42a37d4b4f58b97e787
|
[
"MIT"
] | 48
|
2021-08-05T15:20:27.000Z
|
2022-03-28T14:49:25.000Z
|
castoredc_api/tests/test_import/test_async_import/test_import_report_async.py
|
reiniervlinschoten/castoredc_api
|
54a71606fa681a05e795e42a37d4b4f58b97e787
|
[
"MIT"
] | 1
|
2021-08-06T07:06:37.000Z
|
2021-08-06T07:06:37.000Z
|
import pytest
from castoredc_api import CastorException
from castoredc_api.importer.import_data import import_data
class TestImportReportAsync:
"""Tests uploading data to Castor."""
def test_import_report_value_success(self, import_study):
    """Tests that asynchronously uploading report data as option values succeeds."""
    imported_data = import_data(
        data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_values.xlsx",
        column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
        study=import_study,
        label_data=False,
        target="Report",
        target_name="Medication",
        use_async=True,
    )
    assert imported_data == self.report_success
def test_import_report_label_success(self, import_study):
    """Tests that asynchronously uploading report data as option labels succeeds."""
    imported_data = import_data(
        data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_labels.xlsx",
        column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
        study=import_study,
        label_data=True,
        target="Report",
        target_name="Medication",
        use_async=True,
    )
    assert imported_data == self.report_success
def test_import_report_bulk_success(self, import_study):
    """Tests that asynchronously uploading label data in bulk succeeds.

    Bulk results are compared item-by-item because record ordering may vary.
    """
    imported_data = import_data(
        data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_labels_bulk.xlsx",
        column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
        study=import_study,
        label_data=True,
        target="Report",
        target_name="Medication",
        use_async=True,
    )
    for record in imported_data:
        for item in imported_data[record]:
            assert item in self.report_success_bulk[record]
def test_import_report_more_than_connections_success(self, import_study):
    """Tests that uploading more rows than max_connections still succeeds."""
    imported_data = import_data(
        data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_labels_bulk_large.xlsx",
        column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
        study=import_study,
        label_data=True,
        target="Report",
        target_name="Medication",
        use_async=True,
    )
    assert imported_data == self.report_more_than_connections
def test_import_report_value_missing(self, import_study):
    """Tests that uploading value data containing missings succeeds."""
    imported_data = import_data(
        data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_values_missings.xlsx",
        column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
        study=import_study,
        label_data=False,
        target="Report",
        target_name="Medication",
        use_async=True,
    )
    assert imported_data == self.report_missing
def test_import_report_label_missing(self, import_study):
    """Tests that uploading label data containing missings succeeds."""
    imported_data = import_data(
        data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_labels_missings.xlsx",
        column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
        study=import_study,
        label_data=True,
        target="Report",
        target_name="Medication",
        use_async=True,
    )
    assert imported_data == self.report_missing
def test_import_report_value_error(self, import_study):
    """Tests that uploading value data containing errors raises a CastorException."""
    with pytest.raises(CastorException) as e:
        import_data(
            data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_values_errors.xlsx",
            column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
            study=import_study,
            label_data=False,
            target="Report",
            target_name="Medication",
            use_async=True,
        )
    assert str(e.value) == self.report_error
def test_import_report_label_error(self, import_study):
    """Tests that uploading label data with errors raises a CastorException."""
    with pytest.raises(CastorException) as e:
        import_data(
            data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_labels_errors.xlsx",
            column_link_path="tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
            study=import_study,
            label_data=True,
            target="Report",
            target_name="Medication",
            use_async=True,
        )
    assert str(e.value) == self.report_error
def test_import_report_error_during_upload(self, import_study):
    """Tests if uploading data with an error during the upload process fails properly"""
    upload_kwargs = {
        "data_source_path": "tests/test_import/data_files_for_import_tests/data_file_report_medication_values_errors_upload.xlsx",
        "column_link_path": "tests/test_import/link_files_for_import_tests/report_link_file.xlsx",
        "study": import_study,
        "label_data": False,
        "target": "Report",
        "target_name": "Medication",
        "use_async": True,
    }
    # The import is expected to abort with a CastorException carrying
    # the standard non-viable-data message.
    with pytest.raises(CastorException) as excinfo:
        import_data(**upload_kwargs)
    assert str(excinfo.value) == self.report_error
def test_import_report_error_during_upload_failed_field(self, import_study):
    """Tests that importing a nonexistent field does not raise, but reports the field as failed per record."""
    # Unlike the other error tests this one expects a normal return value:
    # the unknown field ends up in each record's "failed" mapping.
    imported = import_data(
        data_source_path="tests/test_import/data_files_for_import_tests/data_file_report_medication_labels_nonexistent_field.xlsx",
        column_link_path="tests/test_import/link_files_for_import_tests/report_link_file_nonexistent_field.xlsx",
        study=import_study,
        label_data=True,
        target="Report",
        target_name="Medication",
        use_async=True,
    )
    assert imported == self.report_error_wrong_field
# Expected import outcome for the clean data files: one medication report
# per record, every field accepted, nothing in "failed".
report_success = {
    "110001": [
        {
            "success": {
                "med_name": "Azathioprine",
                "med_start": "05-12-2019",
                "med_stop": "05-12-2020",
                "med_dose": "0.05",
                "med_units": "3",
            },
            "failed": {},
        }
    ],
    "110002": [
        {
            "success": {
                "med_name": "Vedolizumab",
                "med_start": "17-08-2018",
                "med_stop": "17-09-2020",
                "med_dose": "300",
                "med_units": "7",
                "med_other_unit": "mg/4 weeks",
            },
            "failed": {},
        }
    ],
    "110003": [
        {
            "success": {
                "med_name": "Ustekinumab",
                "med_start": "19-12-2017",
                "med_stop": "03-06-2019",
                "med_dose": "90",
                "med_units": "7",
                "med_other_unit": "mg/8 weeks",
            },
            "failed": {},
        }
    ],
    "110004": [
        {
            "success": {
                "med_name": "Thioguanine",
                "med_start": "25-04-2020",
                "med_stop": "27-05-2021",
                "med_dose": "15",
                "med_units": "2",
            },
            "failed": {},
        }
    ],
    "110005": [
        {
            "success": {
                "med_name": "Tofacitinib",
                "med_start": "01-03-2020",
                "med_stop": "31-12-2999",
                "med_dose": "10",
                "med_units": "2",
            },
            "failed": {},
        }
    ],
}
# Expected outcome for the "missings" data files: only the fields present
# in the sheet are imported; absent cells simply do not appear in "success".
report_missing = {
    "110001": [
        {
            "success": {
                "med_name": "Azathioprine",
                "med_start": "05-12-2019",
                "med_stop": "05-12-2020",
                "med_dose": "0.05",
                "med_units": "3",
            },
            "failed": {},
        }
    ],
    "110002": [{"success": {"med_start": "17-08-2018"}, "failed": {}}],
    "110003": [
        {
            "success": {
                "med_start": "19-12-2017",
                "med_stop": "03-06-2019",
                "med_dose": "90",
                "med_units": "7",
                "med_other_unit": "mg/8 weeks",
            },
            "failed": {},
        }
    ],
    "110004": [
        {"success": {"med_name": "Thioguanine", "med_units": "2"}, "failed": {}}
    ],
    "110005": [
        {
            "success": {
                "med_name": "Tofacitinib",
                "med_start": "01-03-2020",
                "med_stop": "31-12-2999",
                "med_dose": "10",
            },
            "failed": {},
        }
    ],
}
# Expected outcome for the bulk data file: multiple medication reports
# attached to the same record (three for 110001, two for 110002).
report_success_bulk = {
    "110001": [
        {
            "success": {
                "med_name": "Azathioprine",
                "med_start": "05-12-2019",
                "med_stop": "05-12-2020",
                "med_dose": "0.05",
                "med_units": "3",
            },
            "failed": {},
        },
        {
            "success": {
                "med_name": "Vedolizumab",
                "med_start": "17-08-2018",
                "med_stop": "17-09-2020",
                "med_dose": "300",
                "med_units": "7",
                "med_other_unit": "mg/4 weeks",
            },
            "failed": {},
        },
        {
            "success": {
                "med_name": "Ustekinumab",
                "med_start": "19-12-2017",
                "med_stop": "03-06-2019",
                "med_dose": "90",
                "med_units": "7",
                "med_other_unit": "mg/8 weeks",
            },
            "failed": {},
        },
    ],
    "110002": [
        {
            "success": {
                "med_name": "Thioguanine",
                "med_start": "25-04-2020",
                "med_stop": "27-05-2021",
                "med_dose": "15",
                "med_units": "2",
            },
            "failed": {},
        },
        {
            "success": {
                "med_name": "Tofacitinib",
                "med_start": "01-03-2020",
                "med_stop": "31-12-2999",
                "med_dose": "10",
                "med_units": "2",
            },
            "failed": {},
        },
    ],
}
# Exact CastorException message expected when the dataset fails
# pre-import validation (compared via str(e.value) in the error tests).
report_error = (
    "Non-viable data found in dataset to be imported. See output folder for details"
)
# Expected outcome when the link file maps a nonexistent field (pat_sex):
# valid fields import normally, and every record reports pat_sex under
# "failed" with the API's BAD_REQUEST reason.
report_error_wrong_field = {
    "110001": [
        {
            "success": {
                "med_name": "Azathioprine",
                "med_start": "05-12-2019",
                "med_stop": "05-12-2020",
                "med_dose": "0.05",
                "med_units": "3",
            },
            "failed": {"pat_sex": ["BAD_REQUEST", "Unsupported field type"]},
        }
    ],
    "110002": [
        {
            "success": {
                "med_name": "Vedolizumab",
                "med_start": "17-08-2018",
                "med_stop": "17-09-2020",
                "med_dose": "300",
                "med_units": "7",
                "med_other_unit": "mg/4 weeks",
            },
            "failed": {"pat_sex": ["BAD_REQUEST", "Unsupported field type"]},
        }
    ],
    "110003": [
        {
            "success": {
                "med_name": "Ustekinumab",
                "med_start": "19-12-2017",
                "med_stop": "03-06-2019",
                "med_dose": "90",
                "med_units": "7",
                "med_other_unit": "mg/8 weeks",
            },
            "failed": {"pat_sex": ["BAD_REQUEST", "Unsupported field type"]},
        }
    ],
    "110004": [
        {
            "success": {
                "med_name": "Thioguanine",
                "med_start": "25-04-2020",
                "med_stop": "27-05-2021",
                "med_dose": "15",
                "med_units": "2",
            },
            "failed": {"pat_sex": ["BAD_REQUEST", "Unsupported field type"]},
        }
    ],
    "110005": [
        {
            "success": {
                "med_name": "Tofacitinib",
                "med_start": "01-03-2020",
                "med_stop": "31-12-2999",
                "med_dose": "10",
                "med_units": "2",
            },
            "failed": {"pat_sex": ["BAD_REQUEST", "Unsupported field type"]},
        }
    ],
}
# Expected outcome for the oversized bulk file: 39 identical medication
# reports attached to record 110006.
report_more_than_connections = {
    "110006": [
        {
            "success": {
                "med_name": "Azathioprine",
                "med_start": "05-12-2019",
                "med_stop": "05-12-2020",
                "med_dose": "0.05",
                "med_units": "3",
            },
            "failed": {},
        }
        for _ in range(39)
    ]
}
| 35.772277
| 135
| 0.476266
| 1,364
| 14,452
| 4.692082
| 0.103372
| 0.046875
| 0.040625
| 0.059375
| 0.902344
| 0.88875
| 0.88875
| 0.883906
| 0.872656
| 0.831563
| 0
| 0.057717
| 0.407764
| 14,452
| 403
| 136
| 35.861042
| 0.690034
| 0.044423
| 0
| 0.681319
| 0
| 0
| 0.297077
| 0.117639
| 0
| 0
| 0
| 0
| 0.027473
| 1
| 0.027473
| false
| 0
| 0.173077
| 0
| 0.21978
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f73ef8b45320b7f555fe98e8434d7029128b373
| 93
|
py
|
Python
|
src/__init__.py
|
kirbiyik/generate-any-text
|
7f9d78e439e23f99be34681268c052f7f6df9fdb
|
[
"MIT"
] | 11
|
2019-07-27T04:42:17.000Z
|
2020-11-15T21:55:40.000Z
|
src/__init__.py
|
kirbiyik/generate-any-text
|
7f9d78e439e23f99be34681268c052f7f6df9fdb
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
kirbiyik/generate-any-text
|
7f9d78e439e23f99be34681268c052f7f6df9fdb
|
[
"MIT"
] | 1
|
2019-07-27T14:02:40.000Z
|
2019-07-27T14:02:40.000Z
|
from .layers import *
from .model import *
from .optimizer import *
from .dataloader import *
| 23.25
| 25
| 0.752688
| 12
| 93
| 5.833333
| 0.5
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 93
| 4
| 25
| 23.25
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
488b0f5283d971475c4ebbcb495d025d65a798f0
| 178
|
py
|
Python
|
test_inputs/tests.py
|
gribbg/x7-testing
|
c062c430ff1c31e943f04f152c9c88c179f5085b
|
[
"BSD-2-Clause"
] | null | null | null |
test_inputs/tests.py
|
gribbg/x7-testing
|
c062c430ff1c31e943f04f152c9c88c179f5085b
|
[
"BSD-2-Clause"
] | null | null | null |
test_inputs/tests.py
|
gribbg/x7-testing
|
c062c430ff1c31e943f04f152c9c88c179f5085b
|
[
"BSD-2-Clause"
] | null | null | null |
"""
This file will generate output that will be in error because the
name of this module (tests) clashes with the @tests annotation.
"""
def this_wont_work():
    """Deliberately empty placeholder used only to exercise the annotation clash."""
    return None
| 19.777778
| 68
| 0.696629
| 27
| 178
| 4.518519
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230337
| 178
| 8
| 69
| 22.25
| 0.890511
| 0.719101
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
488bb55325651b61fca0dc5655afb0eda4f622c8
| 26,437
|
py
|
Python
|
server/tracebin_server/traces/fixture_gen.py
|
alex/tracebin
|
cd9717e750718dcb94140f60957c3f543915317f
|
[
"BSD-3-Clause"
] | 3
|
2015-11-08T12:45:45.000Z
|
2019-05-09T20:03:57.000Z
|
server/tracebin_server/traces/fixture_gen.py
|
alex/tracebin
|
cd9717e750718dcb94140f60957c3f543915317f
|
[
"BSD-3-Clause"
] | null | null | null |
server/tracebin_server/traces/fixture_gen.py
|
alex/tracebin
|
cd9717e750718dcb94140f60957c3f543915317f
|
[
"BSD-3-Clause"
] | null | null | null |
from textwrap import dedent
from fixture_generator import fixture_generator
from .models import (Log, RuntimeEnviroment, TimelineEvent, StatCounter,
BaseTrace, PythonTrace, RegexTrace, NumPyPyTrace, TraceSection, TraceChunk,
PythonChunk, ResOpChunk)
@fixture_generator(
    Log, RuntimeEnviroment, TimelineEvent, StatCounter, BaseTrace, PythonTrace,
    RegexTrace, NumPyPyTrace, TraceSection, TraceChunk, PythonChunk, ResOpChunk
)
def demo_data():
    """Build a complete demo Log fixture.

    Creates one Log with runtime-environment options, timeline events and
    stat counters, plus three traces (Python "main", a regex trace, a
    NumPyPy trace) and an inlined Python trace "main_inline".  Each Python
    trace is assembled section by section (entry / preamble / loop body)
    from alternating PythonChunk source snippets and ResOpChunk resop
    listings copied from a real PyPy JIT log.
    """
    log = Log.objects.create(
        uploader=None, public=True, command="pypy test.py", runtime=9.8,
    )
    # JIT, GC, and build options as the demo runtime would report them.
    # NOTE(review): "treshold" is spelled as-is in the data — presumably
    # mirroring PyPy's own option name; confirm before "fixing".
    for kind, key, value in [
        (RuntimeEnviroment.JIT_OPTION, "trace_limit", "6000"),
        (RuntimeEnviroment.JIT_OPTION, "loop_longevity", "1000"),
        (RuntimeEnviroment.JIT_OPTION, "retrace_limit", "5"),
        (RuntimeEnviroment.JIT_OPTION, "trace_eagerness", "200"),
        (RuntimeEnviroment.JIT_OPTION, "enable_opts", "all"),
        (RuntimeEnviroment.JIT_OPTION, "max_retrace_guards", "15"),
        (RuntimeEnviroment.JIT_OPTION, "treshold", "1039"),
        (RuntimeEnviroment.JIT_OPTION, "function_threshold", "1619"),
        (RuntimeEnviroment.JIT_OPTION, "inlining", "1"),
        (RuntimeEnviroment.GC_OPTION, "PYPY_GC_NURSERY", "4MB"),
        (RuntimeEnviroment.GC_OPTION, "PYPY_GC_MAJOR_COLLECT", "1.82"),
        (RuntimeEnviroment.GC_OPTION, "PYPY_GC_GROWTH", "1.4"),
        (RuntimeEnviroment.BUILD_OPTION, "PyPy Version", "c2d42bf471da"),
        (RuntimeEnviroment.BUILD_OPTION, "GC root finder", "asmgcc"),
        (RuntimeEnviroment.BUILD_OPTION, "Garbage collector", "minimark"),
    ]:
        log.enviroment_options.create(kind=kind, key=key, value=value)
    # For now we just care about the percent of total time, so this idiotic
    # representation is fine, these add up to 100.
    # NOTE(review): "gc-mior" looks like a typo for "gc-minor" — confirm
    # whether any consumer matches on the literal label before changing it.
    start_time = 0
    for event_type, duration in [
        ("jit-running", 65),
        ("gc-major", 12),
        ("jit-tracing", 8),
        ("gc-mior", 8),
        ("jit-backend-compile", 7),
    ]:
        log.timeline_events.create(
            event_type=event_type,
            start_time=start_time,
            end_time=start_time + duration
        )
        start_time += duration
    # Aggregate JIT/GC counters for the run.
    for label, value in [
        ("traces_compiled", 3),
        ("traces_aborted", 0),
        ("gc_major", 12),
        ("gc_minor", 37),
    ]:
        log.counters.create(label=label, count=value)
    # One trace of each supported kind.
    py_trace = PythonTrace.objects.create(
        log=log, root_file="test.py", root_function="main",
    )
    RegexTrace.objects.create(
        log=log, pattern=r"\w+",
    )
    NumPyPyTrace.objects.create(
        log=log, debug_repr="Call1(sin, Call2(multiply, Array, Scalar))",
    )
    # --- Python trace "main": entry section (frame unpacking resops) ---
    entry = py_trace.sections.create(label=TraceSection.ENTRY)
    ResOpChunk.objects.create(
        section=entry,
        ordering=0,
        raw_source=dedent("""
        [p0, p1]
        p2 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_last_exception 80>)
        p3 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_pycode 120>)
        i4 = getfield_gc(p0, descr=<FieldU pypy.interpreter.pyframe.PyFrame.inst_is_being_profiled 150>)
        p5 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_lastblock 96>)
        i6 = getfield_gc(p0, descr=<FieldS pypy.interpreter.pyframe.PyFrame.inst_valuestackdepth 128>)
        i7 = getfield_gc(p0, descr=<FieldS pypy.interpreter.pyframe.PyFrame.inst_last_instr 88>)
        p8 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_locals_stack_w 104>)
        p10 = getarrayitem_gc(p8, 0, descr=<ArrayP 8>)
        p12 = getarrayitem_gc(p8, 1, descr=<ArrayP 8>)
        p14 = getarrayitem_gc(p8, 2, descr=<ArrayP 8>)
        p16 = getarrayitem_gc(p8, 3, descr=<ArrayP 8>)
        p18 = getarrayitem_gc(p8, 4, descr=<ArrayP 8>)
        p20 = getarrayitem_gc(p8, 5, descr=<ArrayP 8>)
        p22 = getarrayitem_gc(p8, 6, descr=<ArrayP 8>)
        p23 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_cells 40>)
        """),
    )
    # --- Python trace "main": preamble, alternating source and resop chunks ---
    preamble = py_trace.sections.create(label=TraceSection.PREAMBLE)
    ResOpChunk.objects.create(
        section=preamble,
        ordering=0,
        raw_source=dedent("""
        label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, p16, p18, p20, p22, descr=TargetToken(140048017138976))
        """),
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=1,
        start_line=3,
        end_line=6,
        raw_source=dedent("""
        def main():
            data = [0] * N
            for i in xrange(N):
        """),
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=2,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #26 FOR_ITER')
        guard_value(i6, 4, descr=<Guard4>) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14, p16, p18, p20, p22]
        guard_class(p16, 38449928, descr=<Guard5>) [p1, p0, p16, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22]
        i26 = getfield_gc(p16, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_remaining 16>)
        i28 = int_gt(i26, 0)
        guard_true(i28, descr=<Guard6>) [p1, p0, p16, p2, p3, i4, p5, p10, p12, p14, p18, p20, p22]
        i29 = getfield_gc(p16, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current 8>)
        i30 = getfield_gc(p16, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_step 24>)
        i31 = int_add(i29, i30)
        i33 = int_sub(i26, 1)
        setfield_gc(p16, i31, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current 8>)
        setfield_gc(p16, i33, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_remaining 16>)
        guard_value(i4, 0, descr=<Guard7>) [i4, p1, p0, p2, p3, p5, p10, p12, p14, p16, p20, p22, i29]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #29 STORE_FAST')
        """)
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=3,
        start_line=6,
        end_line=7,
        raw_source="""        x = i ^ 3"""
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=4,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #32 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #35 LOAD_CONST')
        guard_value(p3, ConstPtr(ptr35), descr=<Guard8>) [p1, p0, p3, p2, p5, p10, p14, p16, p20, p22, i29]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #38 BINARY_XOR')
        i37 = int_xor(i29, 3)
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #39 STORE_FAST')
        """),
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=5,
        start_line=7,
        end_line=8,
        raw_source="""        x <<= 2"""
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=6,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #42 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #45 LOAD_CONST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #48 INPLACE_LSHIFT')
        i39 = int_lshift(i37, 2)
        i40 = int_rshift(i39, 2)
        i41 = int_ne(i40, i37)
        guard_false(i41, descr=<Guard9>) [p1, p0, i39, p2, p5, p10, p16, p22, i37, i29]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #49 STORE_FAST')
        """),
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=7,
        start_line=8,
        end_line=9,
        raw_source="""        x *= 7""",
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=8,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #52 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #55 LOAD_CONST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #58 INPLACE_MULTIPLY')
        i43 = int_mul_ovf(i39, 7)
        guard_no_overflow(, descr=<Guard10>) [p1, p0, i43, p2, p5, p10, p16, p22, i39, None, i29]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #59 STORE_FAST')
        """)
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=9,
        start_line=9,
        end_line=10,
        raw_source="""        x -= 1""",
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=10,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #62 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #65 LOAD_CONST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #68 INPLACE_SUBTRACT')
        i46 = int_sub_ovf(i43, 1)
        guard_no_overflow(, descr=<Guard11>) [p1, p0, i46, p2, p5, p10, p16, p22, i43, None, None, i29]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #69 STORE_FAST')
        """)
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=11,
        start_line=10,
        end_line=11,
        raw_source="""        data[i] = x""",
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=12,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #72 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #75 LOAD_FAST')
        guard_nonnull_class(p10, ConstClass(W_ListObject), descr=<Guard12>) [p1, p0, p10, p2, p5, p16, p22, i46, None, None, None, i29]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #78 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #81 STORE_SUBSCR')
        p48 = getfield_gc(p10, descr=<FieldP pypy.objspace.std.listobject.W_ListObject.inst_strategy 16>)
        guard_class(p48, 38554720, descr=<Guard13>) [p1, p0, p10, i29, p48, p2, p5, p16, i46, None, None, None, None]
        p50 = getfield_gc(p10, descr=<FieldP pypy.objspace.std.listobject.W_ListObject.inst_lstorage 8>)
        i51 = getfield_gc(p50, descr=<FieldS list.length 8>)
        i52 = uint_ge(i29, i51)
        guard_false(i52, descr=<Guard14>) [p1, p0, p10, i51, i46, i29, p50, p2, p5, p16, None, None, None, None, None]
        p53 = getfield_gc(p50, descr=<FieldP list.items 16>)
        setarrayitem_gc(p53, i29, i46, descr=<ArrayS 8>)
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #82 JUMP_ABSOLUTE')
        guard_not_invalidated(, descr=<Guard15>) [p1, p0, p2, p5, p10, p16, i46, None, None, None, i29]
        i55 = getfield_raw(43922552, descr=<FieldS pypysig_long_struct.c_value 0>)
        i57 = int_lt(i55, 0)
        guard_false(i57, descr=<Guard16>) [p1, p0, p2, p5, p10, p16, i46, None, None, None, i29]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #26 FOR_ITER')
        i58 = same_as(i46)
        i59 = same_as(i31)
        """)
    )
    # --- Python trace "main": loop body, same alternating structure ---
    loop = py_trace.sections.create(label=TraceSection.LOOP_BODY)
    ResOpChunk.objects.create(
        section=loop,
        ordering=0,
        raw_source=dedent("""
        label(p0, p1, p2, p5, p10, i29, i46, p16, i33, i59, i30, p50, descr=TargetToken(140048017139056))
        """)
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=1,
        start_line=3,
        end_line=6,
        raw_source=dedent("""
        def main():
            data = [0] * N
            for i in xrange(N):
        """),
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=2,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #26 FOR_ITER')
        i60 = int_gt(i33, 0)
        guard_true(i60, descr=<Guard17>) [p1, p0, p16, p2, p5, p10, i29, i46]
        i61 = int_add(i59, i30)
        i62 = int_sub(i33, 1)
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #29 STORE_FAST')
        """)
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=3,
        start_line=6,
        end_line=7,
        raw_source="""        x = i ^ 3""",
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=4,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #32 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #35 LOAD_CONST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #38 BINARY_XOR')
        i63 = int_xor(i59, 3)
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #39 STORE_FAST')
        """)
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=5,
        start_line=7,
        end_line=8,
        raw_source="""        x <<= 2""",
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=6,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #42 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #45 LOAD_CONST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #48 INPLACE_LSHIFT')
        i64 = int_lshift(i63, 2)
        i65 = int_rshift(i64, 2)
        setfield_gc(p16, i61, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current 8>)
        setfield_gc(p16, i62, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_remaining 16>)
        i66 = int_ne(i65, i63)
        guard_false(i66, descr=<Guard18>) [p1, p0, i64, p2, p5, p10, p16, i63, i59, None, None]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #49 STORE_FAST')
        """),
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=7,
        start_line=8,
        end_line=9,
        raw_source="""        x *= 7""",
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=8,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #52 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #55 LOAD_CONST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #58 INPLACE_MULTIPLY')
        i67 = int_mul_ovf(i64, 7)
        guard_no_overflow(, descr=<Guard19>) [p1, p0, i67, p2, p5, p10, p16, i64, None, i59, None, None]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #59 STORE_FAST')
        """)
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=9,
        start_line=9,
        end_line=10,
        raw_source="""        x -= 1""",
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=10,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #62 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #65 LOAD_CONST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #68 INPLACE_SUBTRACT')
        i68 = int_sub_ovf(i67, 1)
        guard_no_overflow(, descr=<Guard20>) [p1, p0, i68, p2, p5, p10, p16, i67, None, None, i59, None, None]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #69 STORE_FAST')
        """),
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=11,
        start_line=10,
        end_line=11,
        raw_source="""        data[i] = x""",
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=12,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #72 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #75 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #78 LOAD_FAST')
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #81 STORE_SUBSCR')
        i69 = getfield_gc(p50, descr=<FieldS list.length 8>)
        i70 = uint_ge(i59, i69)
        guard_false(i70, descr=<Guard21>) [p1, p0, p10, i69, i68, i59, p50, p2, p5, p16, None, None, None, None, None, None]
        p71 = getfield_gc(p50, descr=<FieldP list.items 16>)
        setarrayitem_gc(p71, i59, i68, descr=<ArrayS 8>)
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #82 JUMP_ABSOLUTE')
        guard_not_invalidated(, descr=<Guard22>) [p1, p0, p2, p5, p10, p16, i68, None, None, None, i59, None, None]
        i72 = getfield_raw(43922552, descr=<FieldS pypysig_long_struct.c_value 0>)
        i73 = int_lt(i72, 0)
        guard_false(i73, descr=<Guard23>) [p1, p0, p2, p5, p10, p16, i68, None, None, None, i59, None, None]
        debug_merge_point(0, '<code object main. file 'test.py'. line 3> #26 FOR_ITER')
        jump(p0, p1, p2, p5, p10, i59, i68, p16, i62, i61, i30, p50, descr=TargetToken(140048017139056))
        """)
    )
    # --- Second Python trace "main_inline": demonstrates an inlined call f() ---
    py_trace_inline = PythonTrace.objects.create(
        log=log, root_file="test.py", root_function="main_inline",
    )
    entry = py_trace_inline.sections.create(label=TraceSection.ENTRY)
    ResOpChunk.objects.create(
        section=entry,
        ordering=0,
        raw_source=dedent("""
        [p0, p1]
        p2 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_last_exception 80>)
        p3 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_pycode 120>)
        i4 = getfield_gc(p0, descr=<FieldU pypy.interpreter.pyframe.PyFrame.inst_is_being_profiled 150>)
        p5 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_lastblock 96>)
        i6 = getfield_gc(p0, descr=<FieldS pypy.interpreter.pyframe.PyFrame.inst_valuestackdepth 128>)
        i7 = getfield_gc(p0, descr=<FieldS pypy.interpreter.pyframe.PyFrame.inst_last_instr 88>)
        p8 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_locals_stack_w 104>)
        p10 = getarrayitem_gc(p8, 0, descr=<ArrayP 8>)
        p12 = getarrayitem_gc(p8, 1, descr=<ArrayP 8>)
        p14 = getarrayitem_gc(p8, 2, descr=<ArrayP 8>)
        p15 = getfield_gc(p0, descr=<FieldP pypy.interpreter.pyframe.PyFrame.inst_cells 40>)
        """),
    )
    preamble = py_trace_inline.sections.create(label=TraceSection.PREAMBLE)
    ResOpChunk.objects.create(
        section=preamble,
        ordering=1,
        raw_source=dedent("""
        label(p0, p1, p2, p3, i4, p5, i6, i7, p10, p12, p14, descr=TargetToken(139725302244320))
        """),
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=2,
        start_line=4,
        end_line=7,
        raw_source=dedent("""
        def main():
            i = 0
            while i < 10000:
        """),
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=3,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #9 LOAD_FAST')
        guard_value(i6, 1, descr=<Guard4>) [i6, p1, p0, p2, p3, i4, p5, i7, p10, p12, p14]
        guard_nonnull_class(p10, ConstClass(W_IntObject), descr=<Guard5>) [p1, p0, p10, p2, p3, i4, p5, p12, p14]
        guard_value(i4, 0, descr=<Guard6>) [i4, p1, p0, p2, p3, p5, p10, p14]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #12 LOAD_CONST')
        guard_value(p3, ConstPtr(ptr19), descr=<Guard7>) [p1, p0, p3, p2, p5, p10, p14]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #15 COMPARE_OP')
        i20 = getfield_gc_pure(p10, descr=<FieldS pypy.objspace.std.intobject.W_IntObject.inst_intval 8>)
        i22 = int_lt(i20, 10000)
        guard_true(i22, descr=<Guard8>) [p1, p0, p10, p2, p5]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #18 POP_JUMP_IF_FALSE')
        """),
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=4,
        start_line=7,
        end_line=8,
        raw_source="""        i = f(i)""",
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=5,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #21 LOAD_GLOBAL')
        p23 = getfield_gc(p0, descr=<FieldP pypy.interpreter.eval.Frame.inst_w_globals 8>)
        guard_value(p23, ConstPtr(ptr24), descr=<Guard9>) [p1, p0, p23, p2, p5, p10]
        p25 = getfield_gc(p23, descr=<FieldP pypy.objspace.std.dictmultiobject.W_DictMultiObject.inst_strategy 16>)
        guard_value(p25, ConstPtr(ptr26), descr=<Guard10>) [p1, p0, p25, p23, p2, p5, p10]
        guard_not_invalidated(, descr=<Guard11>) [p1, p0, p23, p2, p5, p10]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #24 LOAD_FAST')
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #27 CALL_FUNCTION')
        p28 = call(ConstClass(getexecutioncontext), descr=<Callr 8 EF=1>)
        p29 = getfield_gc(p28, descr=<FieldP pypy.interpreter.executioncontext.ExecutionContext.inst_topframeref 64>)
        i30 = force_token()
        p31 = getfield_gc(p28, descr=<FieldP pypy.interpreter.executioncontext.ExecutionContext.inst_w_tracefunc 80>)
        guard_isnull(p31, descr=<Guard12>) [p1, p0, p28, p31, p2, p5, p10, i30, p29]
        i32 = getfield_gc(p28, descr=<FieldU pypy.interpreter.executioncontext.ExecutionContext.inst_profilefunc 40>)
        i33 = int_is_zero(i32)
        guard_true(i33, descr=<Guard13>) [p1, p0, p28, p2, p5, p10, i30, p29]
        """),
    )
    PythonChunk.objects.create(
        section=preamble,
        ordering=6,
        start_line=1,
        end_line=3,
        raw_source=dedent("""
        def f(i):
            return i + 1
        """),
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=7,
        raw_source=dedent("""
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #0 LOAD_FAST')
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #3 LOAD_CONST')
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #6 BINARY_ADD')
        i35 = int_add(i20, 1)
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #7 RETURN_VALUE')
        """),
    )
    ResOpChunk.objects.create(
        section=preamble,
        ordering=8,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #30 STORE_FAST')
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #33 JUMP_ABSOLUTE')
        guard_not_invalidated(, descr=<Guard14>) [p1, p0, p2, p5, i35, None, None]
        i38 = getfield_raw(44216344, descr=<FieldS pypysig_long_struct.c_value 0>)
        i40 = int_lt(i38, 0)
        guard_false(i40, descr=<Guard15>) [p1, p0, p2, p5, i35, None, None]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #9 LOAD_FAST')
        p41 = same_as(ConstPtr(ptr26))
        """),
    )
    loop = py_trace_inline.sections.create(label=TraceSection.LOOP_BODY)
    ResOpChunk.objects.create(
        section=loop,
        ordering=0,
        raw_source=dedent("""
        label(p0, p1, p2, p5, i35, descr=TargetToken(139725302244400))
        """),
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=1,
        start_line=4,
        end_line=7,
        raw_source=dedent("""
        def main():
            i = 0
            while i < 10000:
        """),
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=2,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #9 LOAD_FAST')
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #12 LOAD_CONST')
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #15 COMPARE_OP')
        i42 = int_lt(i35, 10000)
        guard_true(i42, descr=<Guard16>) [p1, p0, p2, p5, i35]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #18 POP_JUMP_IF_FALSE')
        """),
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=3,
        start_line=7,
        end_line=8,
        raw_source="""        i = f(i)""",
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=4,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #21 LOAD_GLOBAL')
        guard_not_invalidated(, descr=<Guard17>) [p1, p0, p2, p5, i35]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #24 LOAD_FAST')
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #27 CALL_FUNCTION')
        i43 = force_token()
        """),
    )
    PythonChunk.objects.create(
        section=loop,
        ordering=5,
        start_line=1,
        end_line=3,
        raw_source=dedent("""
        def f(i):
            return i + 1
        """),
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=6,
        raw_source=dedent("""
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #0 LOAD_FAST')
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #3 LOAD_CONST')
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #6 BINARY_ADD')
        i44 = int_add(i35, 1)
        debug_merge_point(1, '<code object f. file 'test.py'. line 1> #7 RETURN_VALUE')
        """),
    )
    ResOpChunk.objects.create(
        section=loop,
        ordering=7,
        raw_source=dedent("""
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #30 STORE_FAST')
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #33 JUMP_ABSOLUTE')
        i45 = getfield_raw(44216344, descr=<FieldS pypysig_long_struct.c_value 0>)
        i46 = int_lt(i45, 0)
        guard_false(i46, descr=<Guard18>) [p1, p0, p2, p5, i44, None]
        debug_merge_point(0, '<code object main_inline. file 'test.py'. line 4> #9 LOAD_FAST')
        jump(p0, p1, p2, p5, i44, descr=TargetToken(139725302244400))
        """),
    )
| 44.431933
| 135
| 0.612513
| 3,571
| 26,437
| 4.356483
| 0.113974
| 0.030469
| 0.050138
| 0.068394
| 0.783056
| 0.749502
| 0.72109
| 0.712862
| 0.699492
| 0.686058
| 0
| 0.08484
| 0.248742
| 26,437
| 595
| 136
| 44.431933
| 0.698454
| 0.004312
| 0
| 0.61658
| 0
| 0.210708
| 0.679268
| 0.092284
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001727
| false
| 0
| 0.005181
| 0
| 0.010363
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2c45b1783d1dc87374d177921834282ee656e79
| 210
|
py
|
Python
|
packages/admin.py
|
dandeduck/package-tracking-web
|
f7cb3dffd6f7f6b7ced5b1106a049c79c192dfa5
|
[
"MIT"
] | 1
|
2021-02-11T22:16:51.000Z
|
2021-02-11T22:16:51.000Z
|
packages/admin.py
|
dandeduck/package-tracking-web
|
f7cb3dffd6f7f6b7ced5b1106a049c79c192dfa5
|
[
"MIT"
] | 54
|
2021-02-11T18:52:11.000Z
|
2021-06-13T13:45:01.000Z
|
packages/admin.py
|
dandeduck/package-tracking-web
|
f7cb3dffd6f7f6b7ced5b1106a049c79c192dfa5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Partner, Order, Address, Package
# Register every package-tracking model with the default admin site so
# they all get the stock list/change views.
for _model in (Partner, Order, Address, Package):
    admin.site.register(_model)
| 23.333333
| 53
| 0.780952
| 28
| 210
| 5.857143
| 0.428571
| 0.219512
| 0.414634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 210
| 8
| 54
| 26.25
| 0.886486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d2d83d452fd30e718546c0eac26fe03bbef59c06
| 1,647
|
py
|
Python
|
sympy/physics/optics/__init__.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 8,323
|
2015-01-02T15:51:43.000Z
|
2022-03-31T13:13:19.000Z
|
sympy/physics/optics/__init__.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 15,102
|
2015-01-01T01:33:17.000Z
|
2022-03-31T22:53:13.000Z
|
sympy/physics/optics/__init__.py
|
utkarshdeorah/sympy
|
dcdf59bbc6b13ddbc329431adf72fcee294b6389
|
[
"BSD-3-Clause"
] | 4,490
|
2015-01-01T17:48:07.000Z
|
2022-03-31T17:24:05.000Z
|
# Public API of the optics package, grouped by the submodule each name
# comes from (waves, gaussopt, medium, utils, polarization); see the
# imports below.
__all__ = [
    # .waves
    'TWave',

    # .gaussopt
    'RayTransferMatrix', 'FreeSpace', 'FlatRefraction', 'CurvedRefraction',
    'FlatMirror', 'CurvedMirror', 'ThinLens', 'GeometricRay', 'BeamParameter',
    'waist2rayleigh', 'rayleigh2waist', 'geometric_conj_ab',
    'geometric_conj_af', 'geometric_conj_bf', 'gaussian_conj',
    'conjugate_gauss_beams',

    # .medium
    'Medium',

    # .utils
    'refraction_angle', 'deviation', 'fresnel_coefficients', 'brewster_angle',
    'critical_angle', 'lens_makers_formula', 'mirror_formula', 'lens_formula',
    'hyperfocal_distance', 'transverse_magnification',

    # .polarization
    'jones_vector', 'stokes_vector', 'jones_2_stokes', 'linear_polarizer',
    'phase_retarder', 'half_wave_retarder', 'quarter_wave_retarder',
    'transmissive_filter', 'reflective_filter', 'mueller_matrix',
    'polarizing_beam_splitter',
]
from .waves import TWave
from .gaussopt import (RayTransferMatrix, FreeSpace, FlatRefraction,
CurvedRefraction, FlatMirror, CurvedMirror, ThinLens, GeometricRay,
BeamParameter, waist2rayleigh, rayleigh2waist, geometric_conj_ab,
geometric_conj_af, geometric_conj_bf, gaussian_conj,
conjugate_gauss_beams)
from .medium import Medium
from .utils import (refraction_angle, deviation, fresnel_coefficients,
brewster_angle, critical_angle, lens_makers_formula, mirror_formula,
lens_formula, hyperfocal_distance, transverse_magnification)
from .polarization import (jones_vector, stokes_vector, jones_2_stokes,
linear_polarizer, phase_retarder, half_wave_retarder,
quarter_wave_retarder, transmissive_filter, reflective_filter,
mueller_matrix, polarizing_beam_splitter)
| 42.230769
| 78
| 0.758349
| 162
| 1,647
| 7.265432
| 0.388889
| 0.06627
| 0.067969
| 0.095157
| 0.905692
| 0.905692
| 0.905692
| 0.905692
| 0.905692
| 0.905692
| 0
| 0.004243
| 0.141469
| 1,647
| 38
| 79
| 43.342105
| 0.828147
| 0
| 0
| 0
| 0
| 0
| 0.350941
| 0.054645
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
825b43a19aa2a825d590518292a7f19d23a5c051
| 21,681
|
py
|
Python
|
micro_admin/migrations/0001_initial.py
|
lance0145/micro-finance
|
1ba6339a9d05ff2f20b020b97a233c766b2ee6e0
|
[
"MIT"
] | 72
|
2015-09-18T07:23:20.000Z
|
2022-03-23T14:35:46.000Z
|
micro_admin/migrations/0001_initial.py
|
mohbadar/micro-finance
|
00fc9ad1e09cd6658aa5fa0dd991cf18fe2927a6
|
[
"MIT"
] | 68
|
2015-01-03T13:44:40.000Z
|
2021-06-10T20:00:23.000Z
|
micro_admin/migrations/0001_initial.py
|
mohbadar/micro-finance
|
00fc9ad1e09cd6658aa5fa0dd991cf18fe2927a6
|
[
"MIT"
] | 73
|
2015-02-10T07:03:42.000Z
|
2022-02-24T21:11:01.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-07 07:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the micro_admin app (auto-generated by Django 1.9.7).

    Creates the custom ``User`` model plus the core micro-finance entities
    (branches, centers, clients, groups, deposits, loans, payments,
    receipts, savings) and wires up their foreign keys.

    NOTE(review): this is a historical migration; it must not be edited in
    a way that changes the recorded schema. Suspicious defaults are flagged
    inline instead of being fixed here.
    """

    # First migration for this app.
    initial = True

    # Needs auth's validator/error-message migration because User references
    # auth.Permission below.
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
    ]

    operations = [
        # Custom user model with role and address fields.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('username', models.CharField(max_length=50, unique=True)),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100, null=True)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=10)),
                ('user_roles', models.CharField(choices=[('BranchManager', 'BranchManager'), ('LoanOfficer', 'LoanOfficer'), ('Cashier', 'Cashier')], max_length=20)),
                ('date_of_birth', models.DateField(default='2000-01-01', null=True)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('country', models.CharField(max_length=50, null=True)),
                ('state', models.CharField(max_length=50, null=True)),
                ('district', models.CharField(max_length=50, null=True)),
                ('city', models.CharField(max_length=50, null=True)),
                ('area', models.CharField(max_length=150, null=True)),
                ('mobile', models.CharField(default='0', max_length=10, null=True)),
                ('pincode', models.CharField(default='', max_length=10, null=True)),
            ],
            options={
                'permissions': (('branch_manager', 'Can manage all accounts under his/her branch.'),),
            },
        ),
        # Physical branch offices.
        migrations.CreateModel(
            name='Branch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('opening_date', models.DateField()),
                ('country', models.CharField(max_length=50)),
                ('state', models.CharField(max_length=50)),
                ('district', models.CharField(max_length=50)),
                ('city', models.CharField(max_length=50)),
                ('area', models.CharField(max_length=150)),
                ('phone_number', models.BigIntegerField()),
                ('pincode', models.IntegerField()),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        # Centers belong to a branch; their M2M to groups is added below via
        # AddField (after Group exists).
        migrations.CreateModel(
            name='Centers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('created_date', models.DateField()),
                ('is_active', models.BooleanField(default=True)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Branch')),
            ],
        ),
        # End customers, with fee/insurance running totals.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=200)),
                ('last_name', models.CharField(max_length=200)),
                ('email', models.EmailField(max_length=255, null=True)),
                ('account_number', models.CharField(max_length=50, unique=True)),
                ('date_of_birth', models.DateField()),
                # NOTE(review): boolean default on a CharField stores the
                # string 'True' -- looks unintended; confirm against app code.
                ('blood_group', models.CharField(default=True, max_length=10, null=True)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=10)),
                ('client_role', models.CharField(choices=[('FirstLeader', 'FirstLeader'), ('SecondLeader', 'SecondLeader'), ('GroupMember', 'GroupMember')], max_length=20)),
                ('occupation', models.CharField(max_length=200)),
                ('annual_income', models.BigIntegerField()),
                ('joined_date', models.DateField()),
                ('country', models.CharField(max_length=50)),
                ('state', models.CharField(max_length=50)),
                ('district', models.CharField(max_length=50)),
                ('city', models.CharField(max_length=50)),
                ('area', models.CharField(max_length=150)),
                # NOTE(review): same default=True-on-CharField issue as
                # blood_group, for mobile and pincode below.
                ('mobile', models.CharField(default=True, max_length=20, null=True)),
                ('pincode', models.CharField(default=True, max_length=20, null=True)),
                ('photo', models.ImageField(null=True, upload_to='static/images/users')),
                ('signature', models.ImageField(null=True, upload_to='static/images/signatures')),
                ('is_active', models.BooleanField(default=True)),
                ('status', models.CharField(default='UnAssigned', max_length=50, null=True)),
                ('sharecapital_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('entrancefee_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('membershipfee_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('bookfee_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('insurance_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Branch')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Fixed-term deposit accounts with nominee details.
        migrations.CreateModel(
            name='FixedDeposits',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deposited_date', models.DateField()),
                ('status', models.CharField(choices=[('Opened', 'Opened'), ('Closed', 'Closed')], max_length=20)),
                ('fixed_deposit_number', models.CharField(max_length=50, unique=True)),
                ('fixed_deposit_amount', models.DecimalField(decimal_places=6, max_digits=19)),
                ('fixed_deposit_period', models.IntegerField()),
                ('fixed_deposit_interest_rate', models.DecimalField(decimal_places=2, max_digits=5)),
                ('nominee_firstname', models.CharField(max_length=50)),
                ('nominee_lastname', models.CharField(max_length=50)),
                ('nominee_gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=10)),
                ('relationship_with_nominee', models.CharField(max_length=50)),
                ('nominee_date_of_birth', models.DateField()),
                ('nominee_occupation', models.CharField(max_length=50)),
                ('nominee_photo', models.ImageField(upload_to='static/images/users')),
                ('nominee_signature', models.ImageField(upload_to='static/images/signatures')),
                ('fixed_deposit_interest', models.DecimalField(blank=True, decimal_places=6, max_digits=19, null=True)),
                ('maturity_amount', models.DecimalField(blank=True, decimal_places=6, max_digits=19, null=True)),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Client')),
            ],
        ),
        # Client groups; staff/leaders optional at creation time.
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('account_number', models.CharField(max_length=50, unique=True)),
                ('activation_date', models.DateField()),
                ('is_active', models.BooleanField(default=True)),
                ('status', models.CharField(default='UnAssigned', max_length=50)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Branch')),
                ('clients', models.ManyToManyField(blank=True, to='micro_admin.Client')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_created_by', to=settings.AUTH_USER_MODEL)),
                ('staff', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Scheduled meetings for a group.
        migrations.CreateModel(
            name='GroupMeetings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meeting_date', models.DateField()),
                ('meeting_time', models.CharField(max_length=20)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Group')),
            ],
        ),
        # Loan accounts; either a client OR a group may own one (both FKs
        # are nullable).
        migrations.CreateModel(
            name='LoanAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account_no', models.CharField(max_length=50, unique=True)),
                ('interest_type', models.CharField(choices=[('Flat', 'Flat'), ('Declining', 'Declining')], max_length=20)),
                ('status', models.CharField(choices=[('Applied', 'Applied'), ('Withdrawn', 'Withdrawn'), ('Approved', 'Approved'), ('Rejected', 'Rejected'), ('Closed', 'Closed')], max_length=20)),
                ('opening_date', models.DateField(auto_now_add=True)),
                ('approved_date', models.DateField(blank=True, null=True)),
                ('loan_issued_date', models.DateField(blank=True, null=True)),
                ('closed_date', models.DateField(blank=True, null=True)),
                ('loan_amount', models.DecimalField(decimal_places=6, max_digits=19)),
                ('loan_repayment_period', models.IntegerField()),
                ('loan_repayment_every', models.IntegerField()),
                ('loan_repayment_amount', models.DecimalField(blank=True, decimal_places=6, max_digits=19, null=True)),
                ('total_loan_amount_repaid', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('loanpurpose_description', models.TextField()),
                ('annual_interest_rate', models.DecimalField(decimal_places=2, max_digits=5)),
                ('interest_charged', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('total_interest_repaid', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('total_loan_paid', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('total_loan_balance', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('loanprocessingfee_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('no_of_repayments_completed', models.IntegerField(default=0)),
                ('principle_repayment', models.DecimalField(blank=True, decimal_places=6, max_digits=19, null=True)),
                ('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Client')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Group')),
                ('loan_issued_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='loan_issued_by', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Outgoing payment vouchers (loans, salaries, withdrawals, ...).
        migrations.CreateModel(
            name='Payments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('voucher_number', models.CharField(max_length=50, unique=True)),
                ('payment_type', models.CharField(choices=[('Loans', 'Loans'), ('TravellingAllowance', 'TravellingAllowance'), ('Paymentofsalary', 'Paymentofsalary'), ('PrintingCharges', 'PrintingCharges'), ('StationaryCharges', 'StationaryCharges'), ('OtherCharges', 'OtherCharges'), ('SavingsWithdrawal', 'SavingsWithdrawal'), ('FixedWithdrawal', 'FixedWithdrawal'), ('RecurringWithdrawal', 'RecurringWithdrawal')], max_length=25)),
                ('amount', models.DecimalField(decimal_places=6, max_digits=19)),
                ('interest', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('total_amount', models.DecimalField(decimal_places=6, max_digits=19)),
                ('totalamount_in_words', models.CharField(max_length=200)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Branch')),
                ('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Client')),
                ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Group')),
                ('staff', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Incoming receipts; the *_atinstant columns snapshot balances at
        # receipt time.
        migrations.CreateModel(
            name='Receipts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('receipt_number', models.CharField(max_length=50, unique=True)),
                ('sharecapital_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('entrancefee_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('membershipfee_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('bookfee_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('loanprocessingfee_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('savingsdeposit_thrift_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('fixeddeposit_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('recurringdeposit_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('loanprinciple_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('loaninterest_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('insurance_amount', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('savings_balance_atinstant', models.DecimalField(blank=True, decimal_places=6, max_digits=19, null=True)),
                ('demand_loanprinciple_amount_atinstant', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('demand_loaninterest_amount_atinstant', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('principle_loan_balance_atinstant', models.DecimalField(blank=True, decimal_places=6, default=0, max_digits=19, null=True)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Branch')),
                ('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Client')),
                # NOTE(review): default=0 on a nullable ForeignKey is odd
                # (0 is not a valid Group pk) -- confirm intent.
                ('group', models.ForeignKey(blank=True, default=0, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Group')),
                ('group_loan_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='group_loan_account', to='micro_admin.LoanAccount')),
                ('member_loan_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.LoanAccount')),
                ('staff', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Recurring deposit accounts; mirrors FixedDeposits.
        migrations.CreateModel(
            name='RecurringDeposits',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deposited_date', models.DateField()),
                ('reccuring_deposit_number', models.CharField(max_length=50, unique=True)),
                ('status', models.CharField(choices=[('Opened', 'Opened'), ('Closed', 'Closed')], max_length=20)),
                ('recurring_deposit_amount', models.DecimalField(decimal_places=6, max_digits=19)),
                ('recurring_deposit_period', models.IntegerField()),
                ('recurring_deposit_interest_rate', models.DecimalField(decimal_places=2, max_digits=5)),
                ('nominee_firstname', models.CharField(max_length=50)),
                ('nominee_lastname', models.CharField(max_length=50)),
                ('nominee_gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=10)),
                ('relationship_with_nominee', models.CharField(max_length=50)),
                ('nominee_date_of_birth', models.DateField()),
                ('nominee_occupation', models.CharField(max_length=50)),
                ('nominee_photo', models.ImageField(upload_to='static/images/users')),
                ('nominee_signature', models.ImageField(upload_to='static/images/signatures')),
                ('recurring_deposit_interest', models.DecimalField(blank=True, decimal_places=6, max_digits=19, null=True)),
                ('maturity_amount', models.DecimalField(blank=True, decimal_places=6, max_digits=19, null=True)),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Client')),
            ],
        ),
        # Savings accounts for a client or a group.
        migrations.CreateModel(
            name='SavingsAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account_no', models.CharField(max_length=50, unique=True)),
                ('status', models.CharField(choices=[('Applied', 'Applied'), ('Withdrawn', 'Withdrawn'), ('Approved', 'Approved'), ('Rejected', 'Rejected'), ('Closed', 'Closed')], max_length=20)),
                ('opening_date', models.DateField()),
                ('min_required_balance', models.DecimalField(decimal_places=2, max_digits=5)),
                ('savings_balance', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('annual_interest_rate', models.DecimalField(decimal_places=2, max_digits=5)),
                ('total_deposits', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('total_withdrawals', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('fixeddeposit_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('fixeddepositperiod', models.IntegerField(blank=True, null=True)),
                ('recurringdeposit_amount', models.DecimalField(decimal_places=6, default=0, max_digits=19)),
                ('recurringdepositperiod', models.IntegerField(blank=True, null=True)),
                ('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Client')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Group')),
            ],
        ),
        # Deferred FKs/M2Ms that could not be declared inline above because
        # the target models are created later in this migration.
        migrations.AddField(
            model_name='centers',
            name='groups',
            field=models.ManyToManyField(blank=True, to='micro_admin.Group'),
        ),
        migrations.AddField(
            model_name='user',
            name='branch',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='micro_admin.Branch'),
        ),
        migrations.AddField(
            model_name='user',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, related_name='user_permissions', to='auth.Permission'),
        ),
    ]
| 74.25
| 434
| 0.62511
| 2,305
| 21,681
| 5.68026
| 0.109328
| 0.043993
| 0.059116
| 0.078821
| 0.804399
| 0.783396
| 0.737952
| 0.717483
| 0.686092
| 0.648591
| 0
| 0.020401
| 0.222268
| 21,681
| 291
| 435
| 74.505155
| 0.756079
| 0.00309
| 0
| 0.487633
| 1
| 0
| 0.185369
| 0.038777
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.003534
| 0.014134
| 0
| 0.028269
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
82639d04709bb7e35e36b9d519f7420e58a05774
| 67
|
py
|
Python
|
pymusical/__init__.py
|
jgueting/pymusical
|
5dfebbe496dd9e2e3aa04fbc9485b8f8ff588eba
|
[
"MIT"
] | null | null | null |
pymusical/__init__.py
|
jgueting/pymusical
|
5dfebbe496dd9e2e3aa04fbc9485b8f8ff588eba
|
[
"MIT"
] | null | null | null |
pymusical/__init__.py
|
jgueting/pymusical
|
5dfebbe496dd9e2e3aa04fbc9485b8f8ff588eba
|
[
"MIT"
] | null | null | null |
from pymusical.converter import MusicConverter, MusicConverterError
| 67
| 67
| 0.910448
| 6
| 67
| 10.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 67
| 1
| 67
| 67
| 0.968254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
82ba72859cc184d9e3af6870d7d31bfe1e648030
| 113
|
py
|
Python
|
__init__.py
|
Harryjun/pytorch-vsumm-reinforce
|
2200d58e855ae3ea42c98107dc059f691d138671
|
[
"MIT"
] | 18
|
2019-10-17T02:05:40.000Z
|
2021-05-08T15:39:49.000Z
|
__init__.py
|
Harryjun/pytorch-vsumm-reinforce
|
2200d58e855ae3ea42c98107dc059f691d138671
|
[
"MIT"
] | null | null | null |
__init__.py
|
Harryjun/pytorch-vsumm-reinforce
|
2200d58e855ae3ea42c98107dc059f691d138671
|
[
"MIT"
] | 1
|
2020-07-27T20:46:14.000Z
|
2020-07-27T20:46:14.000Z
|
# Make the helper modules under ./utils/ importable as top-level modules.
# NOTE(review): this relies on the process CWD being the project root and
# mutates sys.path at import time -- confirm this is intentional.
import sys
sys.path.append('./utils/')
# Re-export everything from the utility modules.
# NOTE(review): star imports make the exported names implicit; the actual
# API is whatever these three modules define.
from file_process import *
from knapsack import *
from vsum_tool import *
| 18.833333
| 27
| 0.761062
| 17
| 113
| 4.941176
| 0.647059
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132743
| 113
| 5
| 28
| 22.6
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d568330ec0526ae317a57d151bb8319e73a140e
| 4,464
|
py
|
Python
|
project/emccfr_all_games.py
|
aditya140/rlcard
|
de203b9b74a653019452aeb0622345f33dd42eda
|
[
"MIT"
] | null | null | null |
project/emccfr_all_games.py
|
aditya140/rlcard
|
de203b9b74a653019452aeb0622345f33dd42eda
|
[
"MIT"
] | null | null | null |
project/emccfr_all_games.py
|
aditya140/rlcard
|
de203b9b74a653019452aeb0622345f33dd42eda
|
[
"MIT"
] | null | null | null |
"""Examples of solving several rlcard games with EMCCFR.

Each ``train_*`` function trains an EMCCFR agent via self-play and
periodically evaluates it in a tournament against a pre-trained NFSP
opponent. The three public functions previously duplicated the whole
training loop; they now delegate to a single shared helper.
"""
import torch
import os
import sys
sys.path.append(".")
import numpy as np
import rlcard
from rlcard.agents import EMCCFRAgent, RandomAgent
from rlcard import models
from rlcard.utils import set_global_seed, tournament
from rlcard.utils import Logger


def _train(game, nfsp_model, log_dir, model_path=None,
           evaluate_every=100, evaluate_num=10000, episode_num=10000):
    """Run the EMCCFR training/evaluation loop for one game.

    :param game: rlcard environment id (e.g. ``'leduc-holdem'``).
    :param nfsp_model: name of the pre-trained NFSP model used as the
        evaluation opponent.
    :param log_dir: directory for logs and the learning curve.
    :param model_path: optional path the agent saves/loads its model at;
        when None the agent's default location is used.
    :param evaluate_every: evaluate (and save) every this many episodes.
    :param evaluate_num: number of tournament games per evaluation.
    :param episode_num: total number of training episodes.
    """
    # Training env must allow step_back for the CFR-style tree traversal;
    # the evaluation env does not need it.
    env = rlcard.make(game, config={'seed': 0, 'allow_step_back': True})
    eval_env = rlcard.make(game, config={'seed': 0})

    # Set a global seed for reproducibility.
    set_global_seed(0)

    # Initialize the EMCCFR agent and resume from a saved model if present.
    if model_path is None:
        agent = EMCCFRAgent(env)
    else:
        agent = EMCCFRAgent(env, model_path=model_path)
    agent.load()

    # Evaluate EMCCFR against the pre-trained NFSP agent.
    eval_env.set_agents([agent, models.load(nfsp_model).agents[0]])

    # Logger plots the learning curve from the logged tournament payoffs.
    logger = Logger(log_dir)
    for episode in range(episode_num):
        agent.train()
        print('\rIteration {}'.format(episode), end='')
        # Periodically save the model and log tournament performance.
        if episode % evaluate_every == 0:
            agent.save()
            logger.log_performance(
                env.timestep, tournament(eval_env, evaluate_num)[0])

    logger.close_files()
    logger.plot('EMCCFR')


def train_leduc():
    """Train EMCCFR on Leduc Hold'em (signature unchanged for callers)."""
    _train('leduc-holdem', 'leduc-holdem-nfsp',
           './experiments/leduc_holdem_emccfr_result/',
           model_path='models/leduc_holdem_emccfr')


def train_uno():
    """Train EMCCFR on Uno."""
    _train('uno', 'uno-nfsp', './experiments/uno_emccfr_result/')


def train_mahjong():
    """Train EMCCFR on Mahjong."""
    _train('mahjong', 'mahjong-nfsp', './experiments/mahjong_emccfr_result/')


if __name__ == "__main__":
    train_leduc()
    # train_uno()
    # train_mahjong()
| 30.162162
| 94
| 0.677419
| 610
| 4,464
| 4.813115
| 0.170492
| 0.015327
| 0.026567
| 0.040872
| 0.859332
| 0.859332
| 0.859332
| 0.85252
| 0.845027
| 0.798025
| 0
| 0.019954
| 0.225358
| 4,464
| 147
| 95
| 30.367347
| 0.829092
| 0.320341
| 0
| 0.618421
| 0
| 0
| 0.118434
| 0.045166
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039474
| false
| 0
| 0.118421
| 0
| 0.157895
| 0.039474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7d5958e81a3c96be8904d1145d2daff5d82d8b41
| 1,309
|
py
|
Python
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/__init__.py
|
ParadoxARG/Recognizers-Text
|
70c2a368e48fb0694f8a185574d6dd6076b29362
|
[
"MIT"
] | 10
|
2019-05-11T18:07:14.000Z
|
2021-08-20T03:02:47.000Z
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/__init__.py
|
ParadoxARG/Recognizers-Text
|
70c2a368e48fb0694f8a185574d6dd6076b29362
|
[
"MIT"
] | 76
|
2018-11-09T18:19:44.000Z
|
2019-08-20T20:29:53.000Z
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/__init__.py
|
ParadoxARG/Recognizers-Text
|
70c2a368e48fb0694f8a185574d6dd6076b29362
|
[
"MIT"
] | 18
|
2019-08-19T12:11:00.000Z
|
2021-10-12T09:36:27.000Z
|
from .base_date_time_extractor import *
from .duration_extractor_config import *
from .duration_extractor import *
from .time_extractor import *
from .date_extractor_config import *
from .date_extractor import *
from .timeperiod_extractor import *
from .dateperiod_extractor_config import *
from .dateperiod_extractor import *
from .datetime_extractor_config import *
from .datetime_extractor import *
from .datetimeperiod_extractor_config import *
from .datetimeperiod_extractor import *
from .set_extractor_config import *
from .set_extractor import *
from .holiday_extractor_config import *
from .merged_extractor_config import *
from .merged_extractor import *
from .duration_parser_config import *
from .duration_parser import *
from .time_parser import *
from .date_parser_config import *
from .date_parser import *
from .timeperiod_parser_config import *
from .timeperiod_parser import *
from .dateperiod_parser_config import *
from .dateperiod_parser import *
from .datetime_parser_config import *
from .datetime_parser import *
from .datetimeperiod_parser_config import *
from .datetimeperiod_parser import *
from .holiday_parser_config import *
from .holiday_parser import *
from .set_parser_config import *
from .set_parser import *
from .merged_parser_config import *
from .merged_parser import *
| 34.447368
| 46
| 0.830405
| 167
| 1,309
| 6.173653
| 0.095808
| 0.349176
| 0.263822
| 0.192047
| 0.068865
| 0.068865
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113063
| 1,309
| 37
| 47
| 35.378378
| 0.888028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d681fdf8380487d0dbc5b16bbcf61ea66ffdd9e
| 124
|
py
|
Python
|
tests/conftest.py
|
nanten2/necst-lib
|
763477825c24b4307028b2d8ac1d08954512899b
|
[
"MIT"
] | 1
|
2022-02-04T12:12:46.000Z
|
2022-02-04T12:12:46.000Z
|
tests/conftest.py
|
nanten2/neclib
|
763477825c24b4307028b2d8ac1d08954512899b
|
[
"MIT"
] | 14
|
2022-02-09T06:32:28.000Z
|
2022-03-27T10:27:20.000Z
|
tests/conftest.py
|
nanten2/neclib
|
763477825c24b4307028b2d8ac1d08954512899b
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
@pytest.fixture
def data_dir() -> Path:
    """Pytest fixture: path of the ``_data`` directory that sits next to this conftest."""
    return Path(__file__).parent / "_data"
| 15.5
| 42
| 0.725806
| 17
| 124
| 4.941176
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169355
| 124
| 7
| 43
| 17.714286
| 0.815534
| 0
| 0
| 0
| 0
| 0
| 0.040323
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
7d80a2d4612796abfb35f3900633eb4752f47e91
| 74
|
py
|
Python
|
src/aspire/basis/fpswf_2d.py
|
janden/ASPIRE-Python
|
5bcf831881fd0e42630c3b99671c5ed08de260ea
|
[
"MIT"
] | null | null | null |
src/aspire/basis/fpswf_2d.py
|
janden/ASPIRE-Python
|
5bcf831881fd0e42630c3b99671c5ed08de260ea
|
[
"MIT"
] | null | null | null |
src/aspire/basis/fpswf_2d.py
|
janden/ASPIRE-Python
|
5bcf831881fd0e42630c3b99671c5ed08de260ea
|
[
"MIT"
] | null | null | null |
from aspire.basis.pswf_2d import PSWF2D
class FPSWF2D(PSWF2D):
    """Placeholder subclass of PSWF2D.

    Currently adds no behavior of its own — presumably reserved for a "fast"
    PSWF 2D basis implementation (TODO: confirm intent with maintainers).
    """
    pass
| 12.333333
| 39
| 0.756757
| 11
| 74
| 5
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 0.175676
| 74
| 5
| 40
| 14.8
| 0.836066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7db55bb2614582e0cb28478e6149e2e228fd34fd
| 124
|
py
|
Python
|
src/python/zensols/deepnlp/index/__init__.py
|
plandes/deepnlp
|
49820084ccf797d59535d5920559ab768bf2ec73
|
[
"MIT"
] | 7
|
2020-05-11T07:13:56.000Z
|
2021-09-27T13:03:46.000Z
|
src/python/zensols/deepnlp/index/__init__.py
|
plandes/deepnlp
|
49820084ccf797d59535d5920559ab768bf2ec73
|
[
"MIT"
] | null | null | null |
src/python/zensols/deepnlp/index/__init__.py
|
plandes/deepnlp
|
49820084ccf797d59535d5920559ab768bf2ec73
|
[
"MIT"
] | 1
|
2022-02-12T00:22:26.000Z
|
2022-02-12T00:22:26.000Z
|
"""Contains classes for vectorizers for indexing document.
"""
from .domain import *
from .lsi import *
from .lda import *
| 17.714286
| 58
| 0.725806
| 16
| 124
| 5.625
| 0.6875
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169355
| 124
| 6
| 59
| 20.666667
| 0.873786
| 0.443548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7dd7b8b37b180c758ca299046bc44ee50698a8c1
| 1,836
|
py
|
Python
|
util.py/bond.py
|
yomichi/HPhi-samples
|
040cbf101dbe4c19df90fec22a21107730ba1879
|
[
"BSL-1.0"
] | null | null | null |
util.py/bond.py
|
yomichi/HPhi-samples
|
040cbf101dbe4c19df90fec22a21107730ba1879
|
[
"BSL-1.0"
] | null | null | null |
util.py/bond.py
|
yomichi/HPhi-samples
|
040cbf101dbe4c19df90fec22a21107730ba1879
|
[
"BSL-1.0"
] | null | null | null |
def bond(output, i, j, Jz, Jx, S2=1):
    """Write the InterAll lines for one exchange bond between sites i and j.

    output -- writable text stream (e.g. an open file)
    i, j   -- site indices of the bond
    Jz, Jx -- longitudinal and transverse coupling constants
    S2     -- twice the spin: 1 for S=1/2, 2 for S=1

    Raises ValueError for any other value of S2.
    """
    if S2 == 1:
        bond_half(output, i, j, Jz, Jx)
    elif S2 == 2:
        bond_one(output, i, j, Jz, Jx)
    else:
        # BUG FIX: the original called an undefined helper `error(...)`,
        # which died with a NameError; raise the intended message instead.
        raise ValueError("S2 should be 1 or 2.")
def bond_half(output, i, j, Jz, Jx):
    """Write the S=1/2 InterAll lines for the bond (i, j).

    Emits the four diagonal Sz-Sz terms followed by the two off-diagonal
    flip-flop terms (S_i^+ S_j^- and S_j^+ S_i^-), one line each, in the
    exact format and order the original produced.
    """
    zz = 0.25 * Jz
    xy = 0.5 * Jx
    template = '{} {} {} {} {} {} {} {} {} 0.0 \n'
    # Each entry: (site_a, site_b, s1, s2, s3, s4, coefficient)
    terms = (
        # diagonal
        (i, j, 0, 0, 0, 0, zz),
        (i, j, 1, 1, 0, 0, -zz),
        (i, j, 0, 0, 1, 1, -zz),
        (i, j, 1, 1, 1, 1, zz),
        # off-diagonal
        # S_i^+ S_j^-
        (i, j, 1, 0, 0, 1, xy),
        # S_j^+ S_i^-
        (j, i, 1, 0, 0, 1, xy),
    )
    for a, b, s1, s2, s3, s4, coeff in terms:
        output.write(template.format(a, s1, a, s2, b, s3, b, s4, coeff))
def bond_one(output, i, j, Jz, Jx):
    """Write the S=1 InterAll lines for the bond (i, j).

    Emits four diagonal Sz-Sz terms (states 0 and 2; the Sz=0 state
    contributes nothing) followed by eight off-diagonal Jx terms, one line
    each, in the exact format and order the original produced.
    """
    template = '{} {} {} {} {} {} {} {} {} 0.0 \n'
    # Each entry: (site_a, site_b, s1, s2, s3, s4, coefficient)
    terms = (
        # diagonal
        (i, j, 0, 0, 0, 0, Jz),
        (i, j, 2, 2, 0, 0, -Jz),
        (i, j, 0, 0, 2, 2, -Jz),
        (i, j, 2, 2, 2, 2, Jz),
        # off-diagonal
        # S_i^+ S_j^-
        (i, j, 1, 0, 0, 1, Jx),
        (i, j, 2, 1, 0, 1, Jx),
        (i, j, 1, 0, 1, 2, Jx),
        (i, j, 2, 1, 1, 2, Jx),
        # S_j^+ S_i^-
        (j, i, 1, 0, 0, 1, Jx),
        (j, i, 2, 1, 0, 1, Jx),
        (j, i, 1, 0, 1, 2, Jx),
        (j, i, 2, 1, 1, 2, Jx),
    )
    for a, b, s1, s2, s3, s4, coeff in terms:
        output.write(template.format(a, s1, a, s2, b, s3, b, s4, coeff))
def interall_header(output, L, S2=1):
    """Write the InterAll file header.

    L  -- number of bonds; S2 -- twice the spin (1 or 2).
    NInterAll is 6*S2*L: each bond contributes 6 lines for S=1/2 and
    12 lines for S=1 (see bond_half / bond_one).
    """
    count = 6 * S2 * L
    header_lines = (
        '=== header start\n',
        'NInterAll {}\n'.format(count),
        '=== header (reserved)\n',
        '=== header (reserved)\n',
        '=== header end\n',
    )
    for text in header_lines:
        output.write(text)
| 39.06383
| 67
| 0.494009
| 388
| 1,836
| 2.304124
| 0.097938
| 0.080537
| 0.060403
| 0.181208
| 0.831096
| 0.817673
| 0.817673
| 0.732662
| 0.655481
| 0.655481
| 0
| 0.085078
| 0.193355
| 1,836
| 46
| 68
| 39.913043
| 0.518569
| 0.049564
| 0
| 0.057143
| 0
| 0
| 0.366359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
81e6f9899791154757eceb01cc41dfcabdf0cfd3
| 125
|
py
|
Python
|
netkan/tests/__init__.py
|
Olympic1/NetKAN-Infra
|
ddad74c4942664e22719930a71ff5cc43f229352
|
[
"MIT"
] | null | null | null |
netkan/tests/__init__.py
|
Olympic1/NetKAN-Infra
|
ddad74c4942664e22719930a71ff5cc43f229352
|
[
"MIT"
] | null | null | null |
netkan/tests/__init__.py
|
Olympic1/NetKAN-Infra
|
ddad74c4942664e22719930a71ff5cc43f229352
|
[
"MIT"
] | null | null | null |
from .indexer import *
from .scheduler import *
from .utils import *
from .metadata import *
from .download_counter import *
| 20.833333
| 31
| 0.76
| 16
| 125
| 5.875
| 0.5
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 125
| 5
| 32
| 25
| 0.895238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c4a9ea260ab66eeceef383c04a4dbacd2d5c358e
| 35,171
|
py
|
Python
|
tests/v2_tests/test_zip.py
|
HiroakiMikami/pfio
|
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
|
[
"MIT"
] | null | null | null |
tests/v2_tests/test_zip.py
|
HiroakiMikami/pfio
|
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
|
[
"MIT"
] | null | null | null |
tests/v2_tests/test_zip.py
|
HiroakiMikami/pfio
|
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
|
[
"MIT"
] | null | null | null |
import io
import multiprocessing
import os
import pickle
import shutil
import subprocess
import sys
import tempfile
import unittest
from datetime import datetime
from zipfile import ZipFile
import pytest
from parameterized import parameterized
from pfio.testing import make_random_str, make_zip
from pfio.v2 import ZipFileStat, local
# Canonical names of the directories/files packed into the fixture zip
# built by TestZip.setUp (see the layout comment there).
ZIP_TEST_FILENAME_LIST = {
    "dir_name1": "testdir1",
    "dir_name2": "testdir2",
    "zipped_file_name": "testfile1",
    "testfile_name": "testfile2",
    "nested_dir_name": "nested_dir",
    "nested_zip_file_name": "nested.zip",
}
# Paths guaranteed NOT to exist inside the fixture zip (used by the
# negative isdir/exists tests).
NON_EXIST_LIST = ["does_not_exist", "does_not_exist/", "does/not/exist"]
class TestZip(unittest.TestCase):
    """Tests for pfio.v2 zip access (open/list/isdir/exists/stat/nested zips)
    against a fixture zip built fresh for every test in setUp."""
    def setUp(self):
        """Build the fixture zip (and a nested zip inside it) in a tempdir."""
        # The following zip layout is created for all the tests
        # outside.zip
        # | - testdir1
        # | | - nested1.zip
        # | | - nested_dir
        # | | - nested
        # | - testdir2
        # | | - testfile1
        # | - testfile2
        self.test_string = "this is a test string\n"
        self.nested_test_string = \
            "this is a test string for nested zip\n"
        self.test_string_b = self.test_string.encode("utf-8")
        self.nested_test_string_b = \
            self.nested_test_string.encode("utf-8")
        # the most outside zip
        self.zip_file_name = "outside"
        # nested zip and nested file
        self.tmpdir = tempfile.TemporaryDirectory()
        self.nested_zipped_file_name = "nested"
        self.nested_dir_name = ZIP_TEST_FILENAME_LIST["nested_dir_name"]
        self.nested_dir_path = os.path.join(self.tmpdir.name,
                                            self.nested_dir_name)
        self.nested_zip_file_name = \
            ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]
        # directory and file
        self.dir_name1 = ZIP_TEST_FILENAME_LIST["dir_name1"]
        self.dir_name2 = ZIP_TEST_FILENAME_LIST["dir_name2"]
        self.zipped_file_name = ZIP_TEST_FILENAME_LIST["zipped_file_name"]
        self.testfile_name = ZIP_TEST_FILENAME_LIST["testfile_name"]
        # paths used in making outside.zip
        dir_path1 = os.path.join(self.tmpdir.name, self.dir_name1)
        dir_path2 = os.path.join(self.tmpdir.name, self.dir_name2)
        testfile_path = os.path.join(self.tmpdir.name, self.testfile_name)
        nested_dir_path = os.path.join(self.tmpdir.name, self.nested_dir_name)
        zipped_file_path = os.path.join(dir_path2, self.zipped_file_name)
        nested_zipped_file_path = os.path.join(
            nested_dir_path, self.nested_zipped_file_name)
        nested_zip_file_path = os.path.join(
            dir_path1, self.nested_zip_file_name)
        # paths used in tests
        self.zip_file_path = self.zip_file_name + ".zip"
        self.zipped_file_path = os.path.join(self.dir_name2,
                                             self.zipped_file_name)
        self.nested_zip_path = os.path.join(
            self.dir_name1, self.nested_zip_file_name)
        self.nested_zipped_file_path = os.path.join(
            self.nested_dir_name, self.nested_zipped_file_name)
        os.mkdir(dir_path1)
        os.mkdir(dir_path2)
        os.mkdir(nested_dir_path)
        with open(zipped_file_path, "w") as tmpfile:
            tmpfile.write(self.test_string)
        with open(nested_zipped_file_path, "w") as tmpfile:
            tmpfile.write(self.nested_test_string)
        with open(testfile_path, "w") as tmpfile:
            tmpfile.write(self.test_string)
        make_zip(nested_zip_file_path,
                 root_dir=self.tmpdir.name,
                 base_dir=self.nested_dir_name)
        shutil.rmtree(nested_dir_path)
        # this will include outside.zip itself into the zip
        make_zip(self.zip_file_path,
                 root_dir=self.tmpdir.name,
                 base_dir=".")
    def tearDown(self):
        """Remove the tempdir and the fixture zip created by setUp."""
        self.tmpdir.cleanup()
        local.remove(self.zip_file_path)
    def test_read_bytes(self):
        with local.open_zip(os.path.abspath(self.zip_file_path)) as z:
            with z.open(self.zipped_file_path, "rb") as zipped_file:
                self.assertEqual(self.test_string_b, zipped_file.read())
    def test_read_string(self):
        with local.open_zip(os.path.abspath(self.zip_file_path)) as z:
            with z.open(self.zipped_file_path, "r") as zipped_file:
                self.assertEqual(self.test_string, zipped_file.readline())
    def test_write_bytes(self):
        testfile_name = "testfile3"
        test_string = "this is a written string\n"
        test_string_b = test_string.encode("utf-8")
        with local.open_zip(os.path.abspath(self.zip_file_path), 'w') as z:
            with z.open(testfile_name, "wb") as zipped_file:
                zipped_file.write(test_string_b)
        with local.open_zip(os.path.abspath(self.zip_file_path)) as z:
            with z.open(testfile_name, "rb") as zipped_file:
                self.assertEqual(test_string_b, zipped_file.readline())
    def test_write_string(self):
        testfile_name = "testfile3"
        test_string = "this is a written string\n"
        with local.open_zip(os.path.abspath(self.zip_file_path), 'w') as z:
            with z.open(testfile_name, "w") as zipped_file:
                zipped_file.write(test_string)
        with local.open_zip(os.path.abspath(self.zip_file_path)) as z:
            with z.open(testfile_name, "r") as zipped_file:
                self.assertEqual(test_string, zipped_file.readline())
    def test_open_non_exist(self):
        non_exist_file = "non_exist_file.txt"
        with local.open_zip(os.path.abspath(self.zip_file_path)) as z:
            # ZipFile raises KeyError while io module raises IOError
            self.assertRaises(KeyError, z.open, non_exist_file)
    @parameterized.expand([
        # not normalized path
        ['././{}//../{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                                   ZIP_TEST_FILENAME_LIST["dir_name2"],
                                   ZIP_TEST_FILENAME_LIST["zipped_file_name"])]
    ])
    def test_open_non_normalized_path(self, path_or_prefix):
        with local.open_zip(os.path.abspath(self.zip_file_path)) as z:
            with z.open(path_or_prefix, "r") as zipped_file:
                self.assertEqual(self.test_string, zipped_file.read())
    @parameterized.expand([
        # default case get the first level from the root
        ["",
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # Problem 1 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         False],
        # problem 2 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         False],
        # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         False],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # not normalized path beyond root
        ['//..//',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # not normalized path beyond root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # starting with slash
        ['/',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         False],
        # recursive test
        ['',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True],
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         True],
        # problem 2 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         True],
        # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         [ZIP_TEST_FILENAME_LIST["zipped_file_name"]],
         True],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True],
        # not normalized path beyond root
        ['//..//',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True],
        # starting with slash
        ['/',
         [ZIP_TEST_FILENAME_LIST["dir_name1"],
          ZIP_TEST_FILENAME_LIST["dir_name2"],
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name1"],
                       ZIP_TEST_FILENAME_LIST["nested_zip_file_name"]),
          os.path.join(ZIP_TEST_FILENAME_LIST["dir_name2"],
                       ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
          ZIP_TEST_FILENAME_LIST["testfile_name"]],
         True]]
    )
    def test_list(self, path_or_prefix, expected_list, recursive):
        with local.open_zip(self.zip_file_path) as z:
            zip_generator = z.list(path_or_prefix, recursive=recursive)
            zip_list = list(zip_generator)
            self.assertEqual(sorted(expected_list),
                             sorted(zip_list))
    @parameterized.expand([
        # non_exist_file
        ['does_not_exist', FileNotFoundError],
        # not exist but share the prefix
        ['{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"][:1]),
         FileNotFoundError],
        # broken path
        ['{}//{}/'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                          ZIP_TEST_FILENAME_LIST["zipped_file_name"][:1]),
         FileNotFoundError],
        # list a file
        ['{}//{}///'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                            ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         NotADirectoryError]
    ])
    def test_list_with_errors(self, path_or_prefix, error):
        with local.open_zip(self.zip_file_path) as z:
            with self.assertRaises(error):
                list(z.list(path_or_prefix))
            with self.assertRaises(error):
                list(z.list(path_or_prefix, recursive=True))
    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         True],
        # not normalized path
        ['{}//{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                         ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         False],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         False],
        # problem 2 in issue #66
        [ZIP_TEST_FILENAME_LIST["dir_name2"],
         True],
        # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         False],
        # not normalized path beyond root
        ['//..//',
         False],
        # starting with slash
        ['/',
         False]]
    )
    def test_isdir(self, path_or_prefix, expected):
        with local.open_zip(self.zip_file_path) as z:
            self.assertEqual(z.isdir(path_or_prefix),
                             expected)
    @parameterized.expand(NON_EXIST_LIST)
    def test_isdir_non_exist(self, path_or_prefix):
        with local.open_zip(self.zip_file_path) as z:
            self.assertFalse(z.isdir(path_or_prefix))
    def test_mkdir(self):
        with local.open_zip(self.zip_file_path) as z:
            self.assertRaises(io.UnsupportedOperation, z.mkdir, "test")
    def test_makedirs(self):
        with local.open_zip(self.zip_file_path) as z:
            self.assertRaises(io.UnsupportedOperation,
                              z.makedirs, "test/test")
    def test_pickle(self):
        pickle_file_name = "test_pickle.pickle"
        test_data = {'test_elem1': b'balabala',
                     'test_elem2': 'balabala'}
        pickle_zip = "test_pickle.zip"
        with open(pickle_file_name, "wb") as f:
            pickle.dump(test_data, f)
        with ZipFile(pickle_zip, "w") as test_zip:
            test_zip.write(pickle_file_name)
        with local.open_zip(pickle_zip) as z:
            with z.open(pickle_file_name, 'rb') as f:
                loaded_obj = pickle.load(f)
                self.assertEqual(test_data, loaded_obj)
        os.remove(pickle_file_name)
        os.remove(pickle_zip)
    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         True],
        # not normalized path
        ['{}//{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                         ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"][:-1]
                                ),
         False],
        # # not normalized path
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         True],
        # not normalized path root
        ['{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         False],
        # not normalized path beyond root
        ['//..//',
         False],
        # starting with slash
        ['/',
         False]]
    )
    def test_exists(self, path_or_prefix, expected):
        with local.open_zip(self.zip_file_path) as z:
            self.assertEqual(z.exists(path_or_prefix),
                             expected)
    @parameterized.expand(NON_EXIST_LIST)
    def test_not_exists(self, non_exist_file):
        with local.open_zip(self.zip_file_path) as z:
            self.assertFalse(z.exists(non_exist_file))
    def test_remove(self):
        with local.open_zip(self.zip_file_path) as z:
            self.assertRaises(io.UnsupportedOperation,
                              z.remove, "test/test", False)
    def test_nested_zip(self):
        with local.open_zip(self.zip_file_path) as z:
            with z.open_zip(
                    self.nested_zip_path) as nested_zip:
                with nested_zip.open(self.nested_zipped_file_path) as f:
                    self.assertEqual(f.read(), self.nested_test_string)
                with nested_zip.open(self.nested_zipped_file_path, "r") as f:
                    self.assertEqual(f.read(), self.nested_test_string)
                with nested_zip.open(self.nested_zipped_file_path, "rb") as f:
                    self.assertEqual(f.read(), self.nested_test_string_b)
    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
         '{}/'.format(ZIP_TEST_FILENAME_LIST["dir_name2"])],
        # not normalized path
        ['{}//{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                         ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         '{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                        ZIP_TEST_FILENAME_LIST["zipped_file_name"])],
        ['{}//..//{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name1"],
                                ZIP_TEST_FILENAME_LIST["dir_name2"],
                                ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         '{}/{}'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                        ZIP_TEST_FILENAME_LIST["zipped_file_name"])],
        ['{}//{}//../'.format(ZIP_TEST_FILENAME_LIST["dir_name2"],
                              ZIP_TEST_FILENAME_LIST["zipped_file_name"]),
         '{}/'.format(ZIP_TEST_FILENAME_LIST["dir_name2"])]
    ])
    def test_stat(self, path_or_prefix, expected):
        with local.open_zip(self.zip_file_path) as z:
            self.assertEqual(expected, z.stat(path_or_prefix).filename)
    @parameterized.expand([
        # not normalized path root
        '{}//..//'.format(ZIP_TEST_FILENAME_LIST["dir_name2"]),
        # not normalized path beyond root
        '//..//',
        # root
        '/'] + NON_EXIST_LIST)
    def test_stat_non_exist(self, path_or_prefix):
        with local.open_zip(self.zip_file_path) as z:
            with self.assertRaises(FileNotFoundError):
                z.stat(path_or_prefix)
    def test_stat_file(self):
        test_file_name = 'testdir2/testfile1'
        expected = ZipFile(self.zip_file_path).getinfo(test_file_name)
        with local.open_zip(self.zip_file_path) as z:
            stat = z.stat(test_file_name)
            self.assertIsInstance(stat, ZipFileStat)
            self.assertTrue(stat.filename.endswith(test_file_name))
            self.assertEqual(stat.size, expected.file_size)
            self.assertEqual(stat.mode, expected.external_attr >> 16)
            self.assertFalse(stat.isdir())
            expected_mtime = datetime(*expected.date_time).timestamp()
            self.assertIsInstance(stat.last_modified, float)
            self.assertEqual(stat.last_modified, expected_mtime)
            for k in ('filename', 'orig_filename', 'comment', 'create_system',
                      'create_version', 'extract_version', 'flag_bits',
                      'volume', 'internal_attr', 'external_attr', 'CRC',
                      'header_offset', 'compress_size', 'compress_type'):
                self.assertEqual(getattr(stat, k), getattr(expected, k))
    def test_stat_directory(self):
        test_dir_name = 'testdir2/'
        expected = ZipFile(self.zip_file_path).getinfo(test_dir_name)
        with local.open_zip(self.zip_file_path) as z:
            stat = z.stat(test_dir_name)
            self.assertIsInstance(stat, ZipFileStat)
            self.assertTrue(stat.filename.endswith(test_dir_name))
            self.assertEqual(stat.size, expected.file_size)
            self.assertEqual(stat.mode, expected.external_attr >> 16)
            self.assertTrue(stat.isdir())
            expected_mtime = datetime(*expected.date_time).timestamp()
            self.assertIsInstance(stat.last_modified, float)
            self.assertEqual(stat.last_modified, expected_mtime)
            for k in ('filename', 'orig_filename', 'comment', 'create_system',
                      'create_version', 'extract_version', 'flag_bits',
                      'volume', 'internal_attr', 'external_attr', 'CRC',
                      'header_offset', 'compress_size', 'compress_type'):
                self.assertEqual(getattr(stat, k), getattr(expected, k))
    def test_writing_after_listing(self):
        testfile_name = "testfile3"
        test_string = "this is a written string\n"
        with local.open_zip(
                os.path.abspath(self.zip_file_path), 'w') as z:
            list(z.list())
            with z.open(testfile_name, "w") as zipped_file:
                zipped_file.write(test_string)
    @pytest.mark.skipif(sys.version_info > (3, 5),
                        reason="requires python3.5 or lower")
    def test_mode_w_exception(self):
        testfile_name = "testfile3"
        test_string = "this is a written string\n"
        with local.open_zip(
                os.path.abspath(self.zip_file_path)) as z:
            with self.assertRaises(ValueError):
                with z.open(testfile_name, "w") as zipped_file:
                    zipped_file.write(test_string)
class TestZipWithLargeData(unittest.TestCase):
    """Tests zip access with a ~1 MiB member, including concurrent reads
    from forked processes (which must fail with RuntimeError in v2)."""
    def setUp(self):
        """Build a zip containing one large random-text file."""
        # The following zip layout is created for all the tests
        # outside.zip
        # | - testfile1
        n = 1 << 20
        self.test_string = make_random_str(n)
        # the most outside zip
        self.zip_file_name = "outside"
        # nested zip and nested file
        self.tmpdir = tempfile.TemporaryDirectory()
        # test file
        self.testfile_name = "testfile1"
        # paths used in making outside.zip
        testfile_path = os.path.join(self.tmpdir.name, self.testfile_name)
        # paths used in tests
        self.zip_file_path = self.zip_file_name + ".zip"
        with open(testfile_path, "w") as tmpfile:
            tmpfile.write(self.test_string)
        # this will include outside.zip itself into the zip
        make_zip(self.zip_file_path,
                 root_dir=self.tmpdir.name,
                 base_dir=".")
    def tearDown(self):
        """Remove the tempdir and the fixture zip created by setUp."""
        self.tmpdir.cleanup()
        local.remove(self.zip_file_path)
    def test_read_multi_processes(self):
        barrier = multiprocessing.Barrier(2)
        with local.open_zip(
                os.path.abspath(self.zip_file_path)) as z:
            with z.open(self.testfile_name) as f:
                f.read()
            def func():
                # accessing the shared container isn't supported in v2
                with self.assertRaises(RuntimeError):
                    with z.open(self.testfile_name) as f:
                        barrier.wait()
                        f.read()
            p1 = multiprocessing.Process(target=func)
            p2 = multiprocessing.Process(target=func)
            p1.start()
            p2.start()
            p1.join(timeout=1)
            p2.join(timeout=1)
            self.assertEqual(p1.exitcode, 0)
            self.assertEqual(p2.exitcode, 0)
# Names used by TestZipListNoDirectory for a zip created WITHOUT explicit
# directory entries (zip -D), to exercise listing logic on such archives.
NO_DIRECTORY_FILENAME_LIST = {
    "dir1_name": "testdir1",
    "dir2_name": "testdir2",
    "dir3_name": "testdir3",
    "testfile1_name": "testfile1",
    "testfile2_name": "testfile2",
    "testfile3_name": "testfile3",
    "testfile4_name": "testfile4",
}
class TestZipListNoDirectory(unittest.TestCase):
def setUp(self):
# The following zip layout is created for all the tests
# The difference is despite showing in the following layout for
# readabilty, the directories are not included in the zip
# outside.zip
# | - testdir1
# | - | - testfile1
# | - | - testdir2
# | - | - | - testfile2
# | - testdir3
# | | - testfile3
# | - testfile4
self.test_string = "this is a test string\n"
# the most outside zip
self.zip_file_name = "outside.zip"
# nested zip and nested file
self.tmpdir = tempfile.TemporaryDirectory()
# directory and file
self.dir1_name = NO_DIRECTORY_FILENAME_LIST["dir1_name"]
self.dir2_name = NO_DIRECTORY_FILENAME_LIST["dir2_name"]
self.dir3_name = NO_DIRECTORY_FILENAME_LIST["dir3_name"]
self.testfile1_name = NO_DIRECTORY_FILENAME_LIST["testfile1_name"]
self.testfile2_name = NO_DIRECTORY_FILENAME_LIST["testfile2_name"]
self.testfile3_name = NO_DIRECTORY_FILENAME_LIST["testfile3_name"]
self.testfile4_name = NO_DIRECTORY_FILENAME_LIST["testfile4_name"]
# paths used in making outside.zip
dir1_path = os.path.join(self.tmpdir.name, self.dir1_name)
dir2_path = os.path.join(dir1_path, self.dir2_name)
dir3_path = os.path.join(self.tmpdir.name, self.dir3_name)
testfile1_path = os.path.join(dir1_path, self.testfile1_name)
testfile2_path = os.path.join(dir2_path, self.testfile2_name)
testfile3_path = os.path.join(dir3_path, self.testfile3_name)
testfile4_path = os.path.join(self.tmpdir.name, self.testfile4_name)
# paths used in tests
for dir in [dir1_path, dir2_path, dir3_path]:
os.mkdir(dir)
for file_path in [testfile1_path, testfile2_path,
testfile3_path, testfile4_path]:
with open(file_path, "w") as f:
f.write(self.test_string)
# create zip without directory
self.pwd = os.getcwd()
os.chdir(self.tmpdir.name)
cmd = ["zip", "-rD", self.zip_file_name, "."]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
assert stderr == b""
def tearDown(self):
os.chdir(self.pwd)
self.tmpdir.cleanup()
@parameterized.expand([
# default case get the first level from the root
["", [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
False],
# Problem 1 in issue #66
[NO_DIRECTORY_FILENAME_LIST["dir1_name"],
[NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"]],
False],
# problem 2 in issue #66
[os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"]),
[NO_DIRECTORY_FILENAME_LIST["testfile2_name"]],
False],
# not normalized path
['{}//{}//../'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
[NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"]],
False],
# not normalized path root
['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]),
[NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
False],
# not normalized path beyond root
['//..//',
[NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
False],
# not normalized path beyond root
['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]),
[NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
False],
# starting with slash
['/', [NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
False],
# recursive test
['',
[os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
True],
[NO_DIRECTORY_FILENAME_LIST["dir1_name"],
[NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"])],
True],
# problem 2 in issue #66
[NO_DIRECTORY_FILENAME_LIST["dir1_name"],
[NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"])],
True],
# not normalized path
['{}//{}//../'.format(
NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
[NO_DIRECTORY_FILENAME_LIST["testfile1_name"],
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"])],
True],
# not normalized path root
['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir2_name"]),
[os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
True],
# not normalized path beyond root
['//..//',
[os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
True],
# not normalized path beyond root
['{}//..//../'.format(NO_DIRECTORY_FILENAME_LIST["dir2_name"]),
[os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
True],
# starting with slash
['/',
[os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
NO_DIRECTORY_FILENAME_LIST["dir2_name"],
NO_DIRECTORY_FILENAME_LIST["testfile2_name"]),
os.path.join(NO_DIRECTORY_FILENAME_LIST["dir3_name"],
NO_DIRECTORY_FILENAME_LIST["testfile3_name"]),
NO_DIRECTORY_FILENAME_LIST["testfile4_name"]],
True]
])
def test_list(self, path_or_prefix, expected_list, recursive):
with local.open_zip(self.zip_file_name) as z:
zip_generator = z.list(path_or_prefix, recursive=recursive)
zip_list = list(zip_generator)
self.assertEqual(sorted(expected_list),
sorted(zip_list))
    @parameterized.expand([
        # nonexistent file
        ['does_not_exist', FileNotFoundError],
        # does not exist, but shares a prefix with existing entries
        ['t', FileNotFoundError],
        # broken path
        ['{}//t/'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]),
         FileNotFoundError],
        # listing a file (not a directory)
        ['{}//{}///'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                            NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         NotADirectoryError],
        # nonexistent dir that shares a suffix with an existing one
        ['{}/'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"][:-1]),
         FileNotFoundError]
    ])
    def test_list_with_errors(self, path_or_prefix, error):
        """Listing an invalid path raises *error* both non-recursively and recursively."""
        with local.open_zip(self.zip_file_name) as z:
            with self.assertRaises(error):
                list(z.list(path_or_prefix))
            with self.assertRaises(error):
                list(z.list(path_or_prefix, recursive=True))
    @parameterized.expand([
        # path ends with slash
        ['{}//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]), True],
        # not normalized path pointing at a file
        ['{}//{}'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                         NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         False],
        ['{}//..//{}/{}'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                                NO_DIRECTORY_FILENAME_LIST["dir2_name"],
                                NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         False],
        # problem 2 in issue #66: bare directory name without trailing slash
        [NO_DIRECTORY_FILENAME_LIST["dir1_name"], True],
        # not normalized path that resolves back to a directory
        ['{}//{}//../'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"],
                              NO_DIRECTORY_FILENAME_LIST["testfile1_name"]),
         True],
        # not normalized path resolving to the archive root
        ['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]), False],
        # not normalized path beyond root
        ['//..//', False],
        # NOTE(review): duplicate of the "resolving to the archive root" case above
        ['{}//..//'.format(NO_DIRECTORY_FILENAME_LIST["dir1_name"]), False],
        # starting with slash
        ['/', False]
    ])
    def test_isdir(self, path_or_prefix, expected):
        """isdir() classifies (possibly unnormalized) archive paths as dir or not."""
        with local.open_zip(self.zip_file_name) as z:
            self.assertEqual(z.isdir(path_or_prefix),
                             expected)
@parameterized.expand([
["does_not_exist"],
["does_not_exist/"],
["does/not/exist"]
])
def test_isdir_not_exist(self, dir):
with local.open_zip(self.zip_file_name) as z:
self.assertFalse(z.isdir(dir))
| 41.377647
| 79
| 0.602286
| 4,114
| 35,171
| 4.784638
| 0.062713
| 0.13229
| 0.085349
| 0.108108
| 0.828795
| 0.806594
| 0.784698
| 0.746037
| 0.717639
| 0.699858
| 0
| 0.01248
| 0.282335
| 35,171
| 849
| 80
| 41.426384
| 0.767363
| 0.07654
| 0
| 0.622084
| 0
| 0
| 0.125305
| 0
| 0
| 0
| 0
| 0
| 0.07465
| 1
| 0.054432
| false
| 0
| 0.023328
| 0
| 0.082426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4e7a2a2ac1c872fead87410405f22d1617f105a
| 74
|
py
|
Python
|
src/utils/callbacks/__init__.py
|
kryvokhyzha/bert-for-ukranian-ner
|
48da40f09cb216ad51a97c303998157858fbe8bc
|
[
"MIT"
] | null | null | null |
src/utils/callbacks/__init__.py
|
kryvokhyzha/bert-for-ukranian-ner
|
48da40f09cb216ad51a97c303998157858fbe8bc
|
[
"MIT"
] | null | null | null |
src/utils/callbacks/__init__.py
|
kryvokhyzha/bert-for-ukranian-ner
|
48da40f09cb216ad51a97c303998157858fbe8bc
|
[
"MIT"
] | null | null | null |
from utils.callbacks.AccuracyCallbackCustom import AccuracyCallbackCustom
| 37
| 73
| 0.918919
| 6
| 74
| 11.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 74
| 1
| 74
| 74
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4848054099bf561beeffed4aa75a0a6768d7dbb6
| 36
|
py
|
Python
|
tests/test_mathformsmk.py
|
KarlosMuradyan/mathformsmk
|
de916750bfcdff3c7a1f226a66ddaed7b2e9b92f
|
[
"MIT"
] | null | null | null |
tests/test_mathformsmk.py
|
KarlosMuradyan/mathformsmk
|
de916750bfcdff3c7a1f226a66ddaed7b2e9b92f
|
[
"MIT"
] | null | null | null |
tests/test_mathformsmk.py
|
KarlosMuradyan/mathformsmk
|
de916750bfcdff3c7a1f226a66ddaed7b2e9b92f
|
[
"MIT"
] | null | null | null |
from mathformsmk import mathformsmk
| 18
| 35
| 0.888889
| 4
| 36
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4863e39ebc09e78ccd37269574e1238fe9bab7fb
| 3,471
|
py
|
Python
|
tests/admin/test_user_phones.py
|
joshua-cerniglia/duo_client_python
|
9d91cdd505d9ed999f8d79305181587d626186bd
|
[
"Apache-2.0"
] | 96
|
2015-01-02T08:03:29.000Z
|
2022-03-28T13:31:39.000Z
|
tests/admin/test_user_phones.py
|
joshua-cerniglia/duo_client_python
|
9d91cdd505d9ed999f8d79305181587d626186bd
|
[
"Apache-2.0"
] | 87
|
2015-05-12T02:44:33.000Z
|
2022-01-20T05:53:27.000Z
|
tests/admin/test_user_phones.py
|
joshua-cerniglia/duo_client_python
|
9d91cdd505d9ed999f8d79305181587d626186bd
|
[
"Apache-2.0"
] | 110
|
2015-03-03T20:23:42.000Z
|
2021-12-16T23:01:29.000Z
|
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestUserPhones(TestAdmin):
    """Tests for the Admin API user-phones endpoints.

    Each test inspects the request the client builds (HTTP method, URI and
    query parameters) rather than a live server response.
    """

    def test_get_user_phones_iterator(self):
        """Test to get phones iterator by user id"""
        iterator = self.client_list.get_user_phones_iterator(
            'DU012345678901234567')
        response = next(iterator)
        uri, args = response['uri'].split('?')
        self.assertEqual(response['method'], 'GET')
        self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
        self.assertEqual(util.params_to_dict(args),
                         {
                             'account_id': [self.client.account_id],
                             'limit': ['100'],
                             'offset': ['0'],
                         })

    def test_get_user_phones(self):
        """Test to get phones by user id"""
        response = self.client_list.get_user_phones('DU012345678901234567')[0]
        uri, args = response['uri'].split('?')
        self.assertEqual(response['method'], 'GET')
        self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
        self.assertEqual(util.params_to_dict(args),
                         {
                             'account_id': [self.client.account_id],
                             'limit': ['100'],
                             'offset': ['0'],
                         })

    def test_get_user_phones_with_offset(self):
        """Test to get phones by user id with pagination params.

        NOTE(review): the expected 'offset' stays '0' even though offset=30 is
        passed -- presumably the client ignores offset unless limit is also
        supplied; confirm against the client implementation.
        """
        response = self.client_list.get_user_phones(
            'DU012345678901234567', offset=30)[0]
        uri, args = response['uri'].split('?')
        self.assertEqual(response['method'], 'GET')
        self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
        self.assertEqual(util.params_to_dict(args),
                         {
                             'account_id': [self.client.account_id],
                             'limit': ['100'],
                             'offset': ['0'],
                         })

    def test_get_user_phones_with_limit(self):
        """Test to get phones by user id with an explicit limit"""
        response = self.client_list.get_user_phones(
            'DU012345678901234567', limit=10)[0]
        uri, args = response['uri'].split('?')
        self.assertEqual(response['method'], 'GET')
        self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
        self.assertEqual(util.params_to_dict(args),
                         {
                             'account_id': [self.client.account_id],
                             'limit': ['10'],
                             'offset': ['0'],
                         })

    def test_get_user_phones_with_limit_and_offset(self):
        """Test to get phones by user id with both limit and offset honored"""
        response = self.client_list.get_user_phones(
            'DU012345678901234567', limit=10, offset=30)[0]
        uri, args = response['uri'].split('?')
        self.assertEqual(response['method'], 'GET')
        self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
        self.assertEqual(util.params_to_dict(args),
                         {
                             'account_id': [self.client.account_id],
                             'limit': ['10'],
                             'offset': ['30'],
                         })
if __name__ == '__main__':
    # Fixed guard: was '__main' (missing trailing underscore), so the suite
    # never ran when the file was executed directly.  unittest is imported
    # locally because it is not imported at module top level.
    import unittest
    unittest.main()
| 38.566667
| 78
| 0.519447
| 334
| 3,471
| 5.197605
| 0.140719
| 0.129608
| 0.074885
| 0.040323
| 0.889401
| 0.866935
| 0.851382
| 0.851382
| 0.804147
| 0.783986
| 0
| 0.096128
| 0.352636
| 3,471
| 89
| 79
| 39
| 0.676458
| 0.075771
| 0
| 0.636364
| 0
| 0
| 0.160731
| 0.067759
| 0
| 0
| 0
| 0
| 0.227273
| 1
| 0.075758
| false
| 0
| 0.045455
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4886dc933255d27840a44f9c2fc19a05adc03c51
| 22
|
py
|
Python
|
hello-scripts/harrison-mitchell.py
|
StuWares/Hacktoberfest2018
|
fb1efa15c37cd03bb9da89981aa26d152414a273
|
[
"MIT"
] | null | null | null |
hello-scripts/harrison-mitchell.py
|
StuWares/Hacktoberfest2018
|
fb1efa15c37cd03bb9da89981aa26d152414a273
|
[
"MIT"
] | null | null | null |
hello-scripts/harrison-mitchell.py
|
StuWares/Hacktoberfest2018
|
fb1efa15c37cd03bb9da89981aa26d152414a273
|
[
"MIT"
] | null | null | null |
print("He"+"l"*2+"o")
| 11
| 21
| 0.454545
| 5
| 22
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.045455
| 22
| 1
| 22
| 22
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6ff884019d0c956fdc3e80e0cc76347601fa2f55
| 11,100
|
py
|
Python
|
boardfarm/lib/gui_helper.py
|
superice119/boardfarm
|
c525b4da94bf745d30c4a9f675aa4a7ae184b1fd
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
boardfarm/lib/gui_helper.py
|
superice119/boardfarm
|
c525b4da94bf745d30c4a9f675aa4a7ae184b1fd
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
boardfarm/lib/gui_helper.py
|
superice119/boardfarm
|
c525b4da94bf745d30c4a9f675aa4a7ae184b1fd
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# Copyright (c) 2018
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import time
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.select import Select
def enter_input(web_gui, input_path, input_value):
    """Clear and type *input_value* into the text box with element id *input_path*.

    :param web_gui: initialized selenium web driver
    :param input_path: element id of the text box
    :param input_value: value to type into the box
    :return: True on success, False if the element is not found
    :rtype: bool
    """
    try:
        input_tab = web_gui.find_element_by_id(input_path)
        input_tab.clear()
        input_tab.send_keys(input_value)
        return True
    except NoSuchElementException:
        return False
def click_button_id(web_gui, clickbutton):
    """Click the button with element id *clickbutton*, then wait 5 s for the page.

    :param web_gui: initialized selenium web driver
    :param clickbutton: element id of the button
    :return: True on success, False if the element is not found
    :rtype: bool
    """
    try:
        click_tab = web_gui.find_element_by_id(clickbutton)
        click_tab.click()
        time.sleep(5)
        return True
    except NoSuchElementException:
        return False
def click_button_xpath(web_gui, clickbutton):
    """Click the button located by xpath *clickbutton*, then wait 5 s for the page.

    :param web_gui: initialized selenium web driver
    :param clickbutton: xpath of the button
    :return: True on success, False if the element is not found
    :rtype: bool
    """
    try:
        click_tab = web_gui.find_element_by_xpath(clickbutton)
        click_tab.click()
        time.sleep(5)
        return True
    except NoSuchElementException:
        return False
def select_option_by_id(web_gui, select_button, select_value):
    """Select *select_value* (visible text) in the drop-down with id *select_button*.

    :param web_gui: initialized selenium web driver
    :param select_button: element id of the drop-down
    :param select_value: visible text of the option to select
    :return: the Select wrapper on success, None if the element is not found
    """
    try:
        select = Select(web_gui.find_element_by_id(select_button))
        select.select_by_visible_text(select_value)
        time.sleep(5)
        return select
    except NoSuchElementException:
        return None
def select_option_by_name(web_gui, select_button, select_value):
    """Select *select_value* (visible text) in the drop-down with name *select_button*.

    :param web_gui: initialized selenium web driver
    :param select_button: element name of the drop-down
    :param select_value: visible text of the option to select
    :return: the Select wrapper on success, None if the element is not found
    """
    try:
        select = Select(web_gui.find_element_by_name(select_button))
        select.select_by_visible_text(select_value)
        time.sleep(5)
        return select
    except NoSuchElementException:
        return None
def select_option_by_xpath(web_gui, select_button, select_value):
    """Select *select_value* (visible text) in the drop-down located by xpath.

    :param web_gui: initialized selenium web driver
    :param select_button: xpath of the drop-down
    :param select_value: visible text of the option to select
    :return: the Select wrapper on success, None if the element is not found
    """
    try:
        select = Select(web_gui.find_element_by_xpath(select_button))
        select.select_by_visible_text(select_value)
        time.sleep(5)
        return select
    except NoSuchElementException:
        return None
def get_drop_down_value(web_gui, get_value):
    """Return the text of the currently selected option of a drop-down by id.

    :param web_gui: initialized selenium web driver
    :param get_value: element id of the drop-down
    :return: text of the first selected option, or None if the element
        is not found
    """
    try:
        select = Select(web_gui.find_element_by_id(get_value))
        selected_option = select.first_selected_option
        selected_value = selected_option.text
        return selected_value
    except NoSuchElementException:
        return None
def get_radio_button_value(web_gui, get_value):
    """Report whether the radio button with id *get_value* is checked.

    The check inspects the image 'src' attribute of the first matching
    element; both branches return immediately, so only the first match is
    examined.  If no element matches, the loop never runs and the function
    implicitly returns None.

    :param web_gui: initialized selenium web driver
    :param get_value: element id of the radio button image
    :return: True if checked, False if not, None on lookup failure
    """
    try:
        radio_button = web_gui.find_elements_by_id(get_value)
        for radiobutton in radio_button:
            radio = radiobutton.get_attribute('src')
            if "radio-box-checked" in radio:
                return True
            else:
                return False
    except NoSuchElementException:
        return None
def get_text_value(web_gui, get_value):
    """Return the text of the element with id *get_value*.

    (Docstring corrected: this reads an element's text, it does not check a
    radio button.)

    :param web_gui: initialized selenium web driver
    :param get_value: element id of the element to read
    :return: the element's text, or None if the element is not found
    """
    try:
        text_button = web_gui.find_element_by_id(get_value)
        text_value = text_button.text
        return text_value
    except NoSuchElementException:
        return None
def get_text_value_by_xpath(web_gui, get_value):
    """Return the text of the element located by xpath *get_value*.

    :param web_gui: initialized selenium web driver
    :param get_value: xpath of the element to read
    :return: the element's text, or None if the element is not found
    """
    try:
        text_button = web_gui.find_element_by_xpath(get_value)
        text_value = text_button.text
        return text_value
    except NoSuchElementException:
        return None
def get_value_from_disabled_input(web_gui, get_value):
    """Return the value of a disabled input by executing JavaScript.

    Disabled elements cannot be read via normal element APIs, so the value
    is fetched with document.getElementById in the browser.

    NOTE(review): *get_value* is interpolated into the JS string unescaped --
    a value containing '"' would break the script; ids are expected to be
    simple tokens, but confirm with callers.  Unlike the other helpers there
    is no try/except here, so a script error propagates to the caller.

    :param web_gui: initialized selenium web driver
    :param get_value: element id of the disabled input
    :return: the input's value as a string
    :rtype: str
    """
    js = "return document.getElementById(\"{!s}\").value;".format(
        str(get_value))
    text_value = web_gui.execute_script(js)
    return str(text_value)
def get_icon_check_value_by_id(web_gui, get_value):
    """Report whether the icon button with id *get_value* shows the check icon.

    Only the first matching element is examined (both branches return on the
    first iteration); with no matches the function implicitly returns None.

    :param web_gui: initialized selenium web driver
    :param get_value: element id of the icon image
    :return: True if checked, False if not, None on lookup failure
    """
    try:
        icon_button = web_gui.find_elements_by_id(get_value)
        for iconbutton in icon_button:
            icon = iconbutton.get_attribute('src')
            if "icon-check.svg" in icon:
                return True
            else:
                return False
    except NoSuchElementException:
        return None
def get_icon_check_value_by_xpath(web_gui, get_value):
    """Report whether the icon button located by xpath shows the check icon.

    Only the first matching element is examined (both branches return on the
    first iteration); with no matches the function implicitly returns None.

    :param web_gui: initialized selenium web driver
    :param get_value: xpath of the icon image
    :return: True if checked, False if not, None on lookup failure
    """
    try:
        icon_button = web_gui.find_elements_by_xpath(get_value)
        for iconbutton in icon_button:
            icon = iconbutton.get_attribute('src')
            if "icon-check.svg" in icon:
                return True
            else:
                return False
    except NoSuchElementException:
        return None
def check_element_is_enable_by_id(web_gui, check_value):
    """Report whether the element with id *check_value* is enabled.

    (Docstring corrected: the parameter is *check_value* and the result is
    the element's enabled state, not a text value.)

    :param web_gui: initialized selenium web driver
    :param check_value: element id of the element to check
    :return: True/False from is_enabled(), or None if the element
        is not found
    """
    try:
        text_button = web_gui.find_element_by_id(check_value)
        text_value = text_button.is_enabled()
        return text_value
    except NoSuchElementException:
        return None
def get_check_box_value_by_id(web_gui, get_value):
    """Report whether the check box (or radio box) with id *get_value* is checked.

    Only the first matching element is examined (both branches return on the
    first iteration); with no matches the function implicitly returns None.

    :param web_gui: initialized selenium web driver
    :param get_value: element id of the check-box image
    :return: True if checked, False if not, None on lookup failure
    """
    try:
        box_button = web_gui.find_elements_by_id(get_value)
        for boxbutton in box_button:
            box = boxbutton.get_attribute('src')
            if "check-box-checked.png" in box or 'radio-box-checked.png' in box:
                return True
            else:
                return False
    except NoSuchElementException:
        return None
def get_check_box_value_by_xpath(web_gui, get_value):
    """Report whether the check box (or radio box) located by xpath is checked.

    Only the first matching element is examined (both branches return on the
    first iteration); with no matches the function implicitly returns None.

    :param web_gui: initialized selenium web driver
    :param get_value: xpath of the check-box image
    :return: True if checked, False if not, None on lookup failure
    """
    try:
        box_button = web_gui.find_elements_by_xpath(get_value)
        for boxbutton in box_button:
            box = boxbutton.get_attribute('src')
            if "check-box-checked.png" in box or 'radio-box-checked.png' in box:
                return True
            else:
                return False
    except NoSuchElementException:
        return None
| 31.714286
| 80
| 0.679369
| 1,512
| 11,100
| 4.795635
| 0.08664
| 0.052958
| 0.024273
| 0.030892
| 0.845263
| 0.841953
| 0.820852
| 0.807475
| 0.79989
| 0.787616
| 0
| 0.001461
| 0.25991
| 11,100
| 349
| 81
| 31.805158
| 0.881193
| 0.481261
| 0
| 0.664286
| 0
| 0
| 0.036001
| 0.021327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.021429
| 0
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6ff9c2509a147ce16db92ea206c57b895d473d04
| 235
|
py
|
Python
|
plots/model_explorer/plotters/bar_plot/date_distribution/__init__.py
|
ZviBaratz/pylabber
|
35337284f3d0615249f642743b993b7dad407390
|
[
"Apache-2.0"
] | 3
|
2020-08-28T21:33:07.000Z
|
2021-07-19T17:52:17.000Z
|
plots/model_explorer/plotters/bar_plot/date_distribution/__init__.py
|
TheLabbingProject/pylabber
|
27d6073e7bde871c16912a8ea5e0e389711bbd9f
|
[
"Apache-2.0"
] | 74
|
2019-09-04T11:40:16.000Z
|
2022-01-03T19:43:04.000Z
|
plots/model_explorer/plotters/bar_plot/date_distribution/__init__.py
|
ZviBaratz/pylabber
|
35337284f3d0615249f642743b993b7dad407390
|
[
"Apache-2.0"
] | 3
|
2019-05-07T07:09:05.000Z
|
2019-08-30T15:40:47.000Z
|
from plots.model_explorer.plotters.bar_plot.date_distribution.configuration import (
DateDistributionConfiguration,
)
from plots.model_explorer.plotters.bar_plot.date_distribution.date_distribution import (
DateDistribution,
)
| 33.571429
| 88
| 0.846809
| 25
| 235
| 7.68
| 0.52
| 0.25
| 0.145833
| 0.229167
| 0.552083
| 0.552083
| 0.552083
| 0.552083
| 0.552083
| 0
| 0
| 0
| 0.085106
| 235
| 6
| 89
| 39.166667
| 0.893023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
82f00f758a329d238b5910d192e23413353d4959
| 199
|
py
|
Python
|
src/utils/io.py
|
shirin1996/PyBot
|
4676ccca6b47fce4d3f20a7e158ea9278eb1b508
|
[
"MIT"
] | 1
|
2022-01-30T20:27:31.000Z
|
2022-01-30T20:27:31.000Z
|
src/utils/io.py
|
shirinyamani/MsgBot
|
0b95cea203ff97d631fba7cbf48c23c76f7d91e9
|
[
"MIT"
] | null | null | null |
src/utils/io.py
|
shirinyamani/MsgBot
|
0b95cea203ff97d631fba7cbf48c23c76f7d91e9
|
[
"MIT"
] | null | null | null |
import json
def read_json(file_name):
    """Read and deserialize a JSON document from *file_name*.

    :param file_name: path of the JSON file to read
    :return: the deserialized Python object
    :raises FileNotFoundError: if the file does not exist
    :raises json.JSONDecodeError: if the file is not valid JSON
    """
    # JSON is defined as UTF-8; pin the encoding instead of relying on the
    # platform default (which breaks on Windows for non-ASCII content).
    with open(file_name, 'r', encoding='utf-8') as f:
        return json.load(f)
def write_json(data, file_name):
    """Serialize *data* as JSON and write it to *file_name*.

    :param data: JSON-serializable Python object
    :param file_name: destination path (overwritten if it exists)
    :raises TypeError: if *data* is not JSON serializable
    """
    # Encoding is pinned because JSON is UTF-8 by definition; ensure_ascii
    # stays at its default so the emitted bytes match the original behavior.
    with open(file_name, 'w', encoding='utf-8') as f:
        json.dump(data, f)
| 22.111111
| 35
| 0.638191
| 35
| 199
| 3.457143
| 0.485714
| 0.264463
| 0.198347
| 0.264463
| 0.396694
| 0.396694
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231156
| 199
| 9
| 36
| 22.111111
| 0.79085
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
d204008f3a1e70dea11c0be776f29fb0236fb655
| 4,218
|
py
|
Python
|
test/integration/remote_plugins/plugins/system_registration_test.py
|
scott-taubman/beer-garden
|
bac825849f7791e14064942566fbec63a83e6f87
|
[
"MIT"
] | 230
|
2018-02-03T01:33:45.000Z
|
2022-02-20T22:07:25.000Z
|
test/integration/remote_plugins/plugins/system_registration_test.py
|
scott-taubman/beer-garden
|
bac825849f7791e14064942566fbec63a83e6f87
|
[
"MIT"
] | 961
|
2018-02-06T11:22:40.000Z
|
2022-03-24T15:22:33.000Z
|
test/integration/remote_plugins/plugins/system_registration_test.py
|
scott-taubman/beer-garden
|
bac825849f7791e14064942566fbec63a83e6f87
|
[
"MIT"
] | 33
|
2018-02-04T18:00:07.000Z
|
2021-12-15T13:07:22.000Z
|
import pytest
from brewtils.errors import ValidationError
try:
from helper import delete_plugins
from helper.assertion import assert_system_running
from helper.plugin import (
TestPluginV1,
TestPluginV1BetterDescriptions,
TestPluginV2,
create_plugin,
start_plugin,
stop_plugin,
)
except (ImportError, ValueError):
from ...helper import delete_plugins
from ...helper.assertion import assert_system_running
from ...helper.plugin import (
TestPluginV1,
TestPluginV1BetterDescriptions,
TestPluginV2,
create_plugin,
start_plugin,
stop_plugin,
)
@pytest.mark.usefixtures("easy_client")
class TestSystemRegistration(object):
    """Integration tests for plugin/system registration against a garden."""

    @pytest.fixture(autouse=True)
    def delete_test_plugin(self):
        """Ensure there are no "test" plugins before or after the test"""
        delete_plugins(self.easy_client, "test")
        yield
        delete_plugins(self.easy_client, "test")

    def test_system_register_successful(self):
        """A plugin registers and its system reports as running."""
        plugin = create_plugin("test", "1.0.0", TestPluginV1)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0")
        stop_plugin(plugin)

    def test_system_register_update_data(self):
        """Re-registering the same version updates description/metadata/icon/display name."""
        # Register the standard plugin, then stop it
        plugin = create_plugin("test", "1.0.0", TestPluginV1)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0")
        stop_plugin(plugin)
        # Now create the new plugin and register that one
        plugin = create_plugin(
            "test",
            "1.0.0",
            TestPluginV1BetterDescriptions,
            description="A better description",
            metadata={"foo": "bar"},
            icon_name="fa-coffee",
            display_name="new_display_name",
        )
        start_plugin(plugin, self.easy_client)
        assert_system_running(
            self.easy_client,
            "test",
            "1.0.0",
            system={
                "description": "A better description",
                "metadata": {"foo": "bar"},
                "icon_name": "fa-coffee",
                "display_name": "new_display_name",
            },
        )
        stop_plugin(plugin)

    def test_system_register_dev_different_commands(self):
        """A .dev version may re-register with a different command set."""
        # Register the standard plugin, then stop it
        plugin = create_plugin("test", "1.0.0.dev", TestPluginV1)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0.dev")
        stop_plugin(plugin)
        # Now create the new plugin and register that one
        plugin = create_plugin("test", "1.0.0.dev", TestPluginV2)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0.dev")
        stop_plugin(plugin)

    def test_system_register_different_commands_should_fail(self):
        """A released version must reject re-registration with different commands."""
        plugin = create_plugin("test", "1.0.0", TestPluginV1)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0")
        stop_plugin(plugin)
        plugin = create_plugin("test", "1.0.0", TestPluginV2)
        with pytest.raises(ValidationError):
            self.easy_client.create_system(plugin.system)

    def test_system_register_different_versions(self):
        """Two versions of the same system can be registered side by side."""
        plugin = create_plugin("test", "1.0.0", TestPluginV1)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0")
        plugin = create_plugin("test", "2.0.0", TestPluginV2)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0")
        assert_system_running(self.easy_client, "test", "2.0.0")

    def test_system_register_same_instance_name(self):
        """Registering a duplicate system/version/instance must fail validation."""
        plugin = create_plugin("test", "1.0.0", TestPluginV1)
        start_plugin(plugin, self.easy_client)
        assert_system_running(self.easy_client, "test", "1.0.0")
        plugin = create_plugin("test", "1.0.0", TestPluginV1)
        with pytest.raises(ValidationError):
            self.easy_client.create_system(plugin.system)
| 36.678261
| 73
| 0.64367
| 498
| 4,218
| 5.204819
| 0.164659
| 0.092593
| 0.124228
| 0.051312
| 0.837191
| 0.822145
| 0.798225
| 0.743827
| 0.735725
| 0.735725
| 0
| 0.025229
| 0.248222
| 4,218
| 114
| 74
| 37
| 0.792179
| 0.057373
| 0
| 0.547368
| 0
| 0
| 0.092261
| 0
| 0
| 0
| 0
| 0
| 0.126316
| 1
| 0.073684
| false
| 0
| 0.094737
| 0
| 0.178947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d21ea368193945127c11d948d6e43cd9c4c55fac
| 22
|
py
|
Python
|
xlibris/__init__.py
|
konsbn/xlibris
|
d6ae33cd58212db3160b22128af9f209921f6205
|
[
"MIT"
] | 9
|
2016-01-12T18:56:19.000Z
|
2021-09-24T16:08:14.000Z
|
xlibris/__init__.py
|
konsbn/xlibris
|
d6ae33cd58212db3160b22128af9f209921f6205
|
[
"MIT"
] | null | null | null |
xlibris/__init__.py
|
konsbn/xlibris
|
d6ae33cd58212db3160b22128af9f209921f6205
|
[
"MIT"
] | 1
|
2021-09-21T21:37:11.000Z
|
2021-09-21T21:37:11.000Z
|
from xlibris import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d243175cd6681cc44ab0a359d42a5ecadb9c3acb
| 9,900
|
py
|
Python
|
src/tests/control/test_teams.py
|
upsidedownpancake/pretix
|
bfeeb1028c9eccab4936029db7c38edd4cd5aad5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/tests/control/test_teams.py
|
upsidedownpancake/pretix
|
bfeeb1028c9eccab4936029db7c38edd4cd5aad5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/tests/control/test_teams.py
|
upsidedownpancake/pretix
|
bfeeb1028c9eccab4936029db7c38edd4cd5aad5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import pytest
from django.core import mail as djmail
from django.utils.timezone import now
from pretix.base.models import Event, Organizer, Team, User
@pytest.fixture
def organizer():
    """Fixture: a throwaway Organizer named/slugged 'dummy'."""
    return Organizer.objects.create(name='Dummy', slug='dummy')
@pytest.fixture
def event(organizer):
    """Fixture: a 'dummy' Event under the dummy organizer, dated now."""
    event = Event.objects.create(
        organizer=organizer, name='Dummy', slug='dummy',
        date_from=now()
    )
    return event
@pytest.fixture
def admin_team(organizer):
    """Fixture: a team with the can_change_teams permission."""
    return Team.objects.create(organizer=organizer, can_change_teams=True, name='Admin team')
@pytest.fixture
def admin_user(admin_team):
    """Fixture: a user (dummy@dummy.dummy / 'dummy') who is a member of admin_team."""
    u = User.objects.create_user('dummy@dummy.dummy', 'dummy')
    admin_team.members.add(u)
    return u
@pytest.mark.django_db
def test_list_of_teams(event, admin_user, client):
    """The team list page shows the existing team."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    resp = client.get('/control/organizer/dummy/teams')
    assert 'Admin team' in resp.rendered_content
@pytest.mark.django_db
def test_team_detail_view(event, admin_user, admin_team, client):
    """The team detail page shows the team name and its members."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    resp = client.get('/control/organizer/dummy/team/{}/'.format(admin_team.pk))
    assert 'Admin team' in resp.rendered_content
    assert admin_user.email in resp.rendered_content
@pytest.mark.django_db
def test_team_add_user(event, admin_user, admin_team, client):
    """Posting an existing user's email adds them as a team member."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    u = User.objects.create_user('dummy2@dummy.dummy', 'dummy')
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'user': u.email
    }, follow=True)
    assert 'Admin team' in resp.rendered_content
    assert admin_user.email in resp.rendered_content
    assert u.email in resp.rendered_content
    assert u in admin_team.members.all()
@pytest.mark.django_db
def test_team_create_invite(event, admin_user, admin_team, client):
    """Posting an unknown email creates an invite and sends one mail."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    djmail.outbox = []
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'user': 'foo@example.org'
    }, follow=True)
    assert 'Admin team' in resp.rendered_content
    assert admin_user.email in resp.rendered_content
    assert 'foo@example.org' in resp.rendered_content
    assert admin_team.invites.first().email == 'foo@example.org'
    assert len(djmail.outbox) == 1
@pytest.mark.django_db
def test_team_create_token(event, admin_user, admin_team, client):
    """Posting a token name creates an API token shown once in the response."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    djmail.outbox = []
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'name': 'Test token'
    }, follow=True)
    assert 'Test token' in resp.rendered_content
    assert admin_team.tokens.first().name == 'Test token'
    assert admin_team.tokens.first().token in resp.rendered_content
@pytest.mark.django_db
def test_team_remove_token(event, admin_user, admin_team, client):
    """Removing a token deactivates it and hides its value from the page."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    tk = admin_team.tokens.create(name='Test token')
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'remove-token': str(tk.pk)
    }, follow=True)
    assert tk.token not in resp.rendered_content
    assert 'Test token' in resp.rendered_content
    tk.refresh_from_db()
    assert not tk.active
@pytest.mark.django_db
def test_team_revoke_invite(event, admin_user, admin_team, client):
    """Revoking an invite deletes it from the team."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    inv = admin_team.invites.create(email='foo@example.org')
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'remove-invite': str(inv.pk)
    }, follow=True)
    assert 'Admin team' in resp.rendered_content
    assert admin_user.email in resp.rendered_content
    assert not admin_team.invites.exists()
@pytest.mark.django_db
def test_team_remove_user(event, admin_user, admin_team, client):
    """Removing a (non-last-admin) member succeeds."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    u = User.objects.create_user('dummy2@dummy.dummy', 'dummy')
    admin_team.members.add(u)
    resp = client.post('/control/organizer/dummy/team/{}/'.format(admin_team.pk), {
        'remove-member': u.pk
    }, follow=True)
    assert 'Admin team' in resp.rendered_content
    assert admin_user.email in resp.rendered_content
    assert u not in admin_team.members.all()
@pytest.mark.django_db
def test_team_remove_last_admin(event, admin_user, admin_team, client):
    """The last user able to manage teams cannot be removed from the admin team.

    Removal only succeeds once the user belongs to another team that itself
    has the can_change_teams permission.
    """
    client.login(email='dummy@dummy.dummy', password='dummy')
    team_url = '/control/organizer/dummy/team/{}/'.format(admin_team.pk)

    def try_remove():
        # One POST attempting to remove the admin user from the team.
        return client.post(team_url, {'remove-member': admin_user.pk}, follow=True)

    # No other team exists -> removal must be rejected.
    response = try_remove()
    assert 'alert-danger' in response.rendered_content
    assert admin_user in admin_team.members.all()

    # A second team exists but the user is not a member -> still rejected.
    other_team = Team.objects.create(organizer=event.organizer, name='Admin team 2')
    response = try_remove()
    assert 'alert-danger' in response.rendered_content
    assert admin_user in admin_team.members.all()

    # User is a member of the second team, but it lacks can_change_teams -> rejected.
    other_team.members.add(admin_user)
    response = try_remove()
    assert 'alert-danger' in response.rendered_content
    assert admin_user in admin_team.members.all()

    # Second team gains can_change_teams -> removal finally succeeds.
    other_team.can_change_teams = True
    other_team.save()
    response = try_remove()
    assert 'alert-danger' not in response.rendered_content
    assert admin_user not in admin_team.members.all()
@pytest.mark.django_db
def test_create_team(event, admin_user, admin_team, client):
    """Creating a team stores the selected permissions, the event limit, and adds the creator."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    form_data = {
        'name': 'Foo',
        'can_create_events': 'on',
        'limit_events': str(event.pk),
        'can_change_event_settings': 'on',
    }
    client.post('/control/organizer/dummy/team/add', form_data, follow=True)
    created = Team.objects.last()
    assert created.can_change_event_settings
    assert created.can_create_events
    assert not created.can_change_organizer_settings
    assert list(created.limit_events.all()) == [event]
    assert list(created.members.all()) == [admin_user]
@pytest.mark.django_db
def test_update_team(event, admin_user, admin_team, client):
    """Editing a team updates its permissions and event limit."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    form_data = {
        'name': 'Admin',
        'can_change_teams': 'on',
        'limit_events': str(event.pk),
        'can_change_event_settings': 'on',
    }
    client.post(
        '/control/organizer/dummy/team/{}/edit'.format(admin_team.pk),
        form_data,
        follow=True,
    )
    admin_team.refresh_from_db()
    assert admin_team.can_change_event_settings
    assert not admin_team.can_change_organizer_settings
    assert list(admin_team.limit_events.all()) == [event]
@pytest.mark.django_db
def test_update_last_team_to_be_no_admin(event, admin_user, admin_team, client):
    """Stripping team-management rights from the last admin team is rejected."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    response = client.post(
        '/control/organizer/dummy/team/{}/edit'.format(admin_team.pk),
        {'name': 'Admin', 'can_change_event_settings': 'on'},
        follow=True,
    )
    assert 'alert-danger' in response.rendered_content
@pytest.mark.django_db
def test_remove_team(event, admin_user, admin_team, client):
    """A team without admin rights can be deleted."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    other_team = Team.objects.create(organizer=event.organizer, name='Admin team 2')
    delete_url = '/control/organizer/dummy/team/{}/delete'.format(other_team.pk)
    response = client.post(delete_url, {}, follow=True)
    assert Team.objects.count() == 1
    assert 'alert-success' in response.rendered_content
@pytest.mark.django_db
def test_remove_last_admin_team(event, admin_user, admin_team, client):
    """Deleting the last team with admin rights is rejected."""
    client.login(email='dummy@dummy.dummy', password='dummy')
    delete_url = '/control/organizer/dummy/team/{}/delete'.format(admin_team.pk)
    response = client.post(delete_url, {}, follow=True)
    assert Team.objects.count() == 1
    assert 'alert-danger' in response.rendered_content
@pytest.mark.django_db
def test_invite_invalid_token(event, admin_team, client):
    """A tampered invite token yields an error page."""
    invite = admin_team.invites.create(email='foo@bar.com')
    response = client.get('/control/invite/foo{}bar'.format(invite.token), follow=True)
    assert b'alert-danger' in response.content
    assert b'invalid link' in response.content
@pytest.mark.django_db
def test_invite_existing_team_member(event, admin_team, client):
    """Following an invite link while already a team member shows an error."""
    member = User.objects.create_user('dummy2@dummy.dummy', 'dummy')
    admin_team.members.add(member)
    client.login(email='dummy2@dummy.dummy', password='dummy')
    invite = admin_team.invites.create(email='foo@bar.com')
    response = client.get('/control/invite/{}'.format(invite.token), follow=True)
    assert b'alert-danger' in response.content
    assert b'already are part of' in response.content
@pytest.mark.django_db
def test_invite_authenticated(event, admin_team, client):
    """A logged-in user following an invite link joins the team directly."""
    member = User.objects.create_user('dummy2@dummy.dummy', 'dummy')
    client.login(email='dummy2@dummy.dummy', password='dummy')
    invite = admin_team.invites.create(email='foo@bar.com')
    response = client.get('/control/invite/{}'.format(invite.token), follow=True)
    assert b'alert-success' in response.content
    assert member in admin_team.members.all()
    assert not admin_team.invites.exists()
@pytest.mark.django_db
def test_invite_new_user(event, admin_team, client):
    """An anonymous invitee gets a registration form and can join by signing up."""
    invite = admin_team.invites.create(email='foo@bar.com')
    invite_url = '/control/invite/{}'.format(invite.token)
    response = client.get(invite_url, follow=True)
    assert b'<form' in response.content
    registration = {
        'email': 'dummy@example.org',
        'password': 'asdsdgfgjh',
        'password_repeat': 'asdsdgfgjh',
    }
    response = client.post(invite_url, registration, follow=True)
    assert b'alert-success' in response.content
    assert admin_team.members.filter(email='dummy@example.org').exists()
    assert not admin_team.invites.exists()
| 36.397059
| 104
| 0.710707
| 1,397
| 9,900
| 4.868289
| 0.082319
| 0.092633
| 0.049405
| 0.074107
| 0.812381
| 0.790472
| 0.76327
| 0.718424
| 0.694751
| 0.689752
| 0
| 0.002013
| 0.146869
| 9,900
| 271
| 105
| 36.531365
| 0.80322
| 0
| 0
| 0.582938
| 0
| 0
| 0.195152
| 0.068384
| 0
| 0
| 0
| 0
| 0.265403
| 1
| 0.104265
| false
| 0.085308
| 0.018957
| 0.009479
| 0.14218
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
9631f802745b75178fbc96c16fb234d1bdef2377
| 7,017
|
py
|
Python
|
math_recognition/decoder.py
|
bkkaggle/math-recognition
|
aecfb21656a7744945862f4b34520905e50f3ad1
|
[
"MIT"
] | 10
|
2019-12-27T04:35:42.000Z
|
2021-01-26T14:37:12.000Z
|
math_recognition/decoder.py
|
bilal2vec/math-recognition
|
aecfb21656a7744945862f4b34520905e50f3ad1
|
[
"MIT"
] | 1
|
2020-10-26T06:25:25.000Z
|
2020-10-31T01:31:27.000Z
|
math_recognition/decoder.py
|
bkkaggle/math-recognition
|
aecfb21656a7744945862f4b34520905e50f3ad1
|
[
"MIT"
] | 3
|
2020-02-11T06:22:15.000Z
|
2020-11-08T10:52:57.000Z
|
import os
import random
from typing import Dict, Tuple
from overrides import overrides
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import allennlp
from allennlp.common import Registrable, Params
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders import Embedding
from math_recognition.attention import CaptioningAttention
# Module-wide device selection: GPU 0 when CUDA is available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class CaptioningDecoder(nn.Module, Registrable):
    """Abstract base class for caption decoders.

    Concrete decoders register themselves via ``CaptioningDecoder.register``
    and must implement ``forward`` plus the two dimension accessors.
    """

    def __init__(self, vocab: Vocabulary):
        super().__init__()
        self.vocab = vocab

    def forward(self, x: torch.Tensor, h: torch.Tensor, c: torch.Tensor, predicted_indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Run one decoding step; must be implemented by subclasses."""
        raise NotImplementedError()

    def get_output_dim(self) -> int:
        """Size of the decoder's output; must be implemented by subclasses."""
        raise NotImplementedError()

    def get_input_dim(self) -> int:
        """Dimensionality of the hidden state(s) ``h`` and ``c``."""
        raise NotImplementedError()
@CaptioningDecoder.register('image-captioning')
class ImageCaptioningDecoder(CaptioningDecoder):
    """LSTM decoder that attends over encoder features at every step."""

    def __init__(self, vocab: Vocabulary, attention: CaptioningAttention, embedding_dim: int = 256, decoder_dim: int = 256):
        super().__init__(vocab=vocab)
        self._vocab_size = self.vocab.get_vocab_size()
        self._embedding_dim = embedding_dim
        self._decoder_dim = decoder_dim
        self._embedding = Embedding(self._vocab_size, self._embedding_dim)
        self._attention = attention
        # The LSTM consumes the attended context concatenated with the
        # previous token's embedding.
        self._decoder_cell = nn.LSTMCell(
            self._embedding.get_output_dim() + self._attention.get_output_dim(),
            self._decoder_dim)
        self._linear = nn.Linear(self._decoder_dim, self._vocab_size)

    @overrides
    def forward(self, x: torch.Tensor, h: torch.Tensor, c: torch.Tensor, predicted_indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """One decoding step: embed the previous token, attend, advance the LSTM."""
        # (batch_size, embedding_dim)
        token_embedding = self._embedding(predicted_indices).float().view(-1, self._embedding_dim)
        # context: (batch_size, encoder_dim); weights: (batch_size, h * w, 1)
        context, weights = self._attention(x, h)
        # TODO: do not rely on teacher forcing all the time
        # (batch_size, decoder_dim) each
        h, c = self._decoder_cell(torch.cat([context, token_embedding], dim=1), (h, c))
        # (batch_size, vocab_size) — one score per vocabulary entry
        logits = self._linear(h)
        return h, c, logits, weights

    @overrides
    def get_output_dim(self) -> int:
        return self._vocab_size

    @overrides
    def get_input_dim(self) -> int:
        return self._decoder_dim
@CaptioningDecoder.register('WAP')
class WAPDecoder(CaptioningDecoder):
    """GRU decoder variant that threads a running sum of attention weights
    through successive decoding steps."""

    def __init__(self, vocab: Vocabulary, attention: CaptioningAttention, embedding_dim: int = 256, decoder_dim: int = 256):
        super().__init__(vocab=vocab)
        self._vocab_size = self.vocab.get_vocab_size()
        self._embedding_dim = embedding_dim
        self._decoder_dim = decoder_dim
        self._embedding = Embedding(self._vocab_size, self._embedding_dim)
        self._attention = attention
        # The GRU consumes the attended context concatenated with the
        # previous token's embedding.
        self._decoder_cell = nn.GRUCell(
            self._embedding.get_output_dim() + self._attention.get_output_dim(),
            self._decoder_dim)
        self._linear = nn.Linear(self._decoder_dim, self._vocab_size)

    @overrides
    def forward(self, x: torch.Tensor, h: torch.Tensor, predicted_indices: torch.Tensor, sum_attention_weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """One decoding step; also updates and returns the attention-weight sum."""
        # (batch_size, embedding_dim)
        token_embedding = self._embedding(predicted_indices).float().view(-1, self._embedding_dim)
        # context: (batch_size, encoder_dim); weights: (batch_size, h * w, 1);
        # sum_attention_weights: (batch_size, h * w)
        context, weights, sum_attention_weights = self._attention(x, h, sum_attention_weights)
        # TODO: do not rely on teacher forcing all the time
        # (batch_size, decoder_dim)
        h = self._decoder_cell(torch.cat([context, token_embedding], dim=1), h)
        # (batch_size, vocab_size)
        logits = self._linear(h)
        return h, logits, weights, sum_attention_weights

    @overrides
    def get_output_dim(self) -> int:
        return self._vocab_size

    @overrides
    def get_input_dim(self) -> int:
        return self._decoder_dim
@CaptioningDecoder.register('multiscale')
class MultiscaleDecoder(CaptioningDecoder):
    """GRU decoder whose attention consumes two running attention-weight sums
    (one per feature scale) and returns both updated.

    NOTE(review): ``forward`` takes a different parameter list than the base
    class declares — callers dispatch on the concrete type; confirm intended.
    """

    def __init__(self, vocab: Vocabulary, attention: CaptioningAttention, embedding_dim: int = 256, decoder_dim: int = 256):
        super(MultiscaleDecoder, self).__init__(vocab=vocab)
        self._vocab_size = self.vocab.get_vocab_size()
        self._embedding_dim = embedding_dim
        self._decoder_dim = decoder_dim
        self._embedding = Embedding(self._vocab_size, self._embedding_dim)
        self._dropout = nn.Dropout(0.1)
        # Output size of state cell must be decoder dim since state is
        # transformed by the state cell.
        self._state_cell = nn.GRUCell(self._embedding.get_output_dim(), self._decoder_dim)
        self._attention = attention
        self._decoder_cell = nn.GRUCell(self._attention.get_output_dim(), self._decoder_dim)
        self._linear = nn.Linear(self._decoder_dim, self._vocab_size)

    @overrides
    def forward(self, x: torch.Tensor, h: torch.Tensor, predicted_indices: torch.Tensor, sum_attention_weights_0: torch.Tensor, sum_attention_weights_1: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """One decoding step.

        Returns the new hidden state, vocabulary scores, the attention
        weights, and both updated attention-weight sums.
        (Fixed: the annotation previously declared a 4-tuple although the
        method returns 5 tensors.)
        """
        # Shape: (batch_size, embedding_dim)
        embedding = self._embedding(predicted_indices).float().view(-1, self._embedding_dim)
        embedding = self._dropout(embedding)
        # Pre-attention state update from the token embedding alone.
        # Shape: (batch_size, decoder_dim)
        h = self._state_cell(embedding, h)
        # attention: (batch_size, encoder_dim); attention_weights: (batch_size, h * w, 1)
        attention, attention_weights, sum_attention_weights_0, sum_attention_weights_1 = self._attention(x, h, sum_attention_weights_0, sum_attention_weights_1)
        # TODO: do not rely on teacher forcing all the time
        # Shape: (batch_size, decoder_dim)
        h = self._decoder_cell(attention, h)
        # One score per vocabulary entry. Shape: (batch_size, vocab_size)
        preds = self._linear(h)
        return h, preds, attention_weights, sum_attention_weights_0, sum_attention_weights_1

    @overrides
    def get_output_dim(self) -> int:
        return self._vocab_size

    @overrides
    def get_input_dim(self) -> int:
        return self._decoder_dim
| 41.276471
| 232
| 0.699159
| 887
| 7,017
| 5.224352
| 0.126268
| 0.078334
| 0.039275
| 0.05697
| 0.804489
| 0.780967
| 0.753776
| 0.741692
| 0.73306
| 0.723997
| 0
| 0.006626
| 0.204218
| 7,017
| 170
| 233
| 41.276471
| 0.823245
| 0.144506
| 0
| 0.539216
| 0
| 0
| 0.006356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156863
| false
| 0
| 0.147059
| 0.058824
| 0.431373
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96ab5d1f5e7c7653545ff148f0a826eb8b00be7e
| 17,542
|
py
|
Python
|
rest_api/simple_supply_rest_api/database.py
|
celio-jpeg/bev
|
2a7473a93885ac91d2aa32048dd760d5976934b8
|
[
"Apache-2.0"
] | 1
|
2020-10-27T15:28:40.000Z
|
2020-10-27T15:28:40.000Z
|
rest_api/simple_supply_rest_api/database.py
|
celio-jpeg/bev
|
2a7473a93885ac91d2aa32048dd760d5976934b8
|
[
"Apache-2.0"
] | null | null | null |
rest_api/simple_supply_rest_api/database.py
|
celio-jpeg/bev
|
2a7473a93885ac91d2aa32048dd760d5976934b8
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import aiopg
import psycopg2
from psycopg2.extras import RealDictCursor
# SQL fragment selecting the highest block number currently stored; it is
# interpolated into queries below to restrict rows to the current chain head.
LATEST_BLOCK_NUM = """
SELECT max(block_num) FROM blocks
"""
# Module-level logger for connection lifecycle messages.
LOGGER = logging.getLogger(__name__)
class Database(object):
    """Manages connection to the postgres database and makes async queries

    NOTE(review): every query below is built with ``str.format`` string
    interpolation rather than parameterized execution, so any caller-supplied
    value (voter_id, election_id, public_key, ...) is a potential SQL
    injection vector. Should be migrated to psycopg2 parameter binding.
    """

    def __init__(self, host, port, name, user, password, loop):
        # Assemble the libpq DSN; the connection itself is opened lazily
        # in connect().
        self._dsn = 'dbname={} user={} password={} host={} port={}'.format(
            name, user, password, host, port)
        self._loop = loop
        self._conn = None  # set by connect()

    async def connect(self, retries=5, initial_delay=1, backoff=2):
        """Initializes a connection to the database
        Args:
            retries (int): Number of times to retry the connection
            initial_delay (int): Number of seconds wait between reconnects
            backoff (int): Multiplies the delay after each retry
        """
        LOGGER.info('Connecting to database')
        delay = initial_delay
        for attempt in range(retries):
            try:
                self._conn = await aiopg.connect(
                    dsn=self._dsn, loop=self._loop, echo=True)
                LOGGER.info('Successfully connected to database')
                return
            except psycopg2.OperationalError:
                LOGGER.debug(
                    'Connection failed.'
                    ' Retrying connection (%s retries remaining)',
                    retries - attempt)
                await asyncio.sleep(delay)
                delay *= backoff
        # Final attempt after the retry budget is exhausted; an
        # OperationalError here propagates to the caller.
        self._conn = await aiopg.connect(
            dsn=self._dsn, loop=self._loop, echo=True)
        LOGGER.info('Successfully connected to database')

    def disconnect(self):
        """Closes connection to the database
        """
        self._conn.close()

    async def fetch_current_elections_resources(self, voter_id, timestamp):
        # Elections the voter is registered for that are open at `timestamp`,
        # annotated with the admin's name and whether the voter already voted.
        fetch_elections = """
SELECT e.*,v.name AS "admin_name",(SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id=e.election_id LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
AND election_id IN (SELECT election_id FROM poll_registrations WHERE voter_id='{0}' AND status='1'
AND ({2}) >= start_block_num AND ({2}) < end_block_num)
AND start_timestamp <= {1}
AND end_timestamp >= {1}
AND e.status = '1'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num
ORDER BY start_timestamp DESC;
""".format(voter_id, timestamp, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch_elections)
            return await cursor.fetchall()

    async def fetch_past_elections_resources(self, voter_id, timestamp):
        # Same as fetch_current_elections_resources, but restricted to
        # elections that ended before `timestamp`.
        fetch_elections = """
SELECT e.*,v.name AS "admin_name",(SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id=e.election_id LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
AND election_id IN (SELECT election_id FROM poll_registrations WHERE voter_id='{0}' AND status='1'
AND ({2}) >= start_block_num AND ({2}) < end_block_num)
AND end_timestamp < {1}
AND e.status = '1'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num
ORDER BY start_timestamp DESC;
""".format(voter_id, timestamp, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch_elections)
            return await cursor.fetchall()

    async def fetch_public_elections_resources(self, timestamp):
        # Currently-open elections whose results are public.
        fetch_elections = """
SELECT *
FROM elections
WHERE start_timestamp <= {0}
AND end_timestamp >= {0}
AND status = '1'
AND results_permission = 'PUBLIC'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num
ORDER BY start_timestamp DESC;
""".format(timestamp, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch_elections)
            return await cursor.fetchall()

    async def fetch_public_past_elections_resources(self, voter_id, timestamp):
        # Public elections that already ended, annotated with whether the
        # given voter voted in them.
        fetch_elections = """
SELECT e.*,v.name AS "admin_name",(SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id=e.election_id LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE e.results_permission = 'PUBLIC'
AND e.status = '1'
AND e.end_timestamp < {1}
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num
ORDER BY start_timestamp DESC;
""".format(voter_id, timestamp, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch_elections)
            return await cursor.fetchall()

    async def fetch_admin_elections_resources(self, admin_id):
        # All elections administered by the given admin.
        fetch_elections = """
SELECT *
FROM elections
WHERE admin_id = '{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num
ORDER BY start_timestamp DESC;
""".format(admin_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch_elections)
            return await cursor.fetchall()

    async def fetch_admins_resources(self):
        # All admin/superadmin voters.
        # NOTE(review): SQL precedence makes this parse as
        # (... AND type = 'ADMIN') OR type = 'SUPERADMIN', so the block-range
        # filter does not apply to SUPERADMIN rows — confirm intended.
        fetch = """
SELECT voter_id, name, type
FROM voters
WHERE ({0}) >= start_block_num
AND ({0}) < end_block_num
AND type = 'ADMIN' OR type = 'SUPERADMIN'
ORDER BY type DESC;
""".format(LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchall()

    async def fetch_voters_resources(self, voter_id=None):
        # Voters whose id contains the given substring.
        # NOTE(review): with the default voter_id=None this searches for the
        # literal substring 'None' — confirm callers always pass a value.
        fetch = """
SELECT voter_id
FROM voters
WHERE type = 'VOTER'
AND voter_id LIKE '%{0}%'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num
ORDER BY type DESC;
""".format(voter_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchall()

    async def insert_voting_option_num_vote_resource(self, voting_option_id, name, election_id):
        # Seed the per-option vote counter at zero.
        num_votes = 0
        insert = """
INSERT INTO count_votes (
voting_option_id,
name,
election_id,
num_votes)
VALUES ('{}', '{}', '{}', '{}')
""".format(
            voting_option_id,
            name,
            election_id,
            num_votes)
        async with self._conn.cursor() as cursor:
            await cursor.execute(insert)
            # NOTE(review): aiopg runs in autocommit mode; verify this
            # synchronous commit() call on the connection behaves as intended.
            self._conn.commit()

    async def update_voting_option_num_vote_resource(self, voting_option_id, num_votes):
        # Overwrite the stored vote count for one voting option.
        update = """
UPDATE count_votes
SET num_votes = '{1}'
WHERE voting_option_id = '{0}'
""".format(
            voting_option_id,
            num_votes)
        async with self._conn.cursor() as cursor:
            await cursor.execute(update)
            # NOTE(review): see commit() note in
            # insert_voting_option_num_vote_resource.
            self._conn.commit()

    async def fetch_auth_resource(self, public_key=None):
        # Credential row (encrypted key + password hash) for a public key.
        fetch = """
SELECT * FROM auth WHERE public_key='{}'
""".format(public_key)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_voter_resource(self, voter_id=None, public_key=None):
        # Look up a voter by id when voter_id is given, otherwise by
        # public key (the WHERE column is chosen by string concatenation).
        fetch = """
SELECT * FROM voters WHERE """ + ("""voter_id""" if voter_id else """public_key""") + """='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(voter_id if voter_id else public_key, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def is_voter_created(self, voter_id):
        # Truthy row when the voter id exists at all (no block-range filter).
        fetch = """
SELECT voter_id
FROM voters
WHERE voter_id = '{0}';
""".format(voter_id)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def is_superadmin_created(self):
        # Truthy row when any SUPERADMIN voter exists.
        fetch = """
SELECT voter_id
FROM voters
WHERE type='SUPERADMIN'
"""
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_election_with_can_vote_resource(self, voter_id=None, election_id=None):
        # One election annotated with whether the voter is registered
        # ("can_vote") and whether they already voted ("voted").
        fetch = """
SELECT e.*, v.name AS "admin_name", (SELECT voter_id FROM poll_registrations WHERE voter_id='{0}'
AND election_id='{1}'
AND status='1' LIMIT 1)
IS NOT NULL AS "can_vote", (SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id='{1}' LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE election_id='{1}'
AND e.status = '1'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num;
""".format(voter_id, election_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_election_resource(self, election_id=None):
        # One election with its admin's name, regardless of status.
        fetch = """
SELECT e.*, v.name AS "admin_name"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE election_id='{0}'
AND ({1}) >= e.start_block_num
AND ({1}) < e.end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_election_with_can_vote_resource_admin(self, voter_id=None, election_id=None):
        # NOTE(review): identical query to fetch_election_with_can_vote_resource
        # as visible here — candidates for deduplication.
        fetch = """
SELECT e.*, v.name AS "admin_name", (SELECT voter_id FROM poll_registrations WHERE voter_id='{0}'
AND election_id='{1}'
AND status='1' LIMIT 1)
IS NOT NULL AS "can_vote", (SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id='{1}' LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE election_id='{1}'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num;
""".format(voter_id, election_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_number_of_votes(self, election_id=None):
        # Per-option vote counters for one election.
        fetch = """
SELECT * FROM count_votes
WHERE election_id='{0}';
""".format(election_id)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchall()

    async def fetch_poll_book(self, election_id=None):
        # Active poll-book registrations for one election.
        fetch = """
SELECT * FROM poll_registrations
WHERE election_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchall()

    async def fetch_poll_book_registration(self, election_id=None, voter_id=None):
        # A single voter's active registration for one election.
        fetch = """
SELECT * FROM poll_registrations
WHERE election_id='{0}'
AND voter_id='{1}'
AND status='1'
AND ({2}) >= start_block_num
AND ({2}) < end_block_num;
""".format(election_id, voter_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def count_poll_book(self, election_id=None):
        # Number of active registrations for one election.
        fetch = """
SELECT COUNT(*)
FROM poll_registrations
WHERE election_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_voting_option_resource(self, voting_option_id=None):
        # One active voting option.
        fetch = """
SELECT * FROM voting_options
WHERE voting_option_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(voting_option_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_voting_option_num_vote_resource(self, voting_option_id=None):
        # The vote counter row for one voting option.
        fetch = """
SELECT * FROM count_votes
WHERE voting_option_id='{0}';
""".format(voting_option_id)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_election_voting_options_resource(self, election_id=None):
        # All active voting options of one election.
        fetch = """
SELECT * FROM voting_options
WHERE election_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchall()

    async def fetch_vote_resource(self, vote_id=None):
        # The most recent row (by timestamp) for one vote id.
        fetch = """
SELECT * FROM votes WHERE timestamp=(SELECT MAX(timestamp) FROM votes WHERE vote_id='{0}')
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(vote_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def fetch_my_vote__election_resource(self, voter_id=None, election_id=None):
        # A voter's most recent vote in one election.
        fetch = """
SELECT * FROM votes WHERE timestamp=(SELECT MAX(timestamp) FROM votes
WHERE voter_id='{0}' AND election_id='{1}')
AND ({2}) >= start_block_num
AND ({2}) < end_block_num;
""".format(voter_id, election_id, LATEST_BLOCK_NUM)
        async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
            await cursor.execute(fetch)
            return await cursor.fetchone()

    async def create_auth_entry(self, public_key, encrypted_private_key, hashed_password):
        # Store credentials; the key and password hash are hex-encoded bytes.
        insert = """
INSERT INTO auth (
public_key,
encrypted_private_key,
hashed_password
)
VALUES ('{}', '{}', '{}');
""".format(
            public_key,
            encrypted_private_key.hex(),
            hashed_password.hex())
        async with self._conn.cursor() as cursor:
            await cursor.execute(insert)
            # NOTE(review): see commit() note in
            # insert_voting_option_num_vote_resource.
            self._conn.commit()
| 39.958998
| 117
| 0.554669
| 1,968
| 17,542
| 4.702744
| 0.082317
| 0.051864
| 0.036521
| 0.047758
| 0.823555
| 0.796542
| 0.781199
| 0.763803
| 0.724581
| 0.696164
| 0
| 0.009434
| 0.353437
| 17,542
| 438
| 118
| 40.050228
| 0.80656
| 0.006043
| 0
| 0.686111
| 0
| 0
| 0.452153
| 0.008286
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005556
| false
| 0.016667
| 0.013889
| 0
| 0.088889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7365f8520f73fbdd517b7991acc3da9b4d316501
| 24
|
py
|
Python
|
UIC/models/__init__.py
|
hikvisionresearch/Unsupervised-Image-Classification
|
0db8f00ece36ef0ee491e082e21b47fedf05c30d
|
[
"MIT"
] | 30
|
2021-05-11T09:13:52.000Z
|
2022-03-16T10:55:45.000Z
|
UIC/models/__init__.py
|
hikvisionresearch/Unsupervised-Image-Classification
|
0db8f00ece36ef0ee491e082e21b47fedf05c30d
|
[
"MIT"
] | 1
|
2021-08-22T15:28:09.000Z
|
2021-08-22T15:28:09.000Z
|
UIC/models/__init__.py
|
hikvisionresearch/Unsupervised-Image-Classification
|
0db8f00ece36ef0ee491e082e21b47fedf05c30d
|
[
"MIT"
] | 2
|
2021-05-16T04:09:35.000Z
|
2021-08-14T11:55:43.000Z
|
from .resnet50 import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.166667
| 24
| 1
| 24
| 24
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
73b33908dac28882d10968660cd6539b91c629b4
| 8,820
|
py
|
Python
|
dfirtrack_config/tests/artifact/test_artifact_exporter_spreadsheet_xls_config_forms.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 273
|
2018-04-18T22:09:15.000Z
|
2021-06-04T09:15:48.000Z
|
dfirtrack_config/tests/artifact/test_artifact_exporter_spreadsheet_xls_config_forms.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 75
|
2018-08-31T11:05:37.000Z
|
2021-06-08T14:15:07.000Z
|
dfirtrack_config/tests/artifact/test_artifact_exporter_spreadsheet_xls_config_forms.py
|
stuhli/dfirtrack
|
9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e
|
[
"Apache-2.0"
] | 61
|
2018-11-12T22:55:48.000Z
|
2021-06-06T15:16:16.000Z
|
from django.test import TestCase
from dfirtrack_artifacts.models import Artifactstatus
from dfirtrack_config.forms import ArtifactExporterSpreadsheetXlsConfigForm
class ArtifactExporterSpreadsheetXlsConfigFormTestCase(TestCase):
"""artifact exporter spreadsheet XLS config form tests"""
@classmethod
def setUpTestData(cls):
# create object
Artifactstatus.objects.create(
artifactstatus_name='artifactstatus_1',
artifactstatus_slug='artifactstatus_1',
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_choice_artifactstatus_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_choice_artifactstatus'].label,
'Export only artifacts with this artifactstatus',
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_id_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_artifact_id'].label, 'Export artifact ID'
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_system_id_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_system_id'].label, 'Export system ID'
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_system_name_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_system_name'].label, 'Export system name'
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifactstatus_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_artifactstatus'].label,
'Export artifactstatus',
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifactpriority_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_artifactpriority'].label,
'Export artifactpriority',
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifacttype_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_artifacttype'].label, 'Export artifacttype'
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_source_path_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_artifact_source_path'].label,
'Export source path',
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_storage_path_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_artifact_storage_path'].label,
'Export storage path',
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_note_internal_form_label(
self,
):
"""test form label"""
# get object
form = ArtifactExporterSpreadsheetXlsConfigForm()
# compare
self.assertEqual(
form.fields['artifactlist_xls_artifact_note_internal'].label,
'Export internal note',
)
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_note_external_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(
        fields['artifactlist_xls_artifact_note_external'].label,
        'Export external note',
    )
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_note_analysisresult_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(
        fields['artifactlist_xls_artifact_note_analysisresult'].label,
        'Export analysis result',
    )
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_md5_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(fields['artifactlist_xls_artifact_md5'].label, 'Export MD5')
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_sha1_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(fields['artifactlist_xls_artifact_sha1'].label, 'Export SHA1')
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_sha256_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(
        fields['artifactlist_xls_artifact_sha256'].label, 'Export SHA256'
    )
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_create_time_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(
        fields['artifactlist_xls_artifact_create_time'].label,
        'Export create time',
    )
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_artifact_modify_time_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(
        fields['artifactlist_xls_artifact_modify_time'].label,
        'Export modify time',
    )
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_worksheet_artifactstatus_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(
        fields['artifactlist_xls_worksheet_artifactstatus'].label,
        'Export worksheet to explain artifactstatus',
    )
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_worksheet_artifacttype_form_label(
    self,
):
    """test form label"""
    # inspect the label generated for the field on an unbound form
    fields = ArtifactExporterSpreadsheetXlsConfigForm().fields
    self.assertEqual(
        fields['artifactlist_xls_worksheet_artifacttype'].label,
        'Export worksheet to explain artifacttype',
    )
def test_artifact_exporter_spreadsheet_xls_config_form_empty(self):
    """test minimum form requirements / INVALID"""
    # an empty data dict must fail validation
    empty_form = ArtifactExporterSpreadsheetXlsConfigForm(data={})
    self.assertFalse(empty_form.is_valid())
def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_choice_artifactstatus_form_filled(
    self,
):
    """test minimum form requirements / VALID"""
    # the artifactstatus choice is the only mandatory input of this form
    artifactstatus = Artifactstatus.objects.get(
        artifactstatus_name='artifactstatus_1'
    )
    form = ArtifactExporterSpreadsheetXlsConfigForm(
        data={
            'artifactlist_xls_choice_artifactstatus': [artifactstatus],
        }
    )
    self.assertTrue(form.is_valid())
| 30.839161
| 111
| 0.65805
| 772
| 8,820
| 7.120466
| 0.09456
| 0.10915
| 0.108059
| 0.120065
| 0.813353
| 0.747862
| 0.739858
| 0.739858
| 0.72767
| 0.706567
| 0
| 0.002802
| 0.271769
| 8,820
| 285
| 112
| 30.947368
| 0.853028
| 0.097506
| 0
| 0.453488
| 0
| 0
| 0.147835
| 0.088906
| 0
| 0
| 0
| 0
| 0.122093
| 1
| 0.127907
| false
| 0
| 0.017442
| 0
| 0.151163
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
73bb28bf3c556576785e72ce8701f0dd36e35546
| 7,817
|
py
|
Python
|
model/config.py
|
nachiket273/efficientnetv2
|
fdcbcf48ad84d4b16c0edc18f55a27ee5bafd2de
|
[
"MIT"
] | 1
|
2021-12-01T20:12:49.000Z
|
2021-12-01T20:12:49.000Z
|
model/config.py
|
nachiket273/efficientnetv2
|
fdcbcf48ad84d4b16c0edc18f55a27ee5bafd2de
|
[
"MIT"
] | null | null | null |
model/config.py
|
nachiket273/efficientnetv2
|
fdcbcf48ad84d4b16c0edc18f55a27ee5bafd2de
|
[
"MIT"
] | null | null | null |
# Stage/stem configuration template for EfficientNetV2-S. ``get_cfg`` swaps
# out ``'layers'`` for the M/L/XL variants.
CFG = {
    'in_ch': 3,
    'out_ch': 24,
    'kernel_size': 3,
    'stride': 2,
    'width_mult': 1,
    'divisor': 8,
    'actn_layer': None,
    'layers': [
        {'channels': 24, 'expansion': 1, 'kernel_size': 3, 'stride': 1,
         'nums': 2, 'norm_layer': None, 'dropout_ratio': 0.1, 'dc_ratio': 0.2,
         'reduction_ratio': 0.25, 'actn_layer': None, 'fused': True,
         'use_se': False},
        {'channels': 48, 'expansion': 4, 'kernel_size': 3, 'stride': 2,
         'nums': 4, 'norm_layer': None, 'dropout_ratio': 0.1, 'dc_ratio': 0.2,
         'reduction_ratio': 0.25, 'actn_layer': None, 'fused': True,
         'use_se': False},
        {'channels': 64, 'expansion': 4, 'kernel_size': 3, 'stride': 2,
         'nums': 4, 'norm_layer': None, 'dropout_ratio': 0.1, 'dc_ratio': 0.2,
         'reduction_ratio': 0.25, 'actn_layer': None, 'fused': True,
         'use_se': False},
        {'channels': 128, 'expansion': 4, 'kernel_size': 3, 'stride': 2,
         'nums': 6, 'norm_layer': None, 'dropout_ratio': 0.1, 'dc_ratio': 0.2,
         'reduction_ratio': 0.25, 'actn_layer': None, 'fused': False,
         'use_se': True},
        {'channels': 160, 'expansion': 6, 'kernel_size': 3, 'stride': 1,
         'nums': 9, 'norm_layer': None, 'dropout_ratio': 0.1, 'dc_ratio': 0.2,
         'reduction_ratio': 0.25, 'actn_layer': None, 'fused': False,
         'use_se': True},
        {'channels': 256, 'expansion': 6, 'kernel_size': 3, 'stride': 2,
         'nums': 15, 'norm_layer': None, 'dropout_ratio': 0.1, 'dc_ratio': 0.2,
         'reduction_ratio': 0.25, 'actn_layer': None, 'fused': False,
         'use_se': True}
    ]
}


def get_default_cfg():
    """Return a fresh copy of the EfficientNetV2-S configuration.

    The original implementation returned the module-level ``CFG`` dict
    itself; ``get_cfg`` then overwrote ``cfg['layers']`` in place, which
    silently corrupted the shared template for every later call. Returning
    a copy (top level plus a fresh list of per-stage dicts — all other
    values are immutable scalars or None) makes each call independent.
    """
    cfg = {key: value for key, value in CFG.items() if key != 'layers'}
    cfg['layers'] = [dict(stage) for stage in CFG['layers']]
    return cfg
def get_cfg(name='efficientnetv2_s'):
    """Return the configuration dict for a named EfficientNetV2 variant.

    Keyword arguments:
    name -- one of 'efficientnetv2_s', 'efficientnetv2_m', 'efficientnetv2_l'
            or 'efficientnetv2_xl' (case-insensitive)

    Raises ValueError for any other name.
    """

    def _stage(channels, expansion, stride, nums, fused):
        # Every stage in the original tables shared kernel_size=3,
        # dropout_ratio=0.1, dc_ratio=0.2 and reduction_ratio=0.25, and a
        # 'fused' stage always had use_se=False (and vice versa), so only
        # the five varying values need to be spelled out per stage.
        return {'channels': channels, 'expansion': expansion,
                'kernel_size': 3, 'stride': stride, 'nums': nums,
                'norm_layer': None, 'dropout_ratio': 0.1, 'dc_ratio': 0.2,
                'reduction_ratio': 0.25, 'actn_layer': None,
                'fused': fused, 'use_se': not fused}

    # Per-variant (channels, expansion, stride, nums, fused) stage specs;
    # None means the default layer list from get_default_cfg() ('s') is kept.
    stage_specs = {
        'efficientnetv2_s': None,
        'efficientnetv2_m': [
            (24, 1, 1, 3, True), (48, 4, 2, 5, True), (80, 4, 2, 5, True),
            (160, 4, 2, 7, False), (176, 6, 1, 14, False),
            (304, 6, 2, 18, False), (512, 6, 1, 5, False),
        ],
        'efficientnetv2_l': [
            (32, 1, 1, 4, True), (64, 4, 2, 7, True), (96, 4, 2, 7, True),
            (192, 4, 2, 10, False), (224, 6, 1, 19, False),
            (384, 6, 2, 25, False), (640, 6, 1, 7, False),
        ],
        'efficientnetv2_xl': [
            (32, 1, 1, 4, True), (64, 4, 2, 8, True), (96, 4, 2, 8, True),
            (192, 4, 2, 16, False), (256, 6, 1, 24, False),
            (512, 6, 2, 32, False), (640, 6, 1, 8, False),
        ],
    }

    name = name.lower()
    if name not in stage_specs:
        raise ValueError("No pretrained config available"
                         " for name {}".format(name))
    # Shallow-copy the default so rebinding cfg['layers'] below can never
    # corrupt the shared module-level template between calls (the original
    # assigned into the dict returned by get_default_cfg() directly).
    cfg = dict(get_default_cfg())
    spec = stage_specs[name]
    if spec is not None:
        cfg['layers'] = [_stage(*stage) for stage in spec]
    return cfg
| 53.541096
| 79
| 0.521939
| 1,005
| 7,817
| 3.850746
| 0.081592
| 0.125581
| 0.079587
| 0.122997
| 0.924548
| 0.919897
| 0.919897
| 0.914987
| 0.86124
| 0.86124
| 0
| 0.068849
| 0.280926
| 7,817
| 145
| 80
| 53.910345
| 0.619641
| 0
| 0
| 0.631206
| 0
| 0
| 0.388512
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014184
| false
| 0
| 0
| 0.007092
| 0.028369
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
73ea795b18c880afbf438b724b855276692a0da1
| 2,762
|
py
|
Python
|
account/migrations/0001_initial.py
|
JishnuTU/D-M-Intelligence-S
|
aa094bc8a4d20fddd9c6a043559833226f46044d
|
[
"MIT"
] | 4
|
2019-10-17T00:27:09.000Z
|
2021-04-09T05:17:19.000Z
|
account/migrations/0001_initial.py
|
Jishnu04/D-M-Intelligence-S
|
aa094bc8a4d20fddd9c6a043559833226f46044d
|
[
"MIT"
] | 1
|
2020-07-09T18:39:16.000Z
|
2020-09-21T11:30:17.000Z
|
account/migrations/0001_initial.py
|
Jishnu04/D-M-Intelligence-S
|
aa094bc8a4d20fddd9c6a043559833226f46044d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-12 08:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``account`` app.

    Creates four structurally identical role tables -- Accomadation,
    Hospital, Pronearea and Volunteer -- each linked one-to-one to
    ``home.User`` and differing only in their integer metric column
    (capacity / canoccupy / population / humanaid).
    """

    # First migration of this app.
    initial = True

    # ``home.User`` must exist before the OneToOneFields below can target it.
    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        # NOTE(review): model name is misspelled ('Accomadation' vs
        # 'Accommodation'); renaming would need a follow-up migration.
        migrations.CreateModel(
            name='Accomadation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=70)),
                ('usertype', models.CharField(max_length=50)),
                ('capacity', models.IntegerField()),
                ('status', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='updates_capacity', to='home.User')),
            ],
        ),
        migrations.CreateModel(
            name='Hospital',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=70)),
                ('usertype', models.CharField(max_length=50)),
                ('canoccupy', models.IntegerField()),
                ('status', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='updates_occupy', to='home.User')),
            ],
        ),
        migrations.CreateModel(
            name='Pronearea',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=70)),
                ('usertype', models.CharField(max_length=50)),
                ('population', models.IntegerField()),
                ('status', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='updates_population', to='home.User')),
            ],
        ),
        migrations.CreateModel(
            name='Volunteer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=70)),
                ('usertype', models.CharField(max_length=50)),
                ('humanaid', models.IntegerField()),
                ('status', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='updates_humanaid', to='home.User')),
            ],
        ),
    ]
| 43.84127
| 143
| 0.580014
| 266
| 2,762
| 5.879699
| 0.263158
| 0.076726
| 0.092072
| 0.122762
| 0.77046
| 0.77046
| 0.703325
| 0.703325
| 0.703325
| 0.703325
| 0
| 0.018417
| 0.272629
| 2,762
| 62
| 144
| 44.548387
| 0.76008
| 0.02462
| 0
| 0.592593
| 1
| 0
| 0.108881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.12963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fbb3965ef572cb99f9fe6ec71c63a225cc16c6f0
| 89
|
py
|
Python
|
tests/data/modules/test_1920/plugins/__init__.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 6
|
2021-03-09T10:24:02.000Z
|
2022-01-16T03:52:11.000Z
|
tests/data/modules/test_1920/plugins/__init__.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 1,319
|
2020-12-18T08:52:29.000Z
|
2022-03-31T18:17:32.000Z
|
tests/data/modules/test_1920/plugins/__init__.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 4
|
2021-03-03T15:36:50.000Z
|
2022-03-11T11:41:51.000Z
|
from inmanta.plugins import plugin
@plugin
def some_name() -> "bool":
    """Trivial inmanta plugin used by this test module; always reports False."""
    result = False
    return result
| 12.714286
| 34
| 0.707865
| 12
| 89
| 5.166667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191011
| 89
| 6
| 35
| 14.833333
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
837668f001ebb2a8dd37fdad6281fd12594ce5f6
| 45
|
py
|
Python
|
enthought/pyface/constant.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/constant.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/constant.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.constant import *
| 15
| 29
| 0.777778
| 6
| 45
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 30
| 22.5
| 0.921053
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8388c23bf816498c9fb30f19eadf0721ff38c0b3
| 135
|
py
|
Python
|
roman/ur/__init__.py
|
drMJ/roman
|
9650e73ec6fbb2d8044aa1bbf89fd671843ea54e
|
[
"MIT"
] | 14
|
2020-04-03T03:48:35.000Z
|
2021-11-08T11:17:41.000Z
|
roman/ur/__init__.py
|
drMJ/roman
|
9650e73ec6fbb2d8044aa1bbf89fd671843ea54e
|
[
"MIT"
] | 5
|
2020-04-17T21:59:35.000Z
|
2022-01-21T23:21:45.000Z
|
roman/ur/__init__.py
|
drMJ/roman
|
9650e73ec6fbb2d8044aa1bbf89fd671843ea54e
|
[
"MIT"
] | 10
|
2020-04-16T15:44:25.000Z
|
2021-11-10T08:22:52.000Z
|
from .realtime.constants import *
from .arm import *
from .connection import *
from .sim_connection import *
from .controllers import *
| 27
| 33
| 0.777778
| 17
| 135
| 6.117647
| 0.470588
| 0.384615
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140741
| 135
| 5
| 34
| 27
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
83893a090af1a7cb63f45b4de726e633bc2ebbe7
| 557
|
py
|
Python
|
service/classification/dataset/transform.py
|
Navan0/poc-of-hm
|
9b14325908445462721d7bed64e09b6c5d39f694
|
[
"Apache-2.0"
] | null | null | null |
service/classification/dataset/transform.py
|
Navan0/poc-of-hm
|
9b14325908445462721d7bed64e09b6c5d39f694
|
[
"Apache-2.0"
] | null | null | null |
service/classification/dataset/transform.py
|
Navan0/poc-of-hm
|
9b14325908445462721d7bed64e09b6c5d39f694
|
[
"Apache-2.0"
] | null | null | null |
from torchvision import transforms


def _xception_transform():
    """Build the Xception preprocessing pipeline (299x299, per-channel [-1, 1]).

    Normalize([0.5]*3, [0.5]*3) maps each RGB channel from [0, 1] to [-1, 1].
    """
    return transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize([0.5] * 3, [0.5] * 3),
    ])


# The original spelled out three byte-identical pipelines; build one
# independent (but equal) pipeline per dataset split instead.
xception_default_data_transforms = {
    split: _xception_transform() for split in ('train', 'val', 'test')
}
| 27.85
| 50
| 0.576302
| 59
| 557
| 5.389831
| 0.305085
| 0.037736
| 0.056604
| 0.311321
| 0.773585
| 0.773585
| 0.773585
| 0.773585
| 0.773585
| 0.773585
| 0
| 0.085714
| 0.245961
| 557
| 19
| 51
| 29.315789
| 0.671429
| 0
| 0
| 0.666667
| 0
| 0
| 0.021544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
839b52a0818a565dddbd10229f17f334c9ac291a
| 795
|
py
|
Python
|
src/core/sobol_gen/utility.py
|
JustinRuan/Pathological-images
|
478f0b568068e591e282e9566786e683ec39a108
|
[
"MIT"
] | 2
|
2022-01-17T12:04:02.000Z
|
2022-03-08T21:59:39.000Z
|
sobol_gen/utility.py
|
elderfd/sobol_gen
|
25bbfae734239bef4304cb1b0b32662b517d1b56
|
[
"MIT"
] | null | null | null |
sobol_gen/utility.py
|
elderfd/sobol_gen
|
25bbfae734239bef4304cb1b0b32662b517d1b56
|
[
"MIT"
] | 1
|
2020-03-08T09:00:43.000Z
|
2020-03-08T09:00:43.000Z
|
import numpy
def high_bit_pos(i):
    """Converts a positive integer to base 2 and returns the position of the high order bit.

    Keyword arguments:
    i -- Integer to find high order bit within (non-integer values are floored)

    Returns 0 for i == 0; raises RuntimeError for negative input.
    """
    if i < 0:
        raise RuntimeError("Supplied value {0} was not positive".format(i))
    # int() truncates toward zero, which equals floor() for the non-negative
    # values accepted here (the original used numpy.floor); int.bit_length()
    # is exactly the 1-based high-bit position computed by the original
    # divide-by-two loop, including bit_length(0) == 0.
    return int(i).bit_length()
def low_bit_pos(i):
    """Converts a positive integer to base 2 and returns the position of the lowest zero bit.

    Note: despite the name, this returns the 1-based position of the lowest
    *clear* (zero) bit -- e.g. 1 for any even number and 2 for i == 1 --
    which is the quantity used by Gray-code-based Sobol generators. The
    previous docstring ("low order bit" / "high order bit") was a
    copy-paste of high_bit_pos and did not match the loop condition.

    Keyword arguments:
    i -- Integer to find the lowest zero bit within (non-integer values are floored)

    Raises RuntimeError for negative input.
    """
    if i < 0:
        raise RuntimeError("Supplied value {0} was not positive".format(i))
    i = int(i)  # truncation == floor for the non-negative values accepted here
    bit = 1
    # The original test `i != 2 * (i // 2)` is simply "i is odd": skip over
    # consecutive set low bits until the first clear one is found.
    while i & 1:
        bit += 1
        i >>= 1
    return bit
| 22.083333
| 92
| 0.584906
| 122
| 795
| 3.778689
| 0.311475
| 0.078091
| 0.078091
| 0.065076
| 0.893709
| 0.893709
| 0.828633
| 0.828633
| 0.828633
| 0.828633
| 0
| 0.027624
| 0.316981
| 795
| 35
| 93
| 22.714286
| 0.821363
| 0.372327
| 0
| 0.631579
| 0
| 0
| 0.151515
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
83ad01e914b86305b970f57c8fb68f209c828d81
| 102
|
py
|
Python
|
veripb/__init__.py
|
StephanGocht/refpy
|
e244dc5c21ebb2887c428b3b3ada003528afa27a
|
[
"MIT"
] | 5
|
2020-03-03T16:16:56.000Z
|
2022-01-31T09:23:36.000Z
|
veripb/__init__.py
|
StephanGocht/VeriPB
|
a6b4314be574f09af0736600583dc714a469c0d7
|
[
"MIT"
] | 25
|
2019-11-19T17:23:21.000Z
|
2022-02-23T16:51:46.000Z
|
veripb/__init__.py
|
StephanGocht/refpy
|
e244dc5c21ebb2887c428b3b3ada003528afa27a
|
[
"MIT"
] | 1
|
2019-07-02T12:19:15.000Z
|
2019-07-02T12:19:15.000Z
|
from veripb.exceptions import ParseError, InvalidProof
from veripb.utils import run,runUI,run_cmd_main
| 51
| 54
| 0.872549
| 15
| 102
| 5.8
| 0.733333
| 0.229885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 102
| 2
| 55
| 51
| 0.925532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83b14f244f2f688852b66c5d267b4b51d129c66b
| 37
|
py
|
Python
|
__init__.py
|
yswd82/pymoi
|
73d3ad221bbd431916ff94ee1bbf64b75ee3bbc7
|
[
"MIT"
] | 1
|
2021-08-15T01:26:02.000Z
|
2021-08-15T01:26:02.000Z
|
__init__.py
|
yswd82/pymoi
|
73d3ad221bbd431916ff94ee1bbf64b75ee3bbc7
|
[
"MIT"
] | 4
|
2021-08-10T06:06:51.000Z
|
2021-08-17T09:38:10.000Z
|
__init__.py
|
yswd82/pymoi
|
73d3ad221bbd431916ff94ee1bbf64b75ee3bbc7
|
[
"MIT"
] | null | null | null |
from pymoi import core, reader, util
| 18.5
| 36
| 0.783784
| 6
| 37
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 1
| 37
| 37
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83c15c4c25e6d5e5a9d3653a755a44f2dc84b667
| 119,261
|
py
|
Python
|
tests/unit/gapic/transcoder_v1beta1/test_transcoder_service.py
|
renovate-bot/python-video-transcoder-1
|
e9c1c229fe88d200d0f60314814078e79e3f1524
|
[
"Apache-2.0"
] | 5
|
2021-03-05T22:36:04.000Z
|
2022-02-01T09:58:04.000Z
|
tests/unit/gapic/transcoder_v1beta1/test_transcoder_service.py
|
renovate-bot/python-video-transcoder-1
|
e9c1c229fe88d200d0f60314814078e79e3f1524
|
[
"Apache-2.0"
] | 51
|
2020-08-24T15:43:20.000Z
|
2022-03-07T16:43:36.000Z
|
tests/unit/gapic/transcoder_v1beta1/test_transcoder_service.py
|
renovate-bot/python-video-transcoder-1
|
e9c1c229fe88d200d0f60314814078e79e3f1524
|
[
"Apache-2.0"
] | 8
|
2020-08-24T15:39:52.000Z
|
2022-02-24T17:43:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import (
TranscoderServiceAsyncClient,
)
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import (
TranscoderServiceClient,
)
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import pagers
from google.cloud.video.transcoder_v1beta1.services.transcoder_service import transports
from google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.video.transcoder_v1beta1.types import resources
from google.cloud.video.transcoder_v1beta1.types import services
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip marker: run the decorated test only when google-auth < 1.25.0 is installed.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
# Complementary marker: run only when google-auth >= 1.25.0 is installed.
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Dummy mTLS client-certificate callback returning static (cert, key) bytes."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-localhost endpoint so mTLS-endpoint derivation is observable."""
    default = client.DEFAULT_ENDPOINT
    if "localhost" in default:
        return "foo.googleapis.com"
    return default
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mTLS variants."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        # already-mTLS endpoints pass through unchanged
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # non-Google hosts are left untouched
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert TranscoderServiceClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize(
    "client_class", [TranscoderServiceClient, TranscoderServiceAsyncClient,]
)
def test_transcoder_service_client_from_service_account_info(client_class):
    """from_service_account_info() must wire the parsed credentials into the transport."""
    creds = ga_credentials.AnonymousCredentials()
    patcher = mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    )
    with patcher as factory:
        factory.return_value = creds
        client = client_class.from_service_account_info({"valid": True})
        # the mocked credentials end up on the transport of the right class
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "transcoder.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.TranscoderServiceGrpcTransport, "grpc"),
        (transports.TranscoderServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_transcoder_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports must toggle self-signed JWTs based on always_use_jwt_access."""
    for enabled in (True, False):
        with mock.patch.object(
            service_account.Credentials, "with_always_use_jwt_access", create=True
        ) as use_jwt:
            creds = service_account.Credentials(None, None, None)
            transport_class(credentials=creds, always_use_jwt_access=enabled)
            if enabled:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [TranscoderServiceClient, TranscoderServiceAsyncClient,]
)
def test_transcoder_service_client_from_service_account_file(client_class):
    """Both file-based constructors must produce a fully wired client."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        # from_service_account_json is an alias of from_service_account_file
        for constructor in (
            client_class.from_service_account_file,
            client_class.from_service_account_json,
        ):
            client = constructor("dummy/file/path.json")
            assert client.transport._credentials == creds
            assert isinstance(client, client_class)
        assert client.transport._host == "transcoder.googleapis.com:443"
def test_transcoder_service_client_get_transport_class():
    """get_transport_class() resolves both the default and the named transport."""
    default_transport = TranscoderServiceClient.get_transport_class()
    assert default_transport in [
        transports.TranscoderServiceGrpcTransport,
    ]
    named_transport = TranscoderServiceClient.get_transport_class("grpc")
    assert named_transport == transports.TranscoderServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TranscoderServiceClient, transports.TranscoderServiceGrpcTransport, "grpc"),
        (
            TranscoderServiceAsyncClient,
            transports.TranscoderServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    TranscoderServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(TranscoderServiceClient),
)
@mock.patch.object(
    TranscoderServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(TranscoderServiceAsyncClient),
)
def test_transcoder_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Verify how client_options and GOOGLE_API_USE_MTLS_ENDPOINT drive transport
    construction: explicit transports, api_endpoint override, endpoint switching
    ("never"/"always"), invalid env values, and quota_project_id forwarding.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(TranscoderServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(TranscoderServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        # The explicit api_endpoint wins over any default.
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            # "always" forces the mTLS endpoint even without a client cert.
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            TranscoderServiceClient,
            transports.TranscoderServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            TranscoderServiceAsyncClient,
            transports.TranscoderServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            TranscoderServiceClient,
            transports.TranscoderServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            TranscoderServiceAsyncClient,
            transports.TranscoderServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    TranscoderServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(TranscoderServiceClient),
)
@mock.patch.object(
    TranscoderServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(TranscoderServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_transcoder_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the endpoint and cert source depend
    on GOOGLE_API_USE_CLIENT_CERTIFICATE and on whether a client cert is available
    (explicit client_cert_source, ADC cert, or neither).
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)

            if use_client_cert_env == "false":
                # Cert explicitly disabled by env: plain endpoint, no cert source.
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance bound in the
                    # previous section; the DEFAULT_* endpoints appear to be
                    # class-level attributes so this works, but confirm.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                # No cert anywhere: always the plain endpoint, regardless of env.
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TranscoderServiceClient, transports.TranscoderServiceGrpcTransport, "grpc"),
        (
            TranscoderServiceAsyncClient,
            transports.TranscoderServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_transcoder_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded to the transport."""
    scope_options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched_init:
        patched_init.return_value = None
        client = client_class(client_options=scope_options)
        # Everything else stays at its default; only `scopes` is customized.
        patched_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (TranscoderServiceClient, transports.TranscoderServiceGrpcTransport, "grpc"),
        (
            TranscoderServiceAsyncClient,
            transports.TranscoderServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_transcoder_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file in client_options is passed verbatim to the transport."""
    file_options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched_init:
        patched_init.return_value = None
        client = client_class(client_options=file_options)
        # Only `credentials_file` differs from the defaults.
        patched_init.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_transcoder_service_client_client_options_from_dict():
    """client_options may be given as a plain dict instead of a ClientOptions."""
    with mock.patch(
        "google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceGrpcTransport.__init__"
    ) as patched_init:
        patched_init.return_value = None
        client = TranscoderServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        # The dict's api_endpoint overrides the default host.
        patched_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_create_job(transport: str = "grpc", request_type=services.CreateJobRequest):
    """create_job issues one RPC and surfaces the mocked Job fields on the response."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = resources.Job(
            name="name_value",
            input_uri="input_uri_value",
            output_uri="output_uri_value",
            priority=898,
            state=resources.Job.ProcessingState.PENDING,
            failure_reason="failure_reason_value",
            ttl_after_completion_days=2670,
            template_id="template_id_value",
        )
        response = client.create_job(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.CreateJobRequest()

    # Establish that the response is the type that we expect.
    # NOTE(review): template_id is set on the mocked Job above but never
    # asserted below — presumably deliberate generator behavior; confirm.
    assert isinstance(response, resources.Job)
    assert response.name == "name_value"
    assert response.input_uri == "input_uri_value"
    assert response.output_uri == "output_uri_value"
    assert response.priority == 898
    assert response.state == resources.Job.ProcessingState.PENDING
    assert response.failure_reason == "failure_reason_value"
    assert response.ttl_after_completion_days == 2670
def test_create_job_from_dict():
    """Re-run the create_job happy path with a dict-typed request."""
    test_create_job(request_type=dict)
def test_create_job_empty_call():
    # Coverage failsafe: a call with neither a request object nor flattened
    # fields must still issue an RPC carrying a default CreateJobRequest.
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.create_job), "__call__") as rpc_call:
        client.create_job()
        rpc_call.assert_called()
        _, call_args, _ = rpc_call.mock_calls[0]
        assert call_args[0] == services.CreateJobRequest()
@pytest.mark.asyncio
async def test_create_job_async(
    transport: str = "grpc_asyncio", request_type=services.CreateJobRequest
):
    """Async variant: create_job awaits the RPC and surfaces the Job fields."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        # The awaitable fake mimics a unary-unary gRPC call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.Job(
                name="name_value",
                input_uri="input_uri_value",
                output_uri="output_uri_value",
                priority=898,
                state=resources.Job.ProcessingState.PENDING,
                failure_reason="failure_reason_value",
                ttl_after_completion_days=2670,
            )
        )
        response = await client.create_job(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.CreateJobRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.Job)
    assert response.name == "name_value"
    assert response.input_uri == "input_uri_value"
    assert response.output_uri == "output_uri_value"
    assert response.priority == 898
    assert response.state == resources.Job.ProcessingState.PENDING
    assert response.failure_reason == "failure_reason_value"
    assert response.ttl_after_completion_days == 2670
@pytest.mark.asyncio
async def test_create_job_async_from_dict():
    """Re-run the async create_job happy path with a dict-typed request."""
    await test_create_job_async(request_type=dict)
def test_create_job_field_headers():
    """request.parent must be sent as an x-goog-request-params routing header."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header, so give parent a non-empty value.
    request = services.CreateJobRequest()
    request.parent = "parent/value"

    with mock.patch.object(type(client.transport.create_job), "__call__") as rpc_call:
        rpc_call.return_value = resources.Job()
        client.create_job(request)

    # Exactly one underlying call, carrying the original request object.
    assert len(rpc_call.mock_calls) == 1
    _, call_args, _ = rpc_call.mock_calls[0]
    assert call_args[0] == request

    # The routing header derived from request.parent was attached.
    _, _, call_kwargs = rpc_call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_job_field_headers_async():
    """Async variant: request.parent is propagated as a routing header."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.CreateJobRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
        await client.create_job(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_job_flattened():
    """Flattened keyword arguments are packed into the outgoing request."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    with mock.patch.object(type(client.transport.create_job), "__call__") as rpc_call:
        rpc_call.return_value = resources.Job()
        # Invoke with a truthy value for each flattened field.
        client.create_job(
            parent="parent_value", job=resources.Job(name="name_value"),
        )

    # The single underlying call carries the flattened values in its request.
    assert len(rpc_call.mock_calls) == 1
    _, call_args, _ = rpc_call.mock_calls[0]
    built_request = call_args[0]
    assert built_request.parent == "parent_value"
    assert built_request.job == resources.Job(name="name_value")
def test_create_job_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.create_job(
            services.CreateJobRequest(),
            parent="parent_value",
            job=resources.Job(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_job_flattened_async():
    """Async variant: flattened kwargs are packed into the outgoing request."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_job), "__call__") as call:
        # Designate an appropriate return value for the call.  (Removed a dead
        # `call.return_value = resources.Job()` assignment that was immediately
        # overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_job(
            parent="parent_value", job=resources.Job(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].job == resources.Job(name="name_value")
@pytest.mark.asyncio
async def test_create_job_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.create_job(
            services.CreateJobRequest(),
            parent="parent_value",
            job=resources.Job(name="name_value"),
        )
def test_list_jobs(transport: str = "grpc", request_type=services.ListJobsRequest):
    """list_jobs issues one RPC and wraps the response in a ListJobsPager."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = services.ListJobsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_jobs(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.ListJobsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListJobsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_jobs_from_dict():
    """Re-run the list_jobs happy path with a dict-typed request."""
    test_list_jobs(request_type=dict)
def test_list_jobs_empty_call():
    # Coverage failsafe: a call with neither a request object nor flattened
    # fields must still go out carrying a default ListJobsRequest.
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as rpc_call:
        client.list_jobs()
        rpc_call.assert_called()
        _, call_args, _ = rpc_call.mock_calls[0]
        assert call_args[0] == services.ListJobsRequest()
@pytest.mark.asyncio
async def test_list_jobs_async(
    transport: str = "grpc_asyncio", request_type=services.ListJobsRequest
):
    """Async variant: list_jobs awaits the RPC and returns an async pager."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_jobs(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.ListJobsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListJobsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_jobs_async_from_dict():
    """Re-run the async list_jobs happy path with a dict-typed request."""
    await test_list_jobs_async(request_type=dict)
def test_list_jobs_field_headers():
    """request.parent must be sent as an x-goog-request-params routing header."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.ListJobsRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        call.return_value = services.ListJobsResponse()
        client.list_jobs(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_jobs_field_headers_async():
    """Async variant: request.parent is propagated as a routing header."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.ListJobsRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobsResponse()
        )
        await client.list_jobs(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_jobs_flattened():
    """A flattened parent kwarg is copied into the outgoing request."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    with mock.patch.object(type(client.transport.list_jobs), "__call__") as rpc_call:
        rpc_call.return_value = services.ListJobsResponse()
        # Invoke with a truthy value for the flattened field.
        client.list_jobs(parent="parent_value",)

    # Exactly one underlying call, with the flattened value applied.
    assert len(rpc_call.mock_calls) == 1
    _, call_args, _ = rpc_call.mock_calls[0]
    assert call_args[0].parent == "parent_value"
def test_list_jobs_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.list_jobs(
            services.ListJobsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_jobs_flattened_async():
    """Async variant: a flattened parent kwarg is copied into the request."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Designate an appropriate return value for the call.  (Removed a dead
        # synchronous `services.ListJobsResponse()` assignment that was
        # immediately overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_jobs(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_jobs_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.list_jobs(
            services.ListJobsRequest(), parent="parent_value",
        )
def test_list_jobs_pager():
    """The sync pager flattens jobs across pages and carries routing metadata."""
    # Instantiate the credentials; the original passed the credentials CLASS
    # rather than an instance — every other test in this file instantiates,
    # so normalize for consistency.
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager over-fetching past the last page.
        call.side_effect = (
            services.ListJobsResponse(
                jobs=[resources.Job(), resources.Job(), resources.Job(),],
                next_page_token="abc",
            ),
            services.ListJobsResponse(jobs=[], next_page_token="def",),
            services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
            services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_jobs(request={})

        assert pager._metadata == metadata

        # Iterating the pager yields every job across all pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, resources.Job) for i in results)
def test_list_jobs_pages():
    """The .pages iterator exposes each raw page with its next_page_token."""
    # Instantiate the credentials; the original passed the credentials CLASS
    # rather than an instance — normalized for consistency with the rest of
    # the file.
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            services.ListJobsResponse(
                jobs=[resources.Job(), resources.Job(), resources.Job(),],
                next_page_token="abc",
            ),
            services.ListJobsResponse(jobs=[], next_page_token="def",),
            services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
            services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
            RuntimeError,
        )
        pages = list(client.list_jobs(request={}).pages)
        # The final page has an empty token, terminating pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_jobs_async_pager():
    """The async pager yields every job across all mocked pages."""
    # Instantiate the credentials; the original passed the credentials CLASS
    # rather than an instance — normalized for consistency with the rest of
    # the file.
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            services.ListJobsResponse(
                jobs=[resources.Job(), resources.Job(), resources.Job(),],
                next_page_token="abc",
            ),
            services.ListJobsResponse(jobs=[], next_page_token="def",),
            services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
            services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
            RuntimeError,
        )
        async_pager = await client.list_jobs(request={},)
        assert async_pager.next_page_token == "abc"

        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, resources.Job) for i in responses)
@pytest.mark.asyncio
async def test_list_jobs_async_pages():
    """The async .pages iterator exposes each raw page and its token."""
    # Instantiate the credentials; the original passed the credentials CLASS
    # rather than an instance — normalized for consistency with the rest of
    # the file.
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            services.ListJobsResponse(
                jobs=[resources.Job(), resources.Job(), resources.Job(),],
                next_page_token="abc",
            ),
            services.ListJobsResponse(jobs=[], next_page_token="def",),
            services.ListJobsResponse(jobs=[resources.Job(),], next_page_token="ghi",),
            services.ListJobsResponse(jobs=[resources.Job(), resources.Job(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_jobs(request={})).pages:
            pages.append(page_)
        # The final page has an empty token, terminating pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_get_job(transport: str = "grpc", request_type=services.GetJobRequest):
    """get_job issues one RPC and surfaces the mocked Job fields on the response."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = resources.Job(
            name="name_value",
            input_uri="input_uri_value",
            output_uri="output_uri_value",
            priority=898,
            state=resources.Job.ProcessingState.PENDING,
            failure_reason="failure_reason_value",
            ttl_after_completion_days=2670,
            template_id="template_id_value",
        )
        response = client.get_job(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.GetJobRequest()

    # Establish that the response is the type that we expect.
    # NOTE(review): template_id is set on the mocked Job above but never
    # asserted below — presumably deliberate generator behavior; confirm.
    assert isinstance(response, resources.Job)
    assert response.name == "name_value"
    assert response.input_uri == "input_uri_value"
    assert response.output_uri == "output_uri_value"
    assert response.priority == 898
    assert response.state == resources.Job.ProcessingState.PENDING
    assert response.failure_reason == "failure_reason_value"
    assert response.ttl_after_completion_days == 2670
def test_get_job_from_dict():
    """Re-run the get_job happy path with a dict-typed request."""
    test_get_job(request_type=dict)
def test_get_job_empty_call():
    # Coverage failsafe: a call with neither a request object nor flattened
    # fields must still go out carrying a default GetJobRequest.
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.get_job), "__call__") as rpc_call:
        client.get_job()
        rpc_call.assert_called()
        _, call_args, _ = rpc_call.mock_calls[0]
        assert call_args[0] == services.GetJobRequest()
@pytest.mark.asyncio
async def test_get_job_async(
    transport: str = "grpc_asyncio", request_type=services.GetJobRequest
):
    """Async variant: get_job awaits the RPC and surfaces the Job fields."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        # The awaitable fake mimics a unary-unary gRPC call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.Job(
                name="name_value",
                input_uri="input_uri_value",
                output_uri="output_uri_value",
                priority=898,
                state=resources.Job.ProcessingState.PENDING,
                failure_reason="failure_reason_value",
                ttl_after_completion_days=2670,
            )
        )
        response = await client.get_job(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.GetJobRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.Job)
    assert response.name == "name_value"
    assert response.input_uri == "input_uri_value"
    assert response.output_uri == "output_uri_value"
    assert response.priority == 898
    assert response.state == resources.Job.ProcessingState.PENDING
    assert response.failure_reason == "failure_reason_value"
    assert response.ttl_after_completion_days == 2670
@pytest.mark.asyncio
async def test_get_job_async_from_dict():
    """Re-run the async get_job happy path with a dict-typed request."""
    await test_get_job_async(request_type=dict)
def test_get_job_field_headers():
    """request.name must be sent as an x-goog-request-params routing header."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.GetJobRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job), "__call__") as call:
        call.return_value = resources.Job()
        client.get_job(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_job_field_headers_async():
    """Async GetJob must echo URI-bound fields as a routing field header."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    request = services.GetJobRequest()
    request.name = "name/value"

    # Patch the gRPC stub; the fake awaitable stands in for the channel.
    with mock.patch.object(type(client.transport.get_job), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
        await client.get_job(request)

        # The stub was invoked with the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_get_job_flattened():
    """GetJob accepts flattened keyword arguments instead of a request."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub and supply a canned response.
    with mock.patch.object(type(client.transport.get_job), "__call__") as stub:
        stub.return_value = resources.Job()

        # Invoke using keyword arguments only.
        client.get_job(name="name_value",)

        # The flattened value must land on the assembled request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_get_job_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.get_job(
            services.GetJobRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_job_flattened_async():
    """Async GetJob accepts flattened keyword arguments."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # ``resources.Job()`` assignment here would be dead code: the
        # awaitable wrapper below is what the async client consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_job_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.get_job(
            services.GetJobRequest(), name="name_value",
        )
def test_delete_job(transport: str = "grpc", request_type=services.DeleteJobRequest):
    """DeleteJob issues the RPC and yields no payload."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked,
    # so an empty request suffices.
    request = request_type()

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = None
        response = client.delete_job(request)

        # Exactly one RPC, carrying a default DeleteJobRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.DeleteJobRequest()

    # Deletion returns nothing.
    assert response is None
def test_delete_job_from_dict():
    """Exercise DeleteJob with a dict-typed request."""
    test_delete_job(request_type=dict)
def test_delete_job_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        client.delete_job()
        stub.assert_called()
        # A default request object must have been synthesized.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.DeleteJobRequest()
@pytest.mark.asyncio
async def test_delete_job_async(
    transport: str = "grpc_asyncio", request_type=services.DeleteJobRequest
):
    """Async DeleteJob issues the RPC and yields no payload."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # The API is mocked and proto3 fields are optional, so send an
    # empty request.
    request = request_type()

    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        # The async client expects an awaitable; wrap None accordingly.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_job(request)

        # The stub was invoked with a default DeleteJobRequest.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.DeleteJobRequest()

    # Deletion returns nothing.
    assert response is None
@pytest.mark.asyncio
async def test_delete_job_async_from_dict():
    """Exercise the async DeleteJob path with a dict-typed request."""
    await test_delete_job_async(request_type=dict)
def test_delete_job_field_headers():
    """DeleteJob must echo URI-bound fields as a routing field header."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a field that is part of the HTTP/1.1 URI.
    request = services.DeleteJobRequest()
    request.name = "name/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = None
        client.delete_job(request)

        # Exactly one RPC was issued with the request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must be present in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_job_field_headers_async():
    """Async DeleteJob must echo URI-bound fields as a routing header."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    request = services.DeleteJobRequest()
    request.name = "name/value"

    # Patch the gRPC stub; the fake awaitable stands in for the channel.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_job(request)

        # The stub was invoked with the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_delete_job_flattened():
    """DeleteJob accepts flattened keyword arguments."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub; deletion returns nothing.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as stub:
        stub.return_value = None

        # Invoke using keyword arguments only.
        client.delete_job(name="name_value",)

        # The flattened value must land on the assembled request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_delete_job_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.delete_job(
            services.DeleteJobRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_job_flattened_async():
    """Async DeleteJob accepts flattened keyword arguments."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
        # Designate an appropriate return value for the call.  (A bare
        # ``None`` assignment here would be dead code: the awaitable
        # wrapper below is what the async client consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_job_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.delete_job(
            services.DeleteJobRequest(), name="name_value",
        )
def test_create_job_template(
    transport: str = "grpc", request_type=services.CreateJobTemplateRequest
):
    """CreateJobTemplate issues the RPC and returns the template."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # The API is mocked and proto3 fields are optional; an empty request
    # is sufficient.
    request = request_type()

    with mock.patch.object(
        type(client.transport.create_job_template), "__call__"
    ) as stub:
        stub.return_value = resources.JobTemplate(name="name_value",)
        response = client.create_job_template(request)

        # Exactly one RPC, carrying a default CreateJobTemplateRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.CreateJobTemplateRequest()

    # The canned response must round-trip to the caller.
    assert isinstance(response, resources.JobTemplate)
    assert response.name == "name_value"
def test_create_job_template_from_dict():
    """Exercise CreateJobTemplate with a dict-typed request."""
    test_create_job_template(request_type=dict)
def test_create_job_template_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(client.transport.create_job_template), "__call__"
    ) as stub:
        client.create_job_template()
        stub.assert_called()
        # A default request object must have been synthesized.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.CreateJobTemplateRequest()
@pytest.mark.asyncio
async def test_create_job_template_async(
    transport: str = "grpc_asyncio", request_type=services.CreateJobTemplateRequest
):
    """Async CreateJobTemplate issues the RPC and returns the template."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # The API is mocked and proto3 fields are optional; send an empty request.
    request = request_type()

    with mock.patch.object(
        type(client.transport.create_job_template), "__call__"
    ) as stub:
        # The async client expects an awaitable response.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate(name="name_value",)
        )
        response = await client.create_job_template(request)

        # The stub was invoked with a default CreateJobTemplateRequest.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.CreateJobTemplateRequest()

    # The canned response must round-trip to the caller.
    assert isinstance(response, resources.JobTemplate)
    assert response.name == "name_value"
@pytest.mark.asyncio
async def test_create_job_template_async_from_dict():
    """Exercise the async CreateJobTemplate path with a dict-typed request."""
    await test_create_job_template_async(request_type=dict)
def test_create_job_template_field_headers():
    """CreateJobTemplate must echo URI-bound fields as a routing header."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a field that is part of the HTTP/1.1 URI.
    request = services.CreateJobTemplateRequest()
    request.parent = "parent/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.create_job_template), "__call__"
    ) as stub:
        stub.return_value = resources.JobTemplate()
        client.create_job_template(request)

        # Exactly one RPC was issued with the request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must be present in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs[
            "metadata"
        ]
@pytest.mark.asyncio
async def test_create_job_template_field_headers_async():
    """Async CreateJobTemplate must echo URI-bound fields as a routing header."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    request = services.CreateJobTemplateRequest()
    request.parent = "parent/value"

    # Patch the gRPC stub; the fake awaitable stands in for the channel.
    with mock.patch.object(
        type(client.transport.create_job_template), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate()
        )
        await client.create_job_template(request)

        # The stub was invoked with the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs[
            "metadata"
        ]
def test_create_job_template_flattened():
    """CreateJobTemplate accepts flattened keyword arguments."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub and supply a canned response.
    with mock.patch.object(
        type(client.transport.create_job_template), "__call__"
    ) as stub:
        stub.return_value = resources.JobTemplate()

        # Invoke using keyword arguments only.
        client.create_job_template(
            parent="parent_value",
            job_template=resources.JobTemplate(name="name_value"),
            job_template_id="job_template_id_value",
        )

        # Each flattened value must land on the assembled request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].parent == "parent_value"
        assert call_args[0].job_template == resources.JobTemplate(name="name_value")
        assert call_args[0].job_template_id == "job_template_id_value"
def test_create_job_template_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.create_job_template(
            services.CreateJobTemplateRequest(),
            parent="parent_value",
            job_template=resources.JobTemplate(name="name_value"),
            job_template_id="job_template_id_value",
        )
@pytest.mark.asyncio
async def test_create_job_template_flattened_async():
    """Async CreateJobTemplate accepts flattened keyword arguments."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_job_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A plain
        # ``resources.JobTemplate()`` assignment here would be dead code:
        # the awaitable wrapper below is what the async client consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_job_template(
            parent="parent_value",
            job_template=resources.JobTemplate(name="name_value"),
            job_template_id="job_template_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].job_template == resources.JobTemplate(name="name_value")
        assert args[0].job_template_id == "job_template_id_value"
@pytest.mark.asyncio
async def test_create_job_template_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.create_job_template(
            services.CreateJobTemplateRequest(),
            parent="parent_value",
            job_template=resources.JobTemplate(name="name_value"),
            job_template_id="job_template_id_value",
        )
def test_list_job_templates(
    transport: str = "grpc", request_type=services.ListJobTemplatesRequest
):
    """ListJobTemplates issues the RPC and returns a pager."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # The API is mocked and proto3 fields are optional; an empty request
    # is sufficient.
    request = request_type()

    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as stub:
        stub.return_value = services.ListJobTemplatesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_job_templates(request)

        # Exactly one RPC, carrying a default ListJobTemplatesRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.ListJobTemplatesRequest()

    # The client wraps the raw response in a pager.
    assert isinstance(response, pagers.ListJobTemplatesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_job_templates_from_dict():
    """Exercise ListJobTemplates with a dict-typed request."""
    test_list_job_templates(request_type=dict)
def test_list_job_templates_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as stub:
        client.list_job_templates()
        stub.assert_called()
        # A default request object must have been synthesized.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.ListJobTemplatesRequest()
@pytest.mark.asyncio
async def test_list_job_templates_async(
    transport: str = "grpc_asyncio", request_type=services.ListJobTemplatesRequest
):
    """Async ListJobTemplates issues the RPC and returns an async pager."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # The API is mocked and proto3 fields are optional; send an empty request.
    request = request_type()

    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as stub:
        # The async client expects an awaitable response.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobTemplatesResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_job_templates(request)

        # The stub was invoked with a default ListJobTemplatesRequest.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.ListJobTemplatesRequest()

    # The client wraps the raw response in an async pager.
    assert isinstance(response, pagers.ListJobTemplatesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_job_templates_async_from_dict():
    """Exercise the async ListJobTemplates path with a dict-typed request."""
    await test_list_job_templates_async(request_type=dict)
def test_list_job_templates_field_headers():
    """ListJobTemplates must echo URI-bound fields as a routing header."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Populate a field that is part of the HTTP/1.1 URI.
    request = services.ListJobTemplatesRequest()
    request.parent = "parent/value"

    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as stub:
        stub.return_value = services.ListJobTemplatesResponse()
        client.list_job_templates(request)

        # Exactly one RPC was issued with the request unchanged.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must be present in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs[
            "metadata"
        ]
@pytest.mark.asyncio
async def test_list_job_templates_field_headers_async():
    """Async ListJobTemplates must echo URI-bound fields as a routing header."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    request = services.ListJobTemplatesRequest()
    request.parent = "parent/value"

    # Patch the gRPC stub; the fake awaitable stands in for the channel.
    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobTemplatesResponse()
        )
        await client.list_job_templates(request)

        # The stub was invoked with the request unchanged.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == request

        # The routing header must appear in the outgoing metadata.
        _, _, call_kwargs = stub.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs[
            "metadata"
        ]
def test_list_job_templates_flattened():
    """ListJobTemplates accepts flattened keyword arguments."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the gRPC stub and supply a canned response.
    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as stub:
        stub.return_value = services.ListJobTemplatesResponse()

        # Invoke using keyword arguments only.
        client.list_job_templates(parent="parent_value",)

        # The flattened value must land on the assembled request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].parent == "parent_value"
def test_list_job_templates_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.list_job_templates(
            services.ListJobTemplatesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_job_templates_flattened_async():
    """Async ListJobTemplates accepts flattened keyword arguments."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A plain
        # ``ListJobTemplatesResponse()`` assignment here would be dead
        # code: the awaitable wrapper below is what the async client
        # consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            services.ListJobTemplatesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_job_templates(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_job_templates_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.list_job_templates(
            services.ListJobTemplatesRequest(), parent="parent_value",
        )
def test_list_job_templates_pager():
    """Iterating the pager walks every element across all pages."""
    # Pass an AnonymousCredentials *instance*, consistent with every other
    # test in this file (the original passed the class object itself).
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as call:
        # Four pages of 3, 0, 1 and 2 items; the trailing RuntimeError
        # guards against the pager fetching past the last page.
        call.side_effect = (
            services.ListJobTemplatesResponse(
                job_templates=[
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                ],
                next_page_token="abc",
            ),
            services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(),], next_page_token="ghi",
            ),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
            ),
            RuntimeError,
        )

        # The routing metadata expected for an empty parent field.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_job_templates(request={})
        assert pager._metadata == expected_metadata

        # All six templates from all four pages must be yielded.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, resources.JobTemplate) for i in results)
def test_list_job_templates_pages():
    """The ``pages`` iterator exposes each raw page and its token."""
    # Pass an AnonymousCredentials *instance*, consistent with every other
    # test in this file (the original passed the class object itself).
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_templates), "__call__"
    ) as call:
        # Four pages; the trailing RuntimeError guards against over-fetch.
        call.side_effect = (
            services.ListJobTemplatesResponse(
                job_templates=[
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                ],
                next_page_token="abc",
            ),
            services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(),], next_page_token="ghi",
            ),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_job_templates(request={}).pages)
        # Each page's raw token must match the mocked sequence; the final
        # page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_job_templates_async_pager():
    """Async iteration over the pager walks every element across all pages."""
    # Pass an AnonymousCredentials *instance*, consistent with every other
    # test in this file (the original passed the class object itself).
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_templates),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages; the trailing RuntimeError guards against over-fetch.
        call.side_effect = (
            services.ListJobTemplatesResponse(
                job_templates=[
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                ],
                next_page_token="abc",
            ),
            services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(),], next_page_token="ghi",
            ),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_job_templates(request={},)
        assert async_pager.next_page_token == "abc"

        # All six templates from all four pages must be yielded.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, resources.JobTemplate) for i in responses)
@pytest.mark.asyncio
async def test_list_job_templates_async_pages():
    """The async ``pages`` iterator exposes each raw page and its token."""
    # Pass an AnonymousCredentials *instance*, consistent with every other
    # test in this file (the original passed the class object itself).
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_job_templates),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages; the trailing RuntimeError guards against over-fetch.
        call.side_effect = (
            services.ListJobTemplatesResponse(
                job_templates=[
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                    resources.JobTemplate(),
                ],
                next_page_token="abc",
            ),
            services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(),], next_page_token="ghi",
            ),
            services.ListJobTemplatesResponse(
                job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_job_templates(request={})).pages:
            pages.append(page_)
        # Each page's raw token must match the mocked sequence; the final
        # page carries an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_get_job_template(
    transport: str = "grpc", request_type=services.GetJobTemplateRequest
):
    """GetJobTemplate issues the RPC and returns the template."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # The API is mocked and proto3 fields are optional; an empty request
    # is sufficient.
    request = request_type()

    with mock.patch.object(type(client.transport.get_job_template), "__call__") as stub:
        stub.return_value = resources.JobTemplate(name="name_value",)
        response = client.get_job_template(request)

        # Exactly one RPC, carrying a default GetJobTemplateRequest.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.GetJobTemplateRequest()

    # The canned response must round-trip to the caller.
    assert isinstance(response, resources.JobTemplate)
    assert response.name == "name_value"
def test_get_job_template_from_dict():
    """Exercise GetJobTemplate with a dict-typed request."""
    test_get_job_template(request_type=dict)
def test_get_job_template_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.get_job_template), "__call__") as stub:
        client.get_job_template()
        stub.assert_called()
        # A default request object must have been synthesized.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.GetJobTemplateRequest()
@pytest.mark.asyncio
async def test_get_job_template_async(
    transport: str = "grpc_asyncio", request_type=services.GetJobTemplateRequest
):
    """Async GetJobTemplate issues the RPC and returns the template."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # The API is mocked and proto3 fields are optional; send an empty request.
    request = request_type()

    with mock.patch.object(type(client.transport.get_job_template), "__call__") as stub:
        # The async client expects an awaitable response.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate(name="name_value",)
        )
        response = await client.get_job_template(request)

        # The stub was invoked with a default GetJobTemplateRequest.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == services.GetJobTemplateRequest()

    # The canned response must round-trip to the caller.
    assert isinstance(response, resources.JobTemplate)
    assert response.name == "name_value"
@pytest.mark.asyncio
async def test_get_job_template_async_from_dict():
    """Exercise the async GetJobTemplate path with a dict-typed request."""
    await test_get_job_template_async(request_type=dict)
def test_get_job_template_field_headers():
    """Routing header x-goog-request-params carries the request's name field."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.GetJobTemplateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
        call.return_value = resources.JobTemplate()
        client.get_job_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_job_template_field_headers_async():
    """Async variant: routing header carries the request's name field."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.GetJobTemplateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate()
        )
        await client.get_job_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_job_template_flattened():
    """Flattened keyword args are packed into the request object."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = resources.JobTemplate()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_job_template(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_job_template_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_job_template(
            services.GetJobTemplateRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_job_template_flattened_async():
    """Async variant: flattened kwargs are packed into the request object."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
        # Designate an appropriate return value for the call. (The original
        # assigned a plain JobTemplate first and immediately overwrote it;
        # that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.JobTemplate()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_job_template(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_job_template_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_job_template(
            services.GetJobTemplateRequest(), name="name_value",
        )
def test_delete_job_template(
    transport: str = "grpc", request_type=services.DeleteJobTemplateRequest
):
    """delete_job_template forwards the request and returns None."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_job_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_job_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.DeleteJobTemplateRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_job_template_from_dict():
    """Re-run the sync delete_job_template test with a dict-typed request."""
    test_delete_job_template(request_type=dict)
def test_delete_job_template_empty_call():
    """delete_job_template() with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_job_template), "__call__"
    ) as call:
        client.delete_job_template()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # The stub must have been invoked with an empty default request.
        assert args[0] == services.DeleteJobTemplateRequest()
@pytest.mark.asyncio
async def test_delete_job_template_async(
    transport: str = "grpc_asyncio", request_type=services.DeleteJobTemplateRequest
):
    """Async delete_job_template forwards the request and returns None."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_job_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_job_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == services.DeleteJobTemplateRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_job_template_async_from_dict():
    """Re-run the async delete_job_template test with a dict-typed request."""
    await test_delete_job_template_async(request_type=dict)
def test_delete_job_template_field_headers():
    """Routing header x-goog-request-params carries the request's name field."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.DeleteJobTemplateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_job_template), "__call__"
    ) as call:
        call.return_value = None
        client.delete_job_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_job_template_field_headers_async():
    """Async variant: routing header carries the request's name field."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = services.DeleteJobTemplateRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_job_template), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_job_template(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_job_template_flattened():
    """Flattened keyword args are packed into the request object."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_job_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_job_template(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_delete_job_template_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_job_template(
            services.DeleteJobTemplateRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_job_template_flattened_async():
    """Async variant: flattened kwargs are packed into the request object."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_job_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. (The original
        # assigned None first and immediately overwrote it; that dead
        # assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_job_template(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_job_template_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_job_template(
            services.DeleteJobTemplateRequest(), name="name_value",
        )
def test_credentials_transport_error():
    """Combining a transport instance with creds/files/scopes is rejected."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.TranscoderServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TranscoderServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.TranscoderServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TranscoderServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.TranscoderServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TranscoderServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A caller-supplied transport instance is used as-is by the client."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.TranscoderServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = TranscoderServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and asyncio transports expose a usable grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.TranscoderServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.TranscoderServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TranscoderServiceGrpcTransport,
        transports.TranscoderServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials (ADC)."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """When no transport is given, the client defaults to gRPC."""
    # A client should use the gRPC transport by default.
    client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.TranscoderServiceGrpcTransport,)
def test_transcoder_service_base_transport_error():
    """Supplying both credentials and credentials_file is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.TranscoderServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_transcoder_service_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.TranscoderServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_job",
        "list_jobs",
        "get_job",
        "delete_job",
        "create_job_template",
        "list_job_templates",
        "get_job_template",
        "delete_job_template",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
@requires_google_auth_gte_1_25_0
def test_transcoder_service_base_transport_with_credentials_file():
    """credentials_file is loaded via google.auth with default scopes (new auth)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.TranscoderServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_transcoder_service_base_transport_with_credentials_file_old_google_auth():
    """credentials_file is loaded with scopes= directly (pre-1.25 google-auth)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.TranscoderServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_transcoder_service_base_transport_with_adc():
    """Base transport falls back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.video.transcoder_v1beta1.services.transcoder_service.transports.TranscoderServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.TranscoderServiceTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_transcoder_service_auth_adc():
    """Client uses ADC with default_scopes (google-auth >= 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        TranscoderServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_transcoder_service_auth_adc_old_google_auth():
    """Client uses ADC with scopes= directly (pre-1.25 google-auth)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        TranscoderServiceClient()
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TranscoderServiceGrpcTransport,
        transports.TranscoderServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_transcoder_service_transport_auth_adc(transport_class):
    """Transports pass explicit scopes plus default_scopes to ADC (new auth)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TranscoderServiceGrpcTransport,
        transports.TranscoderServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_transcoder_service_transport_auth_adc_old_google_auth(transport_class):
    """Transports pass scopes= directly to ADC (pre-1.25 google-auth)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.TranscoderServiceGrpcTransport, grpc_helpers),
        (transports.TranscoderServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_transcoder_service_transport_create_channel(transport_class, grpc_helpers):
    """create_channel receives the default host, ADC creds, scopes and options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "transcoder.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="transcoder.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TranscoderServiceGrpcTransport,
        transports.TranscoderServiceGrpcAsyncIOTransport,
    ],
)
def test_transcoder_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS: ssl_channel_credentials wins; else client_cert_source_for_mtls is used."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_transcoder_service_host_no_port():
    """An endpoint without a port defaults to :443."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="transcoder.googleapis.com"
        ),
    )
    assert client.transport._host == "transcoder.googleapis.com:443"
def test_transcoder_service_host_with_port():
    """An explicit port in the endpoint is preserved."""
    client = TranscoderServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="transcoder.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "transcoder.googleapis.com:8000"
def test_transcoder_service_grpc_transport_channel():
    """A caller-supplied channel is used verbatim by the sync gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.TranscoderServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with identity, not equality (`== None` can be
    # intercepted by a custom/mocked __eq__).
    assert transport._ssl_channel_credentials is None
def test_transcoder_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is used verbatim by the asyncio gRPC transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.TranscoderServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with identity, not equality (`== None` can be
    # intercepted by a custom/mocked __eq__).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TranscoderServiceGrpcTransport,
        transports.TranscoderServiceGrpcAsyncIOTransport,
    ],
)
def test_transcoder_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.TranscoderServiceGrpcTransport,
        transports.TranscoderServiceGrpcAsyncIOTransport,
    ],
)
def test_transcoder_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint without a cert source uses ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_job_path():
    """job_path builds the fully-qualified job resource name."""
    parts = {"project": "squid", "location": "clam", "job": "whelk"}
    expected = "projects/{project}/locations/{location}/jobs/{job}".format(**parts)
    actual = TranscoderServiceClient.job_path(
        parts["project"], parts["location"], parts["job"]
    )
    assert actual == expected
def test_parse_job_path():
    """parse_job_path is the inverse of job_path."""
    kwargs = {"project": "octopus", "location": "oyster", "job": "nudibranch"}
    # Round-trip: build a path, then parse it back into its components.
    path = TranscoderServiceClient.job_path(**kwargs)
    assert TranscoderServiceClient.parse_job_path(path) == kwargs
def test_job_template_path():
    """job_template_path builds the jobTemplates resource name."""
    parts = {"project": "cuttlefish", "location": "mussel", "job_template": "winkle"}
    expected = (
        "projects/{project}/locations/{location}/jobTemplates/{job_template}"
    ).format(**parts)
    actual = TranscoderServiceClient.job_template_path(
        parts["project"], parts["location"], parts["job_template"]
    )
    assert actual == expected
def test_parse_job_template_path():
    """parse_job_template_path is the inverse of job_template_path."""
    kwargs = {"project": "nautilus", "location": "scallop", "job_template": "abalone"}
    # Round-trip: build a path, then parse it back into its components.
    path = TranscoderServiceClient.job_template_path(**kwargs)
    assert TranscoderServiceClient.parse_job_template_path(path) == kwargs
def test_common_billing_account_path():
    """common_billing_account_path builds billingAccounts/{billing_account}."""
    account = "squid"
    expected = "billingAccounts/{}".format(account)
    assert TranscoderServiceClient.common_billing_account_path(account) == expected
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    kwargs = {"billing_account": "clam"}
    # Round-trip: build a path, then parse it back into its components.
    path = TranscoderServiceClient.common_billing_account_path(**kwargs)
    assert TranscoderServiceClient.parse_common_billing_account_path(path) == kwargs
def test_common_folder_path():
    """common_folder_path builds folders/{folder}."""
    folder = "whelk"
    expected = "folders/{}".format(folder)
    assert TranscoderServiceClient.common_folder_path(folder) == expected
def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    kwargs = {"folder": "octopus"}
    # Round-trip: build a path, then parse it back into its components.
    path = TranscoderServiceClient.common_folder_path(**kwargs)
    assert TranscoderServiceClient.parse_common_folder_path(path) == kwargs
def test_common_organization_path():
    """common_organization_path builds organizations/{organization}."""
    organization = "oyster"
    expected = "organizations/{}".format(organization)
    assert TranscoderServiceClient.common_organization_path(organization) == expected
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    kwargs = {"organization": "nudibranch"}
    # Round-trip: build a path, then parse it back into its components.
    path = TranscoderServiceClient.common_organization_path(**kwargs)
    assert TranscoderServiceClient.parse_common_organization_path(path) == kwargs
def test_common_project_path():
    """common_project_path builds projects/{project}."""
    project = "cuttlefish"
    expected = "projects/{}".format(project)
    assert TranscoderServiceClient.common_project_path(project) == expected
def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    kwargs = {"project": "mussel"}
    # Round-trip: build a path, then parse it back into its components.
    path = TranscoderServiceClient.common_project_path(**kwargs)
    assert TranscoderServiceClient.parse_common_project_path(path) == kwargs
def test_common_location_path():
    """common_location_path builds projects/{project}/locations/{location}."""
    project, location = "winkle", "nautilus"
    expected = "projects/{}/locations/{}".format(project, location)
    assert TranscoderServiceClient.common_location_path(project, location) == expected
def test_parse_common_location_path():
    """parse_common_location_path inverts common_location_path."""
    kwargs = {"project": "scallop", "location": "abalone"}
    # Round-trip: build a path, then parse it back into its components.
    path = TranscoderServiceClient.common_location_path(**kwargs)
    assert TranscoderServiceClient.parse_common_location_path(path) == kwargs
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info reaches _prep_wrapped_messages for client and transport."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.TranscoderServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = TranscoderServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.TranscoderServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = TranscoderServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Leaving the async-with block closes the underlying gRPC channel."""
    client = TranscoderServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Leaving the sync with-block closes each transport's channel."""
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = TranscoderServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager closes its transport on exit."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = TranscoderServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| 37.776687
| 136
| 0.687241
| 13,723
| 119,261
| 5.735408
| 0.032282
| 0.012985
| 0.01982
| 0.023657
| 0.910237
| 0.881815
| 0.861524
| 0.835733
| 0.812456
| 0.796219
| 0
| 0.004511
| 0.228658
| 119,261
| 3,156
| 137
| 37.788657
| 0.851083
| 0.195462
| 0
| 0.670213
| 0
| 0
| 0.081447
| 0.032212
| 0
| 0
| 0
| 0.000317
| 0.13136
| 1
| 0.047641
| false
| 0.000463
| 0.012488
| 0.000925
| 0.061055
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7c91231a685403d79689aac00c0aa044b7c8356
| 5,657
|
py
|
Python
|
pydoof/search_api/search.py
|
doofinder/pydoof
|
18ebdbf5710d08bc00dcc28b9c035a9fe47306f0
|
[
"MIT"
] | null | null | null |
pydoof/search_api/search.py
|
doofinder/pydoof
|
18ebdbf5710d08bc00dcc28b9c035a9fe47306f0
|
[
"MIT"
] | 12
|
2015-05-14T17:09:51.000Z
|
2021-12-22T16:47:05.000Z
|
pydoof/search_api/search.py
|
doofinder/pydoof
|
18ebdbf5710d08bc00dcc28b9c035a9fe47306f0
|
[
"MIT"
] | 1
|
2022-01-04T09:09:31.000Z
|
2022-01-04T09:09:31.000Z
|
from enum import Enum, unique
from pydoof.search_api.api_client import SearchAPIClient
from pydoof.helpers import parse_query_params
@unique
class QueryNames(Enum):
    """Allowed values for the search API ``query_name`` parameter."""
    MATCH_AND = "match_and"
    MATCH_OR = "match_or"
    FUZZY = "fuzzy"
    PHONETIC = "phonetic_text"
@unique
class Transformers(Enum):
    """Allowed values for the search API ``transformer`` parameter."""
    BASIC = "basic"
    ONLY_IDS = "onlyid"
def query(hashid, query, filter_=None, exclude=None, index_name=None,
          query_name=None, sort=None, page=None, rpp=None, transformer=None,
          no_stats=None, **opts):
    """Search items indexed in a search engine.

    Args:
        hashid (str): Unique id of the search engine to query.
        query (str): Terms to look for among the engine's items.
        filter_ (dict, optional): Filter rule for items, e.g. only items
            whose color is "blue". Defaults to None.
        exclude (dict, optional): Exclusion rule for items, e.g. drop items
            that belong to the `Foo` category. Defaults to None.
        index_name (str, optional): Unique name of a search engine index.
            When given, results are limited to that index.
        query_name (str, optional): Query type to use: one of "match_and",
            "match_or", "fuzzy" or "phonetic_text". When omitted, the
            search API selects the best one. Defaults to None.
        sort (list, optional): Sorting rule for results. A list of strings
            sorts by each named field in ascending order; a list of dicts
            such as [{'color': 'desc'}] sets the direction per field. When
            omitted, results are ordered by score, descending. Defaults to
            None.
        page (int, optional): Page of results to return. Defaults to None.
        rpp (int, optional): Results per page, between 1 and 100. Defaults
            to 10.
        transformer (str, optional): Transformation applied to result
            items: "basic" or "onlyid". When omitted, items are not
            transformed. Defaults to None.
        no_stats (bool, optional): When true, the query is not recorded in
            the search stats. Defaults to False.

    Returns:
        The search API client's response for ``GET /5/search``.
    """
    params = parse_query_params({
        'hashid': hashid,
        'query': query,
        'filter': filter_,
        'exclude': exclude,
        'type': index_name,
        'query_name': query_name,
        'sort': sort,
        'page': page,
        'rpp': rpp,
        'transformer': transformer,
        'nostats': no_stats
    })
    return SearchAPIClient(**opts).get('/5/search', query_params=params)
def suggest(hashid, query, filter_=None, exclude=None, sort=None, page=None,
            rpp=None, transformer=None, no_stats=None, **opts):
    """Fetch term suggestions based on the items indexed in a search engine.

    Args:
        hashid (str): Unique id of the search engine to query.
        query (str): Terms to look for among the engine's items.
        filter_ (dict, optional): Filter rule for items, e.g. only items
            whose color is "blue". Defaults to None.
        exclude (dict, optional): Exclusion rule for items, e.g. drop items
            that belong to the `Foo` category. Defaults to None.
        sort (list, optional): Sorting rule for results. A list of strings
            sorts by each named field in ascending order; a list of dicts
            such as [{'color': 'desc'}] sets the direction per field. When
            omitted, results are ordered by score, descending. Defaults to
            None.
        page (int, optional): Page of results to return. Defaults to None.
        rpp (int, optional): Results per page, between 1 and 100. Defaults
            to 10.
        transformer (str, optional): Transformation applied to result
            items: "basic" or "onlyid". When omitted, items are not
            transformed. Defaults to None.
        no_stats (bool, optional): When true, the query is not recorded in
            the search stats. Defaults to False.

    Returns:
        The search API client's response for ``GET /5/suggest``.
    """
    params = parse_query_params({
        'hashid': hashid,
        'query': query,
        'filter': filter_,
        'exclude': exclude,
        'sort': sort,
        'page': page,
        'rpp': rpp,
        'transformer': transformer,
        'nostats': no_stats
    })
    return SearchAPIClient(**opts).get('/5/suggest', query_params=params)
| 41.903704
| 79
| 0.62542
| 759
| 5,657
| 4.600791
| 0.181818
| 0.03866
| 0.040951
| 0.026346
| 0.814719
| 0.81071
| 0.793528
| 0.793528
| 0.793528
| 0.793528
| 0
| 0.00354
| 0.300866
| 5,657
| 134
| 80
| 42.216418
| 0.879393
| 0.671557
| 0
| 0.576923
| 0
| 0
| 0.117759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.057692
| 0
| 0.288462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f70086a12cb7a805133ce4cd015dbd84c54c0b65
| 28
|
py
|
Python
|
ent2id/__init__.py
|
skojaku/ent2id
|
1483cc9430999db7a6598dfdf0afa7302ada4893
|
[
"CC0-1.0"
] | null | null | null |
ent2id/__init__.py
|
skojaku/ent2id
|
1483cc9430999db7a6598dfdf0afa7302ada4893
|
[
"CC0-1.0"
] | null | null | null |
ent2id/__init__.py
|
skojaku/ent2id
|
1483cc9430999db7a6598dfdf0afa7302ada4893
|
[
"CC0-1.0"
] | null | null | null |
from ent2id.Ent2Id import *
| 14
| 27
| 0.785714
| 4
| 28
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.142857
| 28
| 1
| 28
| 28
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f734df86fba52bb0e5b0075c1880a25fc79ec783
| 113
|
py
|
Python
|
Python/pythonProject/exercise/ex047.py
|
JoaoMoreira2002/Linguagens-de-programacao
|
b91a902188428238a567c8f52b2ac9028378c4df
|
[
"MIT"
] | null | null | null |
Python/pythonProject/exercise/ex047.py
|
JoaoMoreira2002/Linguagens-de-programacao
|
b91a902188428238a567c8f52b2ac9028378c4df
|
[
"MIT"
] | null | null | null |
Python/pythonProject/exercise/ex047.py
|
JoaoMoreira2002/Linguagens-de-programacao
|
b91a902188428238a567c8f52b2ac9028378c4df
|
[
"MIT"
] | null | null | null |
# Print the multiplication tables for 0 through 10, one table per outer pass,
# separated by a line containing a single tab character.
for base in range(11):
    for factor in range(11):
        print('{} x {} = {}'.format(base, factor, base * factor))
    print('\t')
| 22.6
| 46
| 0.442478
| 21
| 113
| 2.380952
| 0.47619
| 0.28
| 0.32
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075949
| 0.300885
| 113
| 4
| 47
| 28.25
| 0.556962
| 0
| 0
| 0
| 0
| 0
| 0.061947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
f737f3d4131b56174d565a0575f0331decd3591a
| 20,724
|
py
|
Python
|
orttraining/orttraining/test/python/orttraining_test_checkpoint.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 669
|
2018-12-03T22:00:31.000Z
|
2019-05-06T19:42:49.000Z
|
orttraining/orttraining/test/python/orttraining_test_checkpoint.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 440
|
2018-12-03T21:09:56.000Z
|
2019-05-06T20:47:23.000Z
|
orttraining/orttraining/test/python/orttraining_test_checkpoint.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 140
|
2018-12-03T21:15:28.000Z
|
2019-05-06T18:02:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import subprocess
import os
import shutil
import sys
from checkpoint._test_helpers import makedir
from _test_commons import _single_run, _distributed_run
# Root directory for every checkpoint artifact produced by this test run;
# it is removed at the end of the script.
checkpoint_dir = os.path.abspath("checkpoint/checkpoint_dir/")
makedir(checkpoint_dir)
# test workflow:
# - there are a total of three files that are used for checkpointing tests:
# - orttraining_test_checkpoint.py: co-ordinating all the checkpoint tests
# - orttraining_test_save_checkpoint.py: responsible for saving all checkpoint files and trained states
# - orttraining_test_load_checkpoint.py: loading the saved checkpoints and the saved states and asserting whether
# the saved states match the loaded states.
# - and tests encompassing checkpointing tests for scenarios:
# - from [onnxruntime orttrainer][full_precision, mixed_precision][single node training, data parallel training, distributed zero, distributed megatron, distributed zero+megatron training] to
# [onnxruntime orttrainer, pytorch][full_precision, mixed_precision][single node training, data parallel training, distributed zero, distributed megatron, distributed zero+megatron training]
# - all tests cannot be written in the same process because:
# - some of them require to be run in a distributed environment (using mpirun) while others can be run using a single process.
# - there is a known limitation where the distributed training run context is implemented as a singleton, so in the same process, no more than one
# orttrainer can be instantiated. Hence the need to run these tests in different processes one at a time.
# - workflow:
# - orttraining_test_checkpoint.py calls orttraining_test_save_checkpoint.py to save following files to disk
# - ORTTrainer checkpoint files through the ORTTrainer.save_checkpoint method
# - ORTTrainer states through pickle after extracting all the states of the ORTTrainer through the ORTTrainer.state_dict method
# - for each configuration across [onnxruntime orttrainer][full_precision, mixed_precision][single node training, data parallel training, distributed zero training]
# - orttraining_test_checkpoint.py calls orttraining_test_load_checkpoint.py to load each checkpoint into each orttrainer configuration
# - Saved ORTTrainer checkpoint files are loaded into an ORTTrainer using the ORTTrainer.load_checkpoint method for each ORTTrainer configuration.
# - Saved states are loaded into a python dictionary (called the state dictionary) through pickle
# - state dictionary is extracted from the ORTTrainer after it has loaded the checkpoint file and the onnx graph has been initialized (by calling eval_step)
# through the ORTTrainer.state_dict method.
# - the loaded state dictionary (through pickle) is compared against the extracted state dictionary for:
# - equality (or near equality) of model states
# - equality (or near equality) of optimizer states
# - In some cases the comparison is not directly possible; for example single node trainer to a distributed zero trainer because the extracted state
# dictionary is a distributed one and cannot be compared against a single node trainer directly.
# - First these states are saved using pickle for each rank to a file on disk
# - Wait for all ranks to complete writing the file to disk using barrier()
# - Load all states and aggregate them into 1 state dictionary
# - Compare this aggregated state dictionary against the original one loaded from disk.
# - Similarly, it is not possible to compare mixed precision zero trainer state_dict against full precision zero trainer state_dict because the
#       full precision states are sharded in the mixed precision trainer run and not sharded in the full precision trainer run. To compare these two state_dicts:
# - Both state_dicts (mixed precision and full precision) are saved to file for all ranks.
# - Wait for all ranks to complete writing the file to disk using barrier()
#       - Load all states and aggregate them into 1 state dictionary for both configs.
# - Compare this aggregated state dictionaries against one another.
# Test driver scripts launched as subprocesses via _single_run/_distributed_run.
save_checkpoint_file = os.path.join("checkpoint", "orttraining_test_save_checkpoint.py")
load_checkpoint_file = os.path.join("checkpoint", "orttraining_test_load_checkpoint.py")
aggregate_checkpoint_file = os.path.join("checkpoint", "orttraining_test_checkpoint_aggregation.py")
optim_state_file = os.path.join("checkpoint", "orttraining_test_load_optimizer_state.py")
backend_api_file = os.path.join("checkpoint", "orttraining_test_backend_api.py")
# Directories where each trainer configuration saves its checkpoint files.
single_node_full_precision_path = os.path.join(checkpoint_dir, "single_node", "full_precision")
single_node_mixed_precision_path = os.path.join(checkpoint_dir, "single_node", "mixed_precision")
distributed_zero_full_precision_lamb_path = os.path.join(checkpoint_dir, "distributed_zero", "full_precision", "lamb")
distributed_zero_mixed_precision_lamb_path = os.path.join(checkpoint_dir, "distributed_zero", "mixed_precision", "lamb")
# megatron saving and loading uses a different model, stored under the "bart" subtree
single_node_full_precision_bart_path = os.path.join(checkpoint_dir, "bart", "single_node", "full_precision")
single_node_mixed_precision_bart_path = os.path.join(checkpoint_dir, "bart", "single_node", "mixed_precision")
distributed_zero_full_precision_lamb_bart_path = os.path.join(
    checkpoint_dir, "bart", "distributed_zero", "full_precision", "lamb"
)
distributed_zero_mixed_precision_lamb_bart_path = os.path.join(
    checkpoint_dir, "bart", "distributed_zero", "mixed_precision", "lamb"
)
distributed_megatron_full_precision_lamb_path = os.path.join(
    checkpoint_dir, "bart", "distributed_megatron", "full_precision", "lamb"
)
distributed_megatron_mixed_precision_lamb_path = os.path.join(
    checkpoint_dir, "bart", "distributed_megatron", "mixed_precision", "lamb"
)
distributed_zero_megatron_full_precision_adam_path = os.path.join(
    checkpoint_dir, "bart", "distributed_zero_megatron", "full_precision", "adam"
)
distributed_zero_megatron_mixed_precision_adam_path = os.path.join(
    checkpoint_dir, "bart", "distributed_zero_megatron", "mixed_precision", "adam"
)
distributed_zero_megatron_full_precision_lamb_path = os.path.join(
    checkpoint_dir, "bart", "distributed_zero_megatron", "full_precision", "lamb"
)
distributed_zero_megatron_mixed_precision_lamb_path = os.path.join(
    checkpoint_dir, "bart", "distributed_zero_megatron", "mixed_precision", "lamb"
)
# save all checkpoint files (pre-checkpoint): each trainer configuration is
# run in its own process and writes its checkpoint/state files to the
# directories defined above.
_single_run(save_checkpoint_file, "single_node_full_precision", single_node_full_precision_path)
_single_run(save_checkpoint_file, "single_node_mixed_precision", single_node_mixed_precision_path)
_distributed_run(
    save_checkpoint_file, "distributed_zero_full_precision_lamb", distributed_zero_full_precision_lamb_path
)
_distributed_run(
    save_checkpoint_file, "distributed_zero_mixed_precision_lamb", distributed_zero_mixed_precision_lamb_path
)
_single_run(save_checkpoint_file, "single_node_full_precision_bart", single_node_full_precision_bart_path)
_single_run(save_checkpoint_file, "single_node_mixed_precision_bart", single_node_mixed_precision_bart_path)
_distributed_run(
    save_checkpoint_file, "distributed_zero_full_precision_lamb_bart", distributed_zero_full_precision_lamb_bart_path
)
_distributed_run(
    save_checkpoint_file, "distributed_zero_mixed_precision_lamb_bart", distributed_zero_mixed_precision_lamb_bart_path
)
_distributed_run(
    save_checkpoint_file, "distributed_megatron_full_precision_lamb", distributed_megatron_full_precision_lamb_path
)
_distributed_run(
    save_checkpoint_file, "distributed_megatron_mixed_precision_lamb", distributed_megatron_mixed_precision_lamb_path
)
_distributed_run(
    save_checkpoint_file,
    "distributed_zero_megatron_full_precision_lamb",
    distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
    save_checkpoint_file,
    "distributed_zero_megatron_mixed_precision_lamb",
    distributed_zero_megatron_mixed_precision_lamb_path,
)
# load checkpoint files (post-checkpoint)
#
# The original expansion of these runs was ~320 copy-pasted calls. Every
# combination follows the same mechanical shape, so the calls are generated
# here instead, preserving the original execution order exactly:
#   for each destination trainer, for each source trainer, run the four
#   precision pairs (full->full, mixed->full, mixed->mixed, full->mixed).
# The checkpoint path passed to each test depends only on the source trainer
# and the source precision.

# (source precision, destination precision) in the original call order.
precision_pairs = [
    ("full_precision", "full_precision"),
    ("mixed_precision", "full_precision"),
    ("mixed_precision", "mixed_precision"),
    ("full_precision", "mixed_precision"),
]

# Source checkpoint directories used when loading into the single-node or
# distributed-zero trainers (non-megatron destinations).
standard_source_paths = [
    ("single_node", {
        "full_precision": single_node_full_precision_path,
        "mixed_precision": single_node_mixed_precision_path,
    }),
    ("distributed_zero", {
        "full_precision": distributed_zero_full_precision_lamb_path,
        "mixed_precision": distributed_zero_mixed_precision_lamb_path,
    }),
    ("distributed_megatron", {
        "full_precision": distributed_megatron_full_precision_lamb_path,
        "mixed_precision": distributed_megatron_mixed_precision_lamb_path,
    }),
    ("distributed_zero_megatron", {
        "full_precision": distributed_zero_megatron_full_precision_lamb_path,
        "mixed_precision": distributed_zero_megatron_mixed_precision_lamb_path,
    }),
]

# Megatron destinations load the bart checkpoint variants for the
# non-megatron sources (the megatron sources already use the bart model).
bart_source_paths = [
    ("single_node", {
        "full_precision": single_node_full_precision_bart_path,
        "mixed_precision": single_node_mixed_precision_bart_path,
    }),
    ("distributed_zero", {
        "full_precision": distributed_zero_full_precision_lamb_bart_path,
        "mixed_precision": distributed_zero_mixed_precision_lamb_bart_path,
    }),
    ("distributed_megatron", {
        "full_precision": distributed_megatron_full_precision_lamb_path,
        "mixed_precision": distributed_megatron_mixed_precision_lamb_path,
    }),
    ("distributed_zero_megatron", {
        "full_precision": distributed_zero_megatron_full_precision_lamb_path,
        "mixed_precision": distributed_zero_megatron_mixed_precision_lamb_path,
    }),
]

# (destination trainer, runner, source path table) in the original order:
# going to single node, distributed zero, distributed megatron, and
# distributed zero+megatron trainers.
destinations = [
    ("single_node", _single_run, standard_source_paths),
    ("distributed_zero", _distributed_run, standard_source_paths),
    ("distributed_megatron", _distributed_run, bart_source_paths),
    ("distributed_zero_megatron", _distributed_run, bart_source_paths),
]

for dest, run, source_paths in destinations:
    for source, path_by_precision in source_paths:
        for src_precision, dest_precision in precision_pairs:
            run(
                load_checkpoint_file,
                "test_load_from_{}_{}_into_{}_{}".format(
                    source, src_precision, dest, dest_precision
                ),
                path_by_precision[src_precision],
            )

# Remove every checkpoint artifact produced by the runs above.
shutil.rmtree(checkpoint_dir)
| 45.150327
| 196
| 0.84226
| 2,655
| 20,724
| 5.941996
| 0.0742
| 0.126458
| 0.074163
| 0.085193
| 0.835827
| 0.818839
| 0.808126
| 0.795068
| 0.767115
| 0.735674
| 0
| 0.000163
| 0.109824
| 20,724
| 458
| 197
| 45.248908
| 0.854998
| 0.210336
| 0
| 0.515152
| 0
| 0
| 0.420788
| 0.39131
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015152
| 0
| 0.015152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f74fc6b035940199a7b9518dd7958f38c4ea4cdc
| 443
|
py
|
Python
|
tests/test_main.py
|
ChickenProp/gragrapy
|
9c24719c6fc843df2c506388aa21e64617cccc8d
|
[
"MIT"
] | 1
|
2017-04-30T18:26:19.000Z
|
2017-04-30T18:26:19.000Z
|
tests/test_main.py
|
ChickenProp/gragrapy
|
9c24719c6fc843df2c506388aa21e64617cccc8d
|
[
"MIT"
] | 4
|
2017-06-19T09:44:59.000Z
|
2017-06-19T09:58:57.000Z
|
tests/test_main.py
|
ChickenProp/gragrapy
|
9c24719c6fc843df2c506388aa21e64617cccc8d
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, print_function,
unicode_literals, division)
from .context import gragrapy as gg
from gragrapy.__main__ import parse_kwargs
def test_parse_kwargs():
assert parse_kwargs([]) == {}
assert parse_kwargs(['a=b', 'c=d']) == {'a': 'b', 'c': 'd'}
assert parse_kwargs(['a=1', 'c=-5']) == {'a': 1, 'c': -5}
assert parse_kwargs(['a=b=c', 'c=d']) == {'a': 'b=c', 'c': 'd'}
| 36.916667
| 67
| 0.582393
| 64
| 443
| 3.75
| 0.390625
| 0.275
| 0.283333
| 0.225
| 0.329167
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0.011299
| 0.200903
| 443
| 11
| 68
| 40.272727
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.074492
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.111111
| true
| 0
| 0.333333
| 0
| 0.444444
| 0.111111
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f77d32b42006912619abf517ddfd74f62f725987
| 37,933
|
py
|
Python
|
models/resnet_small.py
|
zhaoguangxiang/pytorch-cifar
|
509994fd2035009c7f53192a4c497b97f6295e6e
|
[
"MIT"
] | null | null | null |
models/resnet_small.py
|
zhaoguangxiang/pytorch-cifar
|
509994fd2035009c7f53192a4c497b97f6295e6e
|
[
"MIT"
] | null | null | null |
models/resnet_small.py
|
zhaoguangxiang/pytorch-cifar
|
509994fd2035009c7f53192a4c497b97f6295e6e
|
[
"MIT"
] | null | null | null |
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
resnet same as the origin paper
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResSmall(nn.Module):
    """CIFAR-style small ResNet: 3x3 stem, three stages of 16/32/64 planes
    (stages 2 and 3 halve the resolution), 8x8 avg-pool, linear head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResSmall, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        stage_cfg = ((16, num_blocks[0], 1), (32, num_blocks[1], 2), (64, num_blocks[2], 2))
        self.layer1, self.layer2, self.layer3 = (
            self._make_layer(block, planes, n, stride) for planes, n, stride in stage_cfg
        )
        self.linear = nn.Linear(64 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            h = stage(h)
        h = F.avg_pool2d(h, 8)  # final feature map is 8x8 for 32x32 inputs
        h = h.view(h.size(0), -1)
        return self.linear(h)
def ResSmall20():
    """Depth-20 ResSmall: three stages of 3 BasicBlocks."""
    return ResSmall(BasicBlock, [3] * 3)
def ResSmall32():
    """Depth-32 ResSmall: three stages of 5 BasicBlocks."""
    return ResSmall(BasicBlock, [5] * 3)
def ResSmall44():
    """Depth-44 ResSmall: three stages of 7 BasicBlocks."""
    return ResSmall(BasicBlock, [7] * 3)
def ResSmall56():
    """Depth-56 ResSmall: three stages of 9 BasicBlocks."""
    return ResSmall(BasicBlock, [9] * 3)
def ResSmall110():
    """Depth-110 ResSmall: three stages of 18 BasicBlocks."""
    return ResSmall(BasicBlock, [18] * 3)
class BaseBlock(nn.Module):
    """Two 3x3 conv/BN layers WITHOUT an internal shortcut or trailing ReLU.

    The residual addition and final activation are intentionally applied by
    the caller (the Rf*/LmRnn* networks in this file), so this block returns
    the raw pre-activation branch output.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BaseBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        return self.bn2(self.conv2(branch))
class RfSmall(nn.Module):
    """ResSmall variant with the residual wiring pulled OUT of the blocks.

    Per-block layers and shortcut modules are held in parallel ModuleLists
    and the residual addition + ReLU happen in ``forward``; the blocks are
    expected to be shortcut-free (e.g. ``BaseBlock``). ``args`` is accepted
    for interface parity with the other Rf/LmRnn models but is unused here.
    """

    def __init__(self, block, num_blocks, args, num_classes=10):
        super(RfSmall, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.num_blocks = num_blocks
        self.layer_list = nn.ModuleList()
        self.shortcut_list = nn.ModuleList()
        self.num_big_block = len(num_blocks)
        for planes, n, stride in ((16, num_blocks[0], 1), (32, num_blocks[1], 2), (64, num_blocks[2], 2)):
            layers, shortcuts = self._make_layer(block, planes, n, stride)
            self.layer_list.append(layers)
            self.shortcut_list.append(shortcuts)
        self.linear = nn.Linear(64 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage; returns (blocks, one shortcut per block)."""
        layers = nn.ModuleList()
        shortcuts = nn.ModuleList()
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            if s != 1 or self.in_planes != block.expansion * planes:
                # Projection shortcut when the output shape changes.
                proj = nn.Sequential(
                    nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=s, bias=False),
                    nn.BatchNorm2d(block.expansion * planes),
                )
            else:
                proj = nn.Sequential()
            shortcuts.append(proj)
            self.in_planes = planes * block.expansion
        return layers, shortcuts

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for layers, shortcuts in zip(self.layer_list, self.shortcut_list):
            for blk, proj in zip(layers, shortcuts):
                h = F.relu(blk(h) + proj(h))
        h = F.avg_pool2d(h, 8)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def RfSmall56(args):
    """Depth-56 RfSmall: 9 BaseBlocks per stage."""
    return RfSmall(block=BaseBlock, num_blocks=[9] * 3, args=args)
def RfSmall110(args):
    """Depth-110 RfSmall: 18 BaseBlocks per stage."""
    return RfSmall(block=BaseBlock, num_blocks=[18] * 3, args=args)
class LmRnnSmall(nn.Module):
    """RfSmall variant where, within each stage, the residual branch of every
    block after the first is produced by a recurrent cell (RNN/LSTM/GRU) that
    processes the channel vector of each spatial position independently
    (effective batch = bsz * H * W).

    Per stage: block 0 uses a conventional (projection/identity) shortcut;
    blocks j >= 1 feed the feature map through the stage's recurrent cell,
    whose hidden state is carried across blocks within the stage only.

    Fields read from ``args``:
        memory_type: 'rnn' | 'lstm' | 'gru'.
        rnn_ratio:   hidden size = int(rnn_ratio * channels); when != 1 a
                     linear layer maps the hidden state back to the channel
                     dimension.
        rnn_res:     stored but not used in this class.
    """

    def __init__(self, block, num_blocks, args, num_classes=10):
        super(LmRnnSmall, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.num_blocks = num_blocks
        self.num_big_block = len(num_blocks)
        self.layer_list = nn.ModuleList()
        self.shortcut_list = nn.ModuleList()
        self.rnn_list = nn.ModuleList()
        self.m_out_list = nn.ModuleList()  # may contain None entries (rnn_ratio == 1)
        self.rnn_memory_size_list = []
        self.args = args
        self.memory_type = args.memory_type
        self.rnn_ratio = args.rnn_ratio
        self.rnn_res = args.rnn_res
        layer1, shortcut1, rnn1, m_out_linear1, rnn_memory_size1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        layer2, shortcut2, rnn2, m_out_linear2, rnn_memory_size2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        layer3, shortcut3, rnn3, m_out_linear3, rnn_memory_size3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.layer_list.extend([layer1, layer2, layer3])
        self.shortcut_list.extend([shortcut1, shortcut2, shortcut3])
        self.rnn_list.extend([rnn1, rnn2, rnn3])
        self.m_out_list.extend([m_out_linear1, m_out_linear2, m_out_linear3])
        self.rnn_memory_size_list.extend([rnn_memory_size1, rnn_memory_size2, rnn_memory_size3])
        self.linear = nn.Linear(64 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage plus its recurrent cell.

        Returns:
            (layers, shortcuts, rnn_cell_or_None, hidden->channel linear or
            None, hidden size). Only block 0 gets a conventional shortcut;
            later blocks get their residual from the RNN in ``forward``.
        """
        strides = [stride] + [1] * (num_blocks - 1)
        layers = nn.ModuleList()
        shortcuts = nn.ModuleList()
        rnn_input_size = block.expansion * planes
        rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes)
        if self.memory_type == 'rnn':
            rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh')
        elif self.memory_type == 'lstm':
            rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True)
        elif self.memory_type == 'gru':
            rnn = torch.nn.GRUCell(rnn_input_size, rnn_memory_size, bias=True)
        else:
            rnn = None
        if self.rnn_ratio != 1:
            # Fixed: previously referenced the nonexistent attribute
            # ``self.rnn_memory_size`` (AttributeError whenever
            # rnn_ratio != 1); the intended value is the local one.
            m_out_linear = nn.Linear(rnn_memory_size, rnn_input_size)
        else:
            m_out_linear = None
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride))
            if i == 0:
                shortcut = nn.Sequential()
                if stride != 1 or self.in_planes != block.expansion * planes:
                    shortcut = nn.Sequential(
                        nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False),
                        nn.BatchNorm2d(block.expansion * planes)
                    )
                shortcuts.append(shortcut)
            self.in_planes = planes * block.expansion
        return layers, shortcuts, rnn, m_out_linear, rnn_memory_size

    def set_m_rnn(self, x, rnn_memory_size):
        """Create zero hidden state(s) matching ``x``'s dtype and device.

        The cell's batch is bsz * H * W: every spatial position is an
        independent sequence element.
        """
        origin_bsz, channel, height, width = x.size()
        bsz = height * width * origin_bsz
        # type_as() also places the tensor on x's device, so the previous
        # unconditional .cuda() call (which broke CPU execution) is removed.
        hx = torch.zeros(bsz, rnn_memory_size).type_as(x)
        if self.memory_type in ['rnn', 'gru']:
            return hx
        if self.memory_type == 'lstm':
            cx = torch.zeros(bsz, rnn_memory_size).type_as(x)
            return (hx, cx)
        raise ValueError('unsupported memory_type: %s' % self.memory_type)

    def m_rnn(self, x, rnn, rnn_hidden):
        """One recurrent step over every spatial position of ``x``.

        ``x`` [bsz, C, H, W] is reshaped to (bsz*H*W, C) so each position's
        channel vector is one cell input. Returns (cell output, new hidden).
        """
        origin_bsz, channel, height, width = x.size()
        in_x = x.permute(0, 2, 3, 1).reshape(origin_bsz * height * width, channel)
        if self.memory_type in ['rnn', 'gru']:
            hx = rnn(in_x, rnn_hidden)
            m_output = hx  # (bsz*H*W, rnn_memory_size)
            rnn_hidden = hx
        elif self.memory_type == 'lstm':
            hx, cx = rnn(in_x, rnn_hidden)
            m_output = hx  # (bsz*H*W, rnn_memory_size)
            rnn_hidden = (hx, cx)
        return m_output, rnn_hidden

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))  # [bsz, 16, 32, 32] for CIFAR input
        for i in range(self.num_big_block):
            for j in range(self.num_blocks[i]):
                layer_i = self.layer_list[i]
                shortcut_i = self.shortcut_list[i]
                if j == 0:
                    # Conventional shortcut at the stage boundary.
                    res = shortcut_i[j](out)
                else:
                    if j == 1:
                        # Fresh hidden state at the start of each stage.
                        rnn_hidden = self.set_m_rnn(out, self.rnn_memory_size_list[i])
                    bsz, channel, height, width = out.size()
                    m_out, rnn_hidden = self.m_rnn(out, self.rnn_list[i], rnn_hidden)
                    if self.m_out_list[i] is not None:
                        m_out = self.m_out_list[i](m_out)
                    m_out = torch.reshape(m_out, (bsz, height, width, channel)).permute((0, 3, 1, 2))
                    res = m_out
                out = layer_i[j](out)  # [bsz, dim, h, w]
                out += res
                out = F.relu(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def LmRnnSmall56(args):
    """Depth-56 LmRnnSmall: 9 BaseBlocks per stage."""
    return LmRnnSmall(block=BaseBlock, num_blocks=[9] * 3, args=args)
def LmRnnSmall110(args):
    """Depth-110 LmRnnSmall: 18 BaseBlocks per stage."""
    return LmRnnSmall(block=BaseBlock, num_blocks=[18] * 3, args=args)
class LmRnnKbSmallCIFAR10(nn.Module):
    """LmRnn variant that KEEPS the original batch size for the recurrent
    cell: each stage's feature map (optionally downsampled by strided convs
    first) is flattened to one vector per sample and fed to the stage's
    RNN/LSTM/GRU cell. Stage inputs before downsampling are 32*32*16,
    16*16*32 and 8*8*64.

    With ``args.pass_hidden`` the hidden state is carried across stages
    through a linear "memory shortcut"; otherwise every stage starts fresh.

    Fields read from ``args``: memory_type ('rnn'|'lstm'|'gru'), rnn_ratio,
    pass_hidden, num_downs (stride-2 convs before the cell; each shrinks
    the spatial area by 4).
    """

    def __init__(self, block, num_blocks, args, num_classes=10):
        super(LmRnnKbSmallCIFAR10, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.num_blocks = num_blocks
        self.num_big_block = len(num_blocks)
        self.layer_list = nn.ModuleList()
        self.shortcut_list = nn.ModuleList()
        self.rnn_list = nn.ModuleList()
        self.m_out_list = nn.ModuleList()      # may contain None entries
        self.rnn_memory_size_list = []
        self.convs_list = nn.ModuleList()      # down-convs per stage (or None)
        self.deconvs_list = nn.ModuleList()    # matching up-convs per stage (or None)
        self.args = args
        self.memory_type = args.memory_type
        self.pass_hidden = args.pass_hidden
        self.rnn_ratio = args.rnn_ratio
        self.num_downs = args.num_downs
        # Each stride-2 down-conv shrinks the spatial area by a factor of 4.
        self.down_rate = 4 ** self.num_downs
        layer1, shortcut1, rnn1, m_out_linear1, rnn_memory_size1, convs1, deconvs1 = self._make_layer(block, 16, num_blocks[0], stride=1, fm=32)
        layer2, shortcut2, rnn2, m_out_linear2, rnn_memory_size2, convs2, deconvs2 = self._make_layer(block, 32, num_blocks[1], stride=2, fm=16)
        layer3, shortcut3, rnn3, m_out_linear3, rnn_memory_size3, convs3, deconvs3 = self._make_layer(block, 64, num_blocks[2], stride=2, fm=8)
        self.layer_list.extend([layer1, layer2, layer3])
        self.shortcut_list.extend([shortcut1, shortcut2, shortcut3])
        self.rnn_list.extend([rnn1, rnn2, rnn3])
        self.m_out_list.extend([m_out_linear1, m_out_linear2, m_out_linear3])
        self.rnn_memory_size_list.extend([rnn_memory_size1, rnn_memory_size2, rnn_memory_size3])
        self.convs_list.extend([convs1, convs2, convs3])
        self.deconvs_list.extend([deconvs1, deconvs2, deconvs3])
        self.linear = nn.Linear(64 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride, fm):
        """Build one stage: blocks, shortcut (conventional or memory),
        recurrent cell, optional hidden->input linear, and the down/up
        conv stacks used around the cell.

        Args:
            fm: spatial size of this stage's feature maps (32/16/8).
        """
        strides = [stride] + [1] * (num_blocks - 1)
        layers = nn.ModuleList()
        shortcuts = nn.ModuleList()
        cur_fig_size = int(fm * fm / self.down_rate)
        rnn_input_size = block.expansion * planes * cur_fig_size
        rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes * cur_fig_size)
        if self.memory_type == 'rnn':
            rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh')
        elif self.memory_type == 'lstm':
            rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True)
        elif self.memory_type == 'gru':
            rnn = torch.nn.GRUCell(rnn_input_size, rnn_memory_size, bias=True)
        else:
            rnn = None
        if self.rnn_ratio != 1:
            m_out_linear = nn.Linear(rnn_memory_size, rnn_input_size)
        else:
            m_out_linear = None
        if self.num_downs > 0:
            convs = nn.ModuleList()
            deconvs = nn.ModuleList()
            for j in range(self.num_downs):
                # Channel count is preserved; only the resolution changes.
                convs.append(nn.Conv2d(in_channels=block.expansion*planes, out_channels=block.expansion*planes,
                                       kernel_size=3, stride=2, padding=1))
                deconvs.append(nn.ConvTranspose2d(block.expansion*planes, block.expansion*planes, kernel_size=3,
                                                  stride=2, padding=1))
        else:
            convs = None
            deconvs = None
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride))
            if i == 0:
                if not self.pass_hidden:
                    # Conventional projection/identity shortcut for block 0.
                    shortcut = nn.Sequential()
                    if stride != 1 or self.in_planes != block.expansion * planes:
                        shortcut = nn.Sequential(
                            nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False),
                            nn.BatchNorm2d(block.expansion * planes)
                        )
                    shortcuts.append(shortcut)
                else:
                    # Memory shortcut: maps the previous stage's hidden state
                    # (2x this stage's size: planes double while fm^2 quarters)
                    # down to this stage's hidden size.
                    memory_shortcut = nn.Sequential()
                    if stride != 1 or self.in_planes != block.expansion * planes:
                        # Fixed: BatchNorm2d cannot normalize the 2-D output
                        # of a Linear layer; BatchNorm1d is the correct op.
                        memory_shortcut = nn.Sequential(nn.Linear(rnn_memory_size*2, rnn_memory_size),
                                                        nn.BatchNorm1d(rnn_memory_size))
                    shortcuts.append(memory_shortcut)
            self.in_planes = planes * block.expansion
        return layers, shortcuts, rnn, m_out_linear, rnn_memory_size, convs, deconvs

    def set_m_rnn(self, x, rnn_memory_size):
        """Create zero hidden state(s); the cell batch equals x's batch."""
        bsz = x.size()[0]
        # type_as() also places the tensor on x's device, so the previous
        # unconditional .cuda() call (which broke CPU execution) is removed.
        hx = torch.zeros(bsz, rnn_memory_size).type_as(x)
        if self.memory_type in ['rnn', 'gru']:
            return hx
        if self.memory_type == 'lstm':
            cx = torch.zeros(bsz, rnn_memory_size).type_as(x)
            return (hx, cx)
        raise ValueError('unsupported memory_type: %s' % self.memory_type)

    def m_rnn(self, x, cur_i, rnn_hidden):
        """Run stage ``cur_i``'s memory cell on feature map ``x``.

        Downsamples with the stage's strided convs, flattens to one vector
        per sample, steps the cell once, optionally projects back to the
        input size, reshapes, and upsamples with the matching deconvs.
        Returns (memory output shaped like ``x``, new hidden state).
        """
        input_size_list = []
        rnn = self.rnn_list[cur_i]
        if self.convs_list:
            convs = self.convs_list[cur_i]
            for j in range(self.num_downs):
                input_size_list.append(x.size())
                x = convs[j](x)
        bsz, channel, new_height, new_width = x.size()
        # Flatten to (bsz, H*W*C); equals rnn_input_size by construction.
        x = x.permute([0, 2, 3, 1]).reshape(bsz, int(self.rnn_memory_size_list[cur_i] / self.args.rnn_ratio))
        if self.memory_type in ['rnn', 'gru']:
            hx = rnn(x, rnn_hidden)
            m_output = hx  # (bsz, rnn_memory_size)
            rnn_hidden = hx
        elif self.memory_type == 'lstm':
            hx, cx = rnn(x, rnn_hidden)
            m_output = hx  # (bsz, rnn_memory_size)
            rnn_hidden = (hx, cx)
        if self.m_out_list[cur_i] is not None:
            m_output = self.m_out_list[cur_i](m_output)
        # Fixed: the second spatial dim used `new_height` twice; use
        # new_width (equivalent for the square maps used here, but correct
        # in general).
        m_output = torch.reshape(m_output, (bsz, new_height, new_width, channel)).permute((0, 3, 1, 2))
        if self.deconvs_list:
            deconvs = self.deconvs_list[cur_i]
            for j in range(self.num_downs):
                m_output = deconvs[j](m_output, output_size=input_size_list[-j-1])
        return m_output, rnn_hidden

    def forward(self, x):
        """Stem -> three stages; within a stage, blocks j >= 1 take their
        residual branch from the recurrent memory (debug prints removed)."""
        out = F.relu(self.bn1(self.conv1(x)))  # [bsz, 16, 32, 32]
        rnn_hidden = None  # assigned before first use on every valid path
        for i in range(self.num_big_block):
            for j in range(self.num_blocks[i]):
                layer_i = self.layer_list[i]
                shortcut_i = self.shortcut_list[i]
                if not self.pass_hidden or i == 0:
                    if j == 0:
                        res = shortcut_i[j](out)
                    else:
                        if j == 1:
                            # Fresh hidden state at the start of each stage.
                            rnn_hidden = self.set_m_rnn(out, self.rnn_memory_size_list[i])
                        m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden)
                        res = m_out
                if self.pass_hidden and i > 0:
                    if j == 0:
                        # NOTE(review): with memory_type='lstm' rnn_hidden is
                        # an (h, c) tuple and this Linear call would fail;
                        # only rnn/gru hidden states work on this path.
                        rnn_hidden = shortcut_i[j](rnn_hidden)
                    m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden)
                    res = m_out
                out = layer_i[j](out)  # [bsz, dim, h, w]
                out += res
                out = F.relu(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def LmRnnKbSmall56CIFAR10(args):
    """Depth-56 LmRnnKbSmallCIFAR10: 9 BaseBlocks per stage."""
    return LmRnnKbSmallCIFAR10(block=BaseBlock, num_blocks=[9] * 3, args=args)
def LmRnnKbSmall110CIFAR10(args):
    """Depth-110 LmRnnKbSmallCIFAR10: 18 BaseBlocks per stage."""
    return LmRnnKbSmallCIFAR10(block=BaseBlock, num_blocks=[18] * 3, args=args)
class DepthTransposeCNN(nn.Module):
    """Depthwise-separable transposed convolution: a grouped (per-channel)
    stride-2 deconv followed by a 1x1 channel-mixing deconv. Unless this is
    the output layer, each deconv is followed by BatchNorm + ReLU."""

    def __init__(self, in_dim, out_dim, kernel_size=4, is_out=False):
        super(DepthTransposeCNN, self).__init__()
        self.is_out = is_out
        depthwise = nn.ConvTranspose2d(in_channels=in_dim, out_channels=in_dim,
                                       kernel_size=kernel_size, stride=2, padding=1, groups=in_dim)
        pointwise = nn.ConvTranspose2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1,
                                       stride=1, padding=0, bias=False)
        self.nets = nn.ModuleList([depthwise, pointwise])
        if not is_out:
            # Order kept for checkpoint compatibility:
            # [2]=BN(in_dim), [3]=ReLU, [4]=BN(out_dim), [5]=ReLU.
            self.nets.extend([nn.BatchNorm2d(in_dim), nn.ReLU(True),
                              nn.BatchNorm2d(out_dim), nn.ReLU(True)])

    def forward(self, x, output_size):
        bsz, dim, h, w = output_size
        # The grouped deconv keeps in_dim channels; only the spatial part of
        # output_size matters (dim * 2 == in_dim for callers in this file).
        x = self.nets[0](x, output_size=[bsz, dim * 2, h, w])
        if not self.is_out:
            x = self.nets[3](self.nets[2](x))
        x = self.nets[1](x, output_size=output_size)
        if not self.is_out:
            x = self.nets[5](self.nets[4](x))
        return x
class TransposeCNN(nn.Module):
    """Single stride-2 transposed convolution, optionally followed by
    BatchNorm + ReLU when it is not the output layer."""

    def __init__(self, in_dim, out_dim, kernel_size=4, is_out=False):
        super(TransposeCNN, self).__init__()
        self.is_out = is_out
        modules = [nn.ConvTranspose2d(in_channels=in_dim, out_channels=out_dim,
                                      kernel_size=kernel_size, stride=2, padding=1)]
        if not is_out:
            modules += [nn.BatchNorm2d(out_dim), nn.ReLU(True)]
        self.nets = nn.ModuleList(modules)

    def forward(self, x, output_size):
        # Both branches of the original code ran the deconv identically;
        # only the BN/ReLU tail is conditional.
        x = self.nets[0](x, output_size=output_size)
        if not self.is_out:
            x = self.nets[2](self.nets[1](x))
        return x
class LmRnnConsistentSmallCIFAR10(nn.Module):
    # keep batch size same as origin, 32*32*16 ,16*16*32 8*8*64 as the input_size can pass hidden or not pass hidden
    """LmRnn variant whose flattened recurrent input size is made CONSISTENT
    across stages: each stage's map is downsampled until planes * H * W == 512
    (16*32, 32*16, 64*8), so one recurrent cell (optionally shared across
    stages, see ``consistent_separate_rnn``) can carry its hidden state
    through the whole network.
    """
    def __init__(self, block, num_blocks, args, num_classes=10):
        # args fields read here: memory_type, rnn_ratio, conv_activate,
        # memory_before, depth_separate, consistent_separate_rnn,
        # dcgan_init, dcgan_kernel, dcgan_share_conv.
        super(LmRnnConsistentSmallCIFAR10, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.num_blocks = num_blocks
        self.num_big_block = len(num_blocks)
        self.layer_list = nn.ModuleList()
        self.shortcut_list = nn.ModuleList()
        self.rnn_list = nn.ModuleList()
        # m_out_list entries may be None (when rnn_ratio == 1).
        self.m_out_list = nn.ModuleList()
        self.rnn_memory_size_list = []
        self.convs_list = nn.ModuleList()
        self.deconvs_list = nn.ModuleList()
        self.args = args
        self.memory_type = args.memory_type
        self.rnn_ratio = args.rnn_ratio
        self.conv_activate = args.conv_activate
        self.memory_before = args.memory_before
        self.depth_separate = args.depth_separate
        self.consistent_separate_rnn = args.consistent_separate_rnn
        self.dcgan_init = args.dcgan_init
        self.dcgan_kernel= args.dcgan_kernel
        self.dcgan_share_conv = args.dcgan_share_conv
        layer1, shortcut1, rnn1, m_out_linear1, rnn_memory_size1, convs1, deconvs1 = self._make_layer(block, 16, num_blocks[0], stride=1, fm=32,)
        layer2, shortcut2, rnn2, m_out_linear2, rnn_memory_size2, convs2, deconvs2 = self._make_layer(block, 32, num_blocks[1], stride=2, fm=16,)
        layer3, shortcut3, rnn3, m_out_linear3, rnn_memory_size3, convs3, deconvs3 = self._make_layer(block, 64, num_blocks[2], stride=2, fm=8,)
        self.layer_list.extend([layer1, layer2, layer3])
        self.shortcut_list.extend([shortcut1, shortcut2, shortcut3])
        # With a shared cell, stages 2/3 reuse stage 1's cell and projection
        # (_make_layer returned None for them in that case).
        if not self.consistent_separate_rnn:
            rnn2 = rnn1
            rnn3 = rnn1
        self.rnn_list.extend([rnn1, rnn2, rnn3])
        if not self.consistent_separate_rnn:
            m_out_linear2 = m_out_linear1
            m_out_linear3 = m_out_linear1
        self.m_out_list.extend([m_out_linear1, m_out_linear2, m_out_linear3])
        self.rnn_memory_size_list.extend([rnn_memory_size1, rnn_memory_size2, rnn_memory_size3])
        if self.dcgan_share_conv:
            # 32*32*16, 16*16*32, 8*8*64, 4*4*128,2*2*256,1*1*512
            # 1*1*512, 2*2*256, 4*4*128, 8*8*64, 16*16*32, 32*32*16,
            # Later stages reuse a suffix of stage 1's down-conv stack and a
            # prefix of its up-conv stack, plus a fresh output deconv.
            # NOTE(review): this relies on nn.ModuleList.append returning
            # the list itself; on old PyTorch versions append returned None,
            # which would make deconvs2/deconvs3 None -- verify against the
            # installed torch version.
            dim_list = [512, 256, 128, 64, 32, 16]
            convs2 = convs1[1:]
            convs3 = convs1[2:]
            deconvs2 = deconvs1[:-2].append(DepthTransposeCNN(in_dim=dim_list[-3], out_dim=dim_list[-2], kernel_size=self.dcgan_kernel,
                                                              is_out=True) if self.depth_separate else TransposeCNN(in_dim=dim_list[-3], out_dim=dim_list[-2], kernel_size=self.dcgan_kernel, is_out=True))
            deconvs3 = deconvs2[:-3].append(DepthTransposeCNN(in_dim=dim_list[-4], out_dim=dim_list[-3], kernel_size=self.dcgan_kernel,
                                                              is_out=True) if self.depth_separate else TransposeCNN(in_dim=dim_list[-4], out_dim=dim_list[-3], kernel_size=self.dcgan_kernel, is_out=True))
        self.convs_list.extend([convs1, convs2, convs3])
        self.deconvs_list.extend([deconvs1, deconvs2, deconvs3])
        if self.dcgan_init:
            # DCGAN-style weight init on the down/up conv stacks only.
            self.deconvs_list.apply(self.weight_init)
            self.convs_list.apply(self.weight_init)
        self.linear = nn.Linear(64*block.expansion, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride, fm, ):
        # Build one stage. down_rate == fm, so cur_fig_size == fm and
        # rnn_input_size == planes * fm == 512 for every stage (16*32,
        # 32*16, 64*8) -- the "consistent" size the assert below checks.
        strides = [stride] + [1]*(num_blocks-1)
        layers = nn.ModuleList()
        shortcuts = nn.ModuleList()
        down_rate = fm
        # Downsample all the way to 1x1: log2(fm) stride-2 steps.
        num_downs = int(np.log(fm)/np.log(2))
        cur_fig_size = int(fm * fm / down_rate)
        # build rnn
        rnn_input_size = block.expansion * planes * cur_fig_size
        rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes * cur_fig_size)
        assert rnn_memory_size == 512 * self.rnn_ratio
        # Only stage 1 (fm == 32) builds a cell when the cell is shared;
        # __init__ copies it into the other stages.
        if self.consistent_separate_rnn or fm ==32:
            if self.memory_type == 'rnn':
                rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh')
            elif self.memory_type == 'lstm':
                rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True)
            elif self.memory_type == 'gru':
                rnn = torch.nn.GRUCell(rnn_input_size, rnn_memory_size, bias=True)
            else:
                rnn = None
            # rnn out linear
            if self.rnn_ratio != 1:
                m_out_linear = nn.Linear(rnn_memory_size, rnn_input_size)
            else:
                m_out_linear = None
        else:
            rnn = None
            m_out_linear = None
        # NOTE(review): conv_activation stays unbound for any other
        # conv_activate value and would raise UnboundLocalError below when
        # used -- confirm the accepted option values.
        if self.conv_activate == 'lrelu':
            conv_activation = nn.LeakyReLU(True)
        elif self.conv_activate == 'relu':
            conv_activation = nn.ReLU(True)
        if num_downs > 0 or (self.dcgan_share_conv and fm != 32):
            dcgan_kernel=self.dcgan_kernel
            convs = nn.ModuleList()
            deconvs = nn.ModuleList()
            output_dim = block.expansion*planes
            # Down path: each step halves the resolution and doubles the
            # channels; the final step has no BN/activation.
            for j in range(num_downs):
                output_dim = output_dim * 2
                # print('output_dim:', output_dim)
                if j == num_downs-1:
                    if self.depth_separate:
                        cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim / 2), out_channels=int(output_dim / 2),
                                                           kernel_size=dcgan_kernel, stride=2, padding=1, groups=int(output_dim / 2)),
                                                 nn.Conv2d(in_channels=int(output_dim / 2), out_channels=output_dim, kernel_size=1, stride=1, padding=0, bias=False))
                    else:
                        cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim/2), out_channels=output_dim,
                                                           kernel_size=dcgan_kernel, stride=2, padding=1))
                else:
                    if self.depth_separate:
                        cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim / 2), out_channels=int(output_dim / 2),
                                                           kernel_size=dcgan_kernel, stride=2, padding=1, groups=int(output_dim / 2)),
                                                 nn.BatchNorm2d(int(output_dim / 2)),
                                                 nn.ReLU(True),
                                                 nn.Conv2d(in_channels=int(output_dim / 2), out_channels=output_dim, kernel_size=1, stride=1, padding=0, bias=False),
                                                 nn.BatchNorm2d(output_dim),
                                                 nn.ReLU(True))
                    else:
                        cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim/2), out_channels=output_dim,
                                                           kernel_size=dcgan_kernel, stride=2, padding=1),
                                                 nn.BatchNorm2d(output_dim),
                                                 conv_activation)
                convs.append(cur_conv)
            # Up path: mirror of the down path, halving the channels per step.
            for j in range(num_downs):
                output_dim = int(output_dim / 2)
                # print('output_dim:',output_dim)
                if j == num_downs-1:
                    is_out = True
                else:
                    is_out = False
                if self.depth_separate:
                    cur_deconv = DepthTransposeCNN(in_dim=output_dim * 2, out_dim=output_dim, kernel_size=self.dcgan_kernel, is_out=is_out)
                else:
                    cur_deconv = TransposeCNN(in_dim=output_dim * 2, out_dim=output_dim, kernel_size=self.dcgan_kernel, is_out=is_out)
                deconvs.append(cur_deconv)
        else:
            convs=None
            deconvs=None
        for i in range(num_blocks):
            stride = strides[i] # 16*16, 16*16 .. 16*32(stride=2) 32*32 .. 32*64(stride=2),64*64 ..
            layers.append(block(self.in_planes, planes, stride))
            if i == 0:
                # Conventional shortcut only for the first block of a stage.
                shortcut = nn.Sequential()
                if stride != 1 or self.in_planes != block.expansion * planes:
                    shortcut = nn.Sequential(
                        nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False),
                        nn.BatchNorm2d(block.expansion * planes)
                    )
                shortcuts.append(shortcut)
            self.in_planes = planes * block.expansion
        return layers, shortcuts, rnn, m_out_linear, rnn_memory_size, convs, deconvs
    def weight_init(self, m):
        # DCGAN-style init: N(0, 0.02) convs; N(1, 0.02) BatchNorm scale, zero bias.
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0)
        return m
    def init_rnn_state(self, x, rnn_memory_size):
        # Zero hidden state(s); the cell batch equals x's batch size.
        # NOTE(review): the .cuda() calls make this CUDA-only even though
        # type_as() would already place the tensor on x's device.
        # origin_bsz, channel, height, width, = x.size()
        # bsz = height * width * origin_bsz
        bsz = x.size()[0]
        if self.memory_type in ['rnn', 'gru']:
            hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)
            return hx
        if self.memory_type == 'lstm':
            hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)
            cx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)
            return (hx, cx)
    def m_rnn(self, x, cur_i, rnn_hidden):
        # Down-convs -> flatten -> one cell step -> optional projection ->
        # reshape -> mirrored deconvs back to x's shape.
        input_size_list = []
        rnn = self.rnn_list[cur_i]
        # Stages need 5/4/3 down-steps respectively (fm = 32/16/8 -> 1x1),
        # keeping the flattened dim consistent across stages.
        num_downs = 5 - cur_i
        if self.convs_list:
            # (translated) the 5/4/3-step deconv stacks keep the dims consistent
            convs = self.convs_list[cur_i]
            for j in range(num_downs):
                input_size_list.append(x.size())
                # [128, 16, 32, 32]
                # [128,32,16,16]
                # [128,64, 8, 8]
                # [128,128,4,4]
                # [128, 256, 2, 2]
                # [128, 512, 1, 1]
                x = convs[j](x)
        bsz, channel, new_height, new_width = x.size()
        # print("self.convs_list[cur_i]",self.convs_list[cur_i])
        # print('after conv x size',x.size())
        x = x.permute([0, 2, 3, 1]).reshape(bsz, int(self.rnn_memory_size_list[cur_i] / self.args.rnn_ratio)) # bsz, new_height * new_width * channel
        if self.memory_type in ['rnn', 'gru']:
            hx = rnn(x, rnn_hidden)
            m_output = hx # bsz, self.rnn_memory_size
            rnn_hidden = hx
        elif self.memory_type == 'lstm':
            hx, cx = rnn(x, rnn_hidden)
            m_output = hx # bsz, self.rnn_memory_size
            rnn_hidden = (hx, cx)
        if self.m_out_list[cur_i] is not None:
            m_output = self.m_out_list[cur_i](m_output)
        # NOTE(review): `new_height` appears twice -- presumably the second
        # should be new_width; equivalent here because the maps are square.
        m_output = torch.reshape(m_output, (bsz, new_height, new_height, channel,)).permute((0, 3, 1, 2))
        if self.deconvs_list:
            deconvs = self.deconvs_list[cur_i]
            for j in range(num_downs):
                # print('j:%d deconv_in: %s| deconv j:%s' % (j, m_output.size(),deconvs[j]))
                m_output = deconvs[j](m_output, output_size=input_size_list[-j - 1])
        return m_output, rnn_hidden
    def forward(self, x):
        # Stem -> three stages. The hidden state is created once at the very
        # first block and carried through ALL stages (sizes match because the
        # flattened dim is consistent -- see _make_layer).
        out = F.relu(self.bn1(self.conv1(x)))
        # out size torch.Size([128, 16, 32, 32]) [128,32,16,16]
        rnn_hidden = 0 # 0 to error
        for i in range(self.num_big_block):
            for j in range(self.num_blocks[i]):
                layer_i = self.layer_list[i]
                # shortcut_i = self.shortcut_list[i]
                # print('layer i=0,j=0', layer_i[j])
                # print('big_block%d| layer%d| out size%s|' % (i, j, out.size()))
                if i == 0 and j == 0:
                    rnn_hidden = self.init_rnn_state(out, self.rnn_memory_size_list[i])
                if self.memory_before:
                    # Memory runs on the block INPUT and serves as the
                    # residual branch (conventional shortcut for block 0).
                    if j == 0:
                        m_in = self.shortcut_list[i][j](out)
                    else:
                        m_in =out
                    m_out, rnn_hidden = self.m_rnn(m_in, i, rnn_hidden)
                    res = m_out
                    out = layer_i[j](out) # [bsz,dim,h,w]
                    out += res
                    out = F.relu(out)
                else:
                    # Memory runs on the block OUTPUT.
                    # NOTE(review): unlike the branch above, no ReLU is
                    # applied after this addition -- confirm intentional.
                    out = layer_i[j](out)
                    m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden)
                    out += m_out
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def LmRnnConsistentSmall56CIFAR10(args):
    """Depth-56 LmRnnConsistentSmallCIFAR10: 9 BaseBlocks per stage."""
    return LmRnnConsistentSmallCIFAR10(block=BaseBlock, num_blocks=[9] * 3, args=args)
def test():
    """Smoke test: run one CIFAR-sized batch through ResSmall20."""
    model = ResSmall20()
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits.size())
# test()
| 44.944313
| 182
| 0.570005
| 5,026
| 37,933
| 4.08655
| 0.050537
| 0.030235
| 0.03228
| 0.017528
| 0.842836
| 0.826136
| 0.806953
| 0.780564
| 0.765373
| 0.747456
| 0
| 0.040524
| 0.313685
| 37,933
| 843
| 183
| 44.997628
| 0.748406
| 0.08718
| 0
| 0.688253
| 0
| 0
| 0.005502
| 0
| 0
| 0
| 0
| 0
| 0.001506
| 1
| 0.067771
| false
| 0.006024
| 0.006024
| 0.018072
| 0.149096
| 0.00753
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f79cd10364a46270c3d2e287240313f54b9a12a9
| 1,253
|
py
|
Python
|
Data Scraping/dist_code_extractor.py
|
jeevannavar/Case-Pendency
|
338c7d8edab6adec97650d8882e18105bcdbd8ba
|
[
"MIT"
] | null | null | null |
Data Scraping/dist_code_extractor.py
|
jeevannavar/Case-Pendency
|
338c7d8edab6adec97650d8882e18105bcdbd8ba
|
[
"MIT"
] | null | null | null |
Data Scraping/dist_code_extractor.py
|
jeevannavar/Case-Pendency
|
338c7d8edab6adec97650d8882e18105bcdbd8ba
|
[
"MIT"
] | null | null | null |
import re

# Raw <select> markup scraped from the district drop-down.
string = '<option value="1" >Malda</option><option value="2" >Hooghly</option><option value="3" >Calcutta</option><option value="4" >Jalpaiguri</option><option value="6" >Coochbehar</option><option value="7" >Paschim Medinpur</option><option value="8" >Birbhum</option><option value="9" >Purba Medinipur</option><option value="10" >Purulia</option><option value="11" >Howrah</option><option value="12" >Murshidabad</option><option value="13" >South Dinajpur</option><option value="14" >North Twenty Four Parganas</option><option value="17" >Darjeeling</option><option value="18" >Purba Bardhaman</option><option value="19" >Bankura</option><option value="20" >South Twenty Four Parganas</option><option value="21" >North Dinajpur</option><option value="22" >Nadia</option><option value="23" >kalimpong</option><option value="24" >Paschim Bardhaman</option><option value="25" >Jhargram</option> </select>'
# Extract [code, name] pairs directly from the <option> tags: a regex is far
# less brittle than the old fixed-width slice (each[15:]) that silently broke
# if the attribute layout changed by even one character.
codes = [[value, name] for value, name in re.findall(r'<option value="(\d+)" >([^<]+)</option>', string)]
print(codes)
# Reorder each pair into "District,code" CSV lines, printed sorted by name.
codes = [",".join([name, value]) for value, name in codes]
print(len(codes))
print("\n".join(sorted(codes)))
| 96.384615
| 926
| 0.69593
| 175
| 1,253
| 4.982857
| 0.371429
| 0.277523
| 0.409404
| 0.048165
| 0.151376
| 0.123853
| 0
| 0
| 0
| 0
| 0
| 0.037952
| 0.09577
| 1,253
| 12
| 927
| 104.416667
| 0.731686
| 0.080607
| 0
| 0
| 0
| 0.142857
| 0.810105
| 0.45122
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
f79e814b239a077552d880188a2a383279126415
| 3,445
|
py
|
Python
|
tests/test_methods_datetime.py
|
vmstarchenko/sxml
|
3b6fc3a89f404acfe298491555d15df269125e8f
|
[
"MIT"
] | null | null | null |
tests/test_methods_datetime.py
|
vmstarchenko/sxml
|
3b6fc3a89f404acfe298491555d15df269125e8f
|
[
"MIT"
] | null | null | null |
tests/test_methods_datetime.py
|
vmstarchenko/sxml
|
3b6fc3a89f404acfe298491555d15df269125e8f
|
[
"MIT"
] | null | null | null |
import datetime
import textwrap
import sxml
import pytest
from freezegun import freeze_time
# Timezone fixtures shared by the datetime pipeline tests below.
UTC = datetime.timezone.utc
UTC4 = datetime.timezone(datetime.timedelta(hours=4))    # UTC+04:00
UTC_4 = datetime.timezone(datetime.timedelta(hours=-4))  # UTC-04:00
def test_strftime():
    """`datetime.strftime` pipeline step: default format is ISO-8601."""
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.strftime
    '''))
    assert parse(datetime.datetime(2000, 1, 2, 3, 4, 5)) == '2000-01-02T03:04:05'
    # An explicit strftime-style `format` option overrides the default.
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.strftime
        format: '%Y/%m/%d'
    '''))
    assert parse(datetime.datetime(2000, 1, 2, 3, 4, 5)) == '2000/01/02'
def test_fromtimestamp():
    """`datetime.fromtimestamp` accepts both numeric and string epochs."""
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.fromtimestamp
    '''))
    assert parse(946771445) == datetime.datetime(2000, 1, 2, 3, 4, 5)
    # A string epoch (including a fractional part) converts the same way.
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.fromtimestamp
    '''))
    assert parse('946771445.000') == datetime.datetime(2000, 1, 2, 3, 4, 5)
@freeze_time('2000-01-02T03:04:05+0000')
def test_parse():
    """`datetime.parse` with the clock frozen at a known UTC instant."""
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
    '''))
    assert parse('2000-01-02T03:04:05+0000') == datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=UTC)
    # The literal 'Now' resolves to the (frozen) current time.
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
    '''))
    assert parse('Now') == datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=UTC)
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
        timezone: '+0400'
    '''))
    # NOTE(review): 'Now GMT' already carries a zone, and the result is
    # unshifted -- presumably the `timezone` option only applies to naive
    # inputs; confirm against sxml's datetime.parse documentation.
    assert parse('Now GMT') == datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=UTC)
@freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=UTC_4))
def test_parse_other_timezone():
    """`datetime.parse` with the clock frozen at 03:04:05 UTC-4 (= 07:04:05 UTC)."""
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
    '''))
    # Explicit timestamps are unaffected by the frozen clock's zone.
    assert parse('2000-01-02T03:04:05+0000') == datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=UTC)
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
    '''))
    # 'Now' is normalized to UTC: 03:04:05-04:00 -> 07:04:05 UTC.
    assert parse('Now') == datetime.datetime(2000, 1, 2, 7, 4, 5, tzinfo=UTC)
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
        timezone: '+0800'
    '''))
    # NOTE(review): the result is identical with timezone '+0800' --
    # presumably the option does not shift an already-aware 'Now'; confirm.
    assert parse('Now') == datetime.datetime(2000, 1, 2, 7, 4, 5, tzinfo=UTC)
@freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=UTC))
def test_parse_other_base():
    """`datetime.parse`'s `now` option overrides the frozen clock as the base time."""
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
        now: '2001-02-03T04:05:06'
    '''))
    assert parse('Now') == datetime.datetime(2001, 2, 3, 4, 5, 6, tzinfo=UTC)
    # `now` can also be given as a datetime object via the dict-based API.
    parse = sxml.HtmlPipeline([{
        '$apply': 'datetime.parse',
        'now': datetime.datetime(2001, 2, 3, 4, 5, 6)
    }])
    assert parse('Now') == datetime.datetime(2001, 2, 3, 4, 5, 6, tzinfo=UTC)
    # `!Opt` defers the value to a runtime option passed at call time.
    parse = sxml.HtmlPipeline.from_string(textwrap.dedent(r'''
    $chain:
      - $apply: datetime.parse
        now: !Opt now_option
    '''))
    now = datetime.datetime(2003, 2, 3, 4, 5, 6, tzinfo=UTC)
    assert parse('Now', options={'now_option': now}) == now
| 32.196262
| 98
| 0.606676
| 446
| 3,445
| 4.621076
| 0.134529
| 0.124212
| 0.020378
| 0.027171
| 0.829694
| 0.825328
| 0.778263
| 0.77147
| 0.74721
| 0.74721
| 0
| 0.100149
| 0.223222
| 3,445
| 106
| 99
| 32.5
| 0.67003
| 0
| 0
| 0.62069
| 0
| 0
| 0.298694
| 0.039768
| 0
| 0
| 0
| 0
| 0.149425
| 1
| 0.057471
| false
| 0
| 0.057471
| 0
| 0.114943
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e3980c0877ddcb73e92a28834bad84aed6d18bc3
| 21
|
py
|
Python
|
tools/__init__.py
|
GreenBlitz/Rapid-React-Vision
|
a30793e176ef6f2bdc73f12535645ef2ec57126a
|
[
"Apache-2.0"
] | 6
|
2019-12-17T03:16:38.000Z
|
2020-07-10T10:45:24.000Z
|
tools/__init__.py
|
GreenBlitz/Rapid-React-Vision
|
a30793e176ef6f2bdc73f12535645ef2ec57126a
|
[
"Apache-2.0"
] | 5
|
2021-03-19T01:10:11.000Z
|
2022-02-10T13:37:29.000Z
|
sys_app/models/__init__.py
|
sf0402/horse-admin
|
dd3f5c2d317763a1daeef40ce7833371e6ed5ce0
|
[
"MIT"
] | 1
|
2020-11-10T07:54:52.000Z
|
2020-11-10T07:54:52.000Z
|
from .system import *
| 21
| 21
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3c31ca98eab08a674c1b9243ff41d3a6608b725
| 43
|
py
|
Python
|
Tests/Runnable2/r_classmodname_t.py
|
jwilk/Pyrex
|
83dfbae1261788933472e3f9c501ad74c61a37c5
|
[
"Apache-2.0"
] | 5
|
2019-05-26T20:48:36.000Z
|
2021-07-09T01:38:38.000Z
|
Tests/Runnable2/r_classmodname_t.py
|
jwilk/Pyrex
|
83dfbae1261788933472e3f9c501ad74c61a37c5
|
[
"Apache-2.0"
] | null | null | null |
Tests/Runnable2/r_classmodname_t.py
|
jwilk/Pyrex
|
83dfbae1261788933472e3f9c501ad74c61a37c5
|
[
"Apache-2.0"
] | 1
|
2022-02-10T07:14:58.000Z
|
2022-02-10T07:14:58.000Z
|
from r_classmodname import Spam
print Spam
| 14.333333
| 31
| 0.860465
| 7
| 43
| 5.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 2
| 32
| 21.5
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
e3d3f61891e930427ed79848960dfa5a1cfa408c
| 112
|
py
|
Python
|
normflowpy/flows/helpers.py
|
haihabi/NormFlowPy
|
a15ea6a704254a925f25dc94b22459ca2e0beaf5
|
[
"MIT"
] | null | null | null |
normflowpy/flows/helpers.py
|
haihabi/NormFlowPy
|
a15ea6a704254a925f25dc94b22459ca2e0beaf5
|
[
"MIT"
] | null | null | null |
normflowpy/flows/helpers.py
|
haihabi/NormFlowPy
|
a15ea6a704254a925f25dc94b22459ca2e0beaf5
|
[
"MIT"
] | null | null | null |
import torch
def safe_log(x: torch.Tensor, eps=1e-22) -> torch.Tensor:
return torch.log(x.clamp(min=eps))
| 18.666667
| 57
| 0.696429
| 20
| 112
| 3.85
| 0.65
| 0.103896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.142857
| 112
| 5
| 58
| 22.4
| 0.770833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
5403922e603ba4720ff05ba55d5ba44f1b460afa
| 42
|
py
|
Python
|
topgun/models/__init__.py
|
muntumdwara/TopGun
|
fc253faa8ac0a7c9b7d000c2ea018bba9c584d27
|
[
"MIT"
] | null | null | null |
topgun/models/__init__.py
|
muntumdwara/TopGun
|
fc253faa8ac0a7c9b7d000c2ea018bba9c584d27
|
[
"MIT"
] | null | null | null |
topgun/models/__init__.py
|
muntumdwara/TopGun
|
fc253faa8ac0a7c9b7d000c2ea018bba9c584d27
|
[
"MIT"
] | null | null | null |
from .ddm import dividend_discount_models
| 21
| 41
| 0.880952
| 6
| 42
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 2
| 41
| 21
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5403f41fc2f077dfdf1769dbfd8692016c996232
| 446
|
py
|
Python
|
bunny/api/models/__init__.py
|
senpai-development/SenpaiSlasher
|
89842e584b4cd60731ce9c43315c08b02a8dc8e3
|
[
"MIT"
] | null | null | null |
bunny/api/models/__init__.py
|
senpai-development/SenpaiSlasher
|
89842e584b4cd60731ce9c43315c08b02a8dc8e3
|
[
"MIT"
] | null | null | null |
bunny/api/models/__init__.py
|
senpai-development/SenpaiSlasher
|
89842e584b4cd60731ce9c43315c08b02a8dc8e3
|
[
"MIT"
] | 1
|
2021-10-31T02:40:03.000Z
|
2021-10-31T02:40:03.000Z
|
from .channel import * # noqa: F401 F403
from .guild import * # noqa: F401 F403
from .intents import * # noqa: F401 F403
from .member import * # noqa: F401 F403
from .message import * # noqa: F401 F403
from .misc import * # noqa: F401 F403
from .presence import * # noqa: F401 F403
from .role import * # noqa: F401 F403
from .team import * # noqa: F401 F403
from .user import * # noqa: F401 F403
from .voice import * # noqa: F401 F403
| 37.166667
| 42
| 0.679372
| 66
| 446
| 4.590909
| 0.242424
| 0.363036
| 0.508251
| 0.653465
| 0.726073
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190202
| 0.221973
| 446
| 11
| 43
| 40.545455
| 0.682997
| 0.392377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5410396c26149a6538e9eb2cf4e15c4e5c951483
| 2,988
|
py
|
Python
|
python_bindings/correctness/tuple_select.py
|
rafaelravedutti/Halide
|
9417210e7eca49b224028834dbed09cb2d62b1cd
|
[
"Apache-2.0"
] | 107
|
2018-08-16T05:32:52.000Z
|
2022-02-11T19:44:25.000Z
|
python_bindings/correctness/tuple_select.py
|
rafaelravedutti/Halide
|
9417210e7eca49b224028834dbed09cb2d62b1cd
|
[
"Apache-2.0"
] | 79
|
2019-02-22T03:27:45.000Z
|
2022-02-24T23:03:28.000Z
|
python_bindings/correctness/tuple_select.py
|
rafaelravedutti/Halide
|
9417210e7eca49b224028834dbed09cb2d62b1cd
|
[
"Apache-2.0"
] | 16
|
2018-08-21T09:45:13.000Z
|
2021-12-11T03:32:15.000Z
|
import halide as hl
import numpy as np
def test_tuple_select():
x = hl.Var('x')
y = hl.Var('y')
# ternary tuple_select with Expr condition
f = hl.Func('f')
f[x, y] = hl.tuple_select(x + y < 30, (x, y), (x-1, y-2))
a, b = f.realize(200, 200)
for xx in range(a.height()):
for yy in range(a.width()):
correct_a = xx if xx + yy < 30 else xx-1
correct_b = yy if xx + yy < 30 else yy-2
assert a[xx, yy] == correct_a
assert b[xx, yy] == correct_b
# ternary tuple_select with Tuple condition
f = hl.Func('f')
f[x, y] = hl.tuple_select((x < 30, y < 30), (x, y), (x-1, y-2))
a, b = f.realize(200, 200)
for xx in range(a.height()):
for yy in range(a.width()):
correct_a = xx if xx < 30 else xx-1
correct_b = yy if yy < 30 else yy-2
assert a[xx, yy] == correct_a
assert b[xx, yy] == correct_b
# multiway tuple_select with Expr condition
f = hl.Func('f')
f[x, y] = hl.tuple_select(x + y < 30, (x, y),
x + y < 100, (x-1, y-2),
(x-100, y-200))
a, b = f.realize(200, 200)
for xx in range(a.height()):
for yy in range(a.width()):
correct_a = xx if xx + yy < 30 else xx-1 if xx + yy < 100 else xx - 100
correct_b = yy if xx + yy < 30 else yy-2 if xx + yy < 100 else yy - 200
assert a[xx, yy] == correct_a
assert b[xx, yy] == correct_b
# multiway tuple_select with Tuple condition
f = hl.Func('f')
f[x, y] = hl.tuple_select((x < 30, y < 30), (x, y),
(x < 100, y < 100), (x-1, y-2),
(x-100, y-200))
a, b = f.realize(200, 200)
for xx in range(a.height()):
for yy in range(a.width()):
correct_a = xx if xx < 30 else xx-1 if xx < 100 else xx - 100
correct_b = yy if yy < 30 else yy-2 if yy < 100 else yy - 200
assert a[xx, yy] == correct_a
assert b[xx, yy] == correct_b
# Failure case: mixing Expr and Tuple in multiway
try:
f = hl.Func('f')
f[x, y] = hl.tuple_select((x < 30, y < 30), (x, y),
x + y < 100, (x-1, y-2),
(x-100, y-200))
except RuntimeError as e:
assert 'tuple_select() may not mix Expr and Tuple for the condition elements.' in str(e)
else:
assert False, 'Did not see expected exception!'
# Failure case: Tuples of mixed sizes
try:
f = hl.Func('f')
f[x, y] = hl.tuple_select((x < 30, y < 30), (x, y, 0), (1, 2, 3, 4))
except RuntimeError as e:
assert 'tuple_select() requires all Tuples to have identical sizes' in str(e)
else:
assert False, 'Did not see expected exception!'
if __name__ == "__main__":
test_tuple_select()
| 35.571429
| 96
| 0.493976
| 482
| 2,988
| 2.979253
| 0.149378
| 0.023677
| 0.044568
| 0.033426
| 0.819638
| 0.816852
| 0.808496
| 0.755571
| 0.738858
| 0.714485
| 0
| 0.073028
| 0.372155
| 2,988
| 83
| 97
| 36
| 0.692431
| 0.084003
| 0
| 0.650794
| 0
| 0
| 0.075092
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.015873
| false
| 0
| 0.031746
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
581ec34406a8d3ce10fc41df03681e89b51b67ea
| 34
|
py
|
Python
|
Programmers/src/12925/solution.py
|
lstar2397/algorithms
|
686ea882079e26111f86b5bd5a7ab1b14ccf0fa2
|
[
"MIT"
] | null | null | null |
Programmers/src/12925/solution.py
|
lstar2397/algorithms
|
686ea882079e26111f86b5bd5a7ab1b14ccf0fa2
|
[
"MIT"
] | null | null | null |
Programmers/src/12925/solution.py
|
lstar2397/algorithms
|
686ea882079e26111f86b5bd5a7ab1b14ccf0fa2
|
[
"MIT"
] | null | null | null |
def solution(s):
return int(s)
| 17
| 17
| 0.647059
| 6
| 34
| 3.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 2
| 17
| 17
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5861b0edf72a3563c5f7e40f8c6bf4f94a4c77d8
| 1,770
|
py
|
Python
|
reversi/strategies/coordinator/__init__.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 10
|
2020-07-24T22:04:51.000Z
|
2022-03-25T06:09:48.000Z
|
reversi/strategies/coordinator/__init__.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 12
|
2021-04-30T09:53:18.000Z
|
2022-02-25T04:16:02.000Z
|
reversi/strategies/coordinator/__init__.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 1
|
2021-11-25T13:12:32.000Z
|
2021-11-25T13:12:32.000Z
|
from ...strategies.coordinator.scorer import TableScorer, PossibilityScorer, OpeningScorer, WinLoseScorer, NumberScorer, EdgeScorer, CornerScorer, BlankScorer, EdgeCornerScorer # noqa: E501
from ...strategies.coordinator.selector import Selector, Selector_W
from ...strategies.coordinator.orderer import Orderer, Orderer_B, Orderer_C, Orderer_P, Orderer_BC, Orderer_CB, Orderer_PCB
from ...strategies.coordinator.evaluator import Evaluator, Evaluator_T, Evaluator_P, Evaluator_O, Evaluator_W, Evaluator_N, Evaluator_N_Fast, Evaluator_E, Evaluator_C, Evaluator_B, Evaluator_Ec, Evaluator_TP, Evaluator_TPO, Evaluator_NW, Evaluator_PW, Evaluator_TPW, Evaluator_TPW_Fast, Evaluator_TPOW, Evaluator_TPWE, Evaluator_TPWE_Fast, Evaluator_TPWEC, Evaluator_PWE, Evaluator_BW, Evaluator_EcW, Evaluator_BWEc, Evaluator_PBWEc, Evaluator_TPWEB # noqa: E501
__all__ = [
'TableScorer',
'PossibilityScorer',
'OpeningScorer',
'WinLoseScorer',
'NumberScorer',
'EdgeScorer',
'CornerScorer',
'BlankScorer',
'EdgeCornerScorer',
'Selector',
'Selector_W',
'Orderer',
'Orderer_B',
'Orderer_C',
'Orderer_P',
'Orderer_BC',
'Orderer_CB',
'Orderer_PCB',
'Evaluator',
'Evaluator_T',
'Evaluator_P',
'Evaluator_O',
'Evaluator_W',
'Evaluator_N',
'Evaluator_N_Fast',
'Evaluator_E',
'Evaluator_C',
'Evaluator_B',
'Evaluator_Ec',
'Evaluator_TP',
'Evaluator_TPO',
'Evaluator_NW',
'Evaluator_PW',
'Evaluator_TPW',
'Evaluator_TPW_Fast',
'Evaluator_TPOW',
'Evaluator_TPWE',
'Evaluator_TPWE_Fast',
'Evaluator_TPWEC',
'Evaluator_PWE',
'Evaluator_BW',
'Evaluator_EcW',
'Evaluator_BWEc',
'Evaluator_PBWEc',
'Evaluator_TPWEB',
]
| 32.777778
| 463
| 0.718079
| 187
| 1,770
| 6.390374
| 0.251337
| 0.065272
| 0.083682
| 0.090377
| 0.826778
| 0.826778
| 0.826778
| 0.826778
| 0.826778
| 0.63431
| 0
| 0.004071
| 0.167232
| 1,770
| 53
| 464
| 33.396226
| 0.806649
| 0.011864
| 0
| 0
| 0
| 0
| 0.313288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078431
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58836653c820d017fba63997dc1db865808e4a91
| 39
|
py
|
Python
|
djangolive/utils/__init__.py
|
Tomvictor/djangolive
|
fbb482395e4b9be5b947480047868870bb77f344
|
[
"MIT"
] | 1
|
2021-04-06T17:53:06.000Z
|
2021-04-06T17:53:06.000Z
|
djangolive/utils/__init__.py
|
Tomvictor/djangolive
|
fbb482395e4b9be5b947480047868870bb77f344
|
[
"MIT"
] | 6
|
2021-04-16T16:06:55.000Z
|
2021-04-24T07:13:07.000Z
|
djangolive/utils/__init__.py
|
Tomvictor/djangolive
|
fbb482395e4b9be5b947480047868870bb77f344
|
[
"MIT"
] | null | null | null |
from ._archive import ZipResponseMixin
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
588b00f0c8a740075b5bd925c05175c12eb1037b
| 180
|
py
|
Python
|
student_mgmt/__init__.py
|
MustafaRaad/Student_management__Certificate
|
a9e8ca6cbf2f2ab39d6a44b2bd56baf8a8042505
|
[
"MIT"
] | null | null | null |
student_mgmt/__init__.py
|
MustafaRaad/Student_management__Certificate
|
a9e8ca6cbf2f2ab39d6a44b2bd56baf8a8042505
|
[
"MIT"
] | null | null | null |
student_mgmt/__init__.py
|
MustafaRaad/Student_management__Certificate
|
a9e8ca6cbf2f2ab39d6a44b2bd56baf8a8042505
|
[
"MIT"
] | null | null | null |
"""
##########################################
## Developed By:Mustafa Raad Mutashar ##
## mustafa.raad.7@gmail.com 2020 ##
##########################################
"""
| 25.714286
| 42
| 0.3
| 11
| 180
| 4.909091
| 0.818182
| 0.407407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031847
| 0.127778
| 180
| 6
| 43
| 30
| 0.312102
| 0.95
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5436a01008f2e4eb7b96ef96a5fd72bd2eeedae8
| 41
|
py
|
Python
|
libpyka/db/__init__.py
|
karakawa88/libpyka
|
3cd29b68f26ca6bce6239545142b7cc410d97bd1
|
[
"MIT"
] | null | null | null |
libpyka/db/__init__.py
|
karakawa88/libpyka
|
3cd29b68f26ca6bce6239545142b7cc410d97bd1
|
[
"MIT"
] | null | null | null |
libpyka/db/__init__.py
|
karakawa88/libpyka
|
3cd29b68f26ca6bce6239545142b7cc410d97bd1
|
[
"MIT"
] | null | null | null |
from .SQLException import SQLException
| 10.25
| 38
| 0.829268
| 4
| 41
| 8.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 3
| 39
| 13.666667
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5499ebec09d878806d2d332e24b49dea33e29dbb
| 166
|
py
|
Python
|
chapter 1/c1q28.py
|
jonathanmonreal/nltk-examples
|
95fb0b28c9ba433bac20990715496edc469293b4
|
[
"Apache-2.0"
] | 2
|
2015-08-06T18:58:44.000Z
|
2018-05-11T13:00:28.000Z
|
chapter 1/c1q28.py
|
jonathanmonreal/nltk-examples
|
95fb0b28c9ba433bac20990715496edc469293b4
|
[
"Apache-2.0"
] | null | null | null |
chapter 1/c1q28.py
|
jonathanmonreal/nltk-examples
|
95fb0b28c9ba433bac20990715496edc469293b4
|
[
"Apache-2.0"
] | null | null | null |
# Jonathan Monreal
from __future__ import division
import nltk
def percentage(word, text):
return 100 * ([word.lower() for word in text].count(word)/len(text))
| 20.75
| 72
| 0.728916
| 24
| 166
| 4.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021429
| 0.156627
| 166
| 7
| 73
| 23.714286
| 0.814286
| 0.096386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
54bee0d7627971277e34b7a4ffd4c77ded02b310
| 94
|
py
|
Python
|
HedyNet/promotions/views.py
|
akjohnson/HedyNet
|
77771605fa8987435bd74ce8ec2a33008e3f8fd1
|
[
"Apache-2.0"
] | null | null | null |
HedyNet/promotions/views.py
|
akjohnson/HedyNet
|
77771605fa8987435bd74ce8ec2a33008e3f8fd1
|
[
"Apache-2.0"
] | null | null | null |
HedyNet/promotions/views.py
|
akjohnson/HedyNet
|
77771605fa8987435bd74ce8ec2a33008e3f8fd1
|
[
"Apache-2.0"
] | null | null | null |
from mailchimp2 import SubscribeFormView
class GeekGirlCon(SubscribeFormView):
pass
| 15.666667
| 40
| 0.787234
| 8
| 94
| 9.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.180851
| 94
| 5
| 41
| 18.8
| 0.948052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
54c0a541f65af9a0faf84e33006251254543388e
| 39
|
py
|
Python
|
test_suite/suite/test08/other_mod.py
|
joncatanio/cannoli
|
410f6bea362bf9e33eecc0e01fb080dadd14ef23
|
[
"MIT"
] | 755
|
2017-12-09T05:34:43.000Z
|
2022-03-26T09:15:56.000Z
|
test_suite/suite/test08/other_mod.py
|
joncatanio/cannoli
|
410f6bea362bf9e33eecc0e01fb080dadd14ef23
|
[
"MIT"
] | 8
|
2017-12-12T01:03:18.000Z
|
2020-06-29T01:41:03.000Z
|
test_suite/suite/test08/other_mod.py
|
joncatanio/cannoli
|
410f6bea362bf9e33eecc0e01fb080dadd14ef23
|
[
"MIT"
] | 23
|
2018-05-17T17:48:23.000Z
|
2022-03-26T09:15:57.000Z
|
def func():
print("other_mod call")
| 13
| 26
| 0.641026
| 6
| 39
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 39
| 2
| 27
| 19.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.358974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b738df8fb2406cc2c09ee8ef32cc652e1423b01a
| 8,987
|
py
|
Python
|
adventofcode/day11.py
|
jcfvalente/adventofcode2020
|
ec0deede4661dd80945d96cb72b034579b9ac62e
|
[
"MIT"
] | null | null | null |
adventofcode/day11.py
|
jcfvalente/adventofcode2020
|
ec0deede4661dd80945d96cb72b034579b9ac62e
|
[
"MIT"
] | null | null | null |
adventofcode/day11.py
|
jcfvalente/adventofcode2020
|
ec0deede4661dd80945d96cb72b034579b9ac62e
|
[
"MIT"
] | null | null | null |
from adventofcode.inputs import reader
import copy
def solve_part_one(seats_map: list) -> int:
seats_stable = False
aux_seats_map = []
seats_map = [list(row) for row in seats_map]
while not seats_stable: # Keeping doing this while the seats change
internal_stable = True
if aux_seats_map:
seats_map = copy.deepcopy(aux_seats_map)
else:
aux_seats_map = copy.deepcopy(seats_map)
for row in range(0, len(seats_map)): # each row
for seat_position in range(0, len(seats_map[row])): # each set
if seats_map[row][seat_position] == ".":
continue
occupied = 0
# North
if row - 1 >= 0:
# N
if seats_map[row - 1][seat_position] == "#":
occupied += 1
# NE
if seat_position - 1 >= 0:
if seats_map[row - 1][seat_position - 1] == "#":
occupied += 1
# NW
if seat_position + 1 < len(seats_map[row]):
if seats_map[row - 1][seat_position + 1] == "#":
occupied += 1
# South
if row + 1 < len(seats_map):
# S
if seats_map[row + 1][seat_position] == "#":
occupied += 1
# SE
if seat_position - 1 >= 0:
if seats_map[row + 1][seat_position - 1] == "#":
occupied += 1
# NW
if seat_position + 1 < len(seats_map[row]):
if seats_map[row + 1][seat_position + 1] == "#":
occupied += 1
# East
if seat_position - 1 >= 0:
if seats_map[row][seat_position - 1] == "#":
occupied += 1
# West
if seat_position + 1 < len(seats_map[row]):
if seats_map[row][seat_position + 1] == "#":
occupied += 1
# Update setting status
if seats_map[row][seat_position] == "L" and occupied == 0:
aux_seats_map[row][seat_position] = "#"
internal_stable = False
continue
if seats_map[row][seat_position] == "#" and occupied >= 4:
aux_seats_map[row][seat_position] = "L"
internal_stable = False
continue
if internal_stable:
# If we get here it means no seats have changed, it's stable
seats_map = aux_seats_map
seats_stable = True
return count_occupied_seats(seats_map)
def solve_part_two(seats_map: list) -> int:
seats_stable = False
aux_seats_map = []
seats_map = [list(row) for row in seats_map]
while not seats_stable: # Keeping doing this while the seats change
internal_stable = True
if aux_seats_map:
seats_map = copy.deepcopy(aux_seats_map)
else:
aux_seats_map = copy.deepcopy(seats_map)
for row in range(0, len(seats_map)):
for seat_position in range(0, len(seats_map[row])):
if seats_map[row][seat_position] == ".":
continue
occupied = 0
# North
if row - 1 >= 0:
# N
first_try = row - 1
while first_try >= 0:
if seats_map[first_try][seat_position] != ".":
if seats_map[first_try][seat_position] == "#":
occupied += 1
break
else:
break
first_try -= 1
# NE
if seat_position - 1 >= 0:
up_try = row - 1
left_try = seat_position - 1
while up_try >= 0 and left_try >= 0:
if seats_map[up_try][left_try] != ".":
if seats_map[up_try][left_try] == "#":
occupied += 1
break
else:
break
up_try -= 1
left_try -= 1
# NW
if seat_position + 1 < len(seats_map[row]):
up_try = row - 1
right_try = seat_position + 1
while up_try >= 0 and right_try < len(seats_map[row]):
if seats_map[up_try][right_try] != ".":
if seats_map[up_try][right_try] == "#":
occupied += 1
break
else:
break
up_try -= 1
right_try += 1
# South
if row + 1 < len(seats_map):
# S
first_try = row + 1
while first_try < len(seats_map):
if seats_map[first_try][seat_position] != ".":
if seats_map[first_try][seat_position] == "#":
occupied += 1
break
else:
break
first_try += 1
# SE
if seat_position - 1 >= 0:
down_try = row + 1
left_try = seat_position - 1
while down_try < len(seats_map) and left_try >= 0:
if seats_map[down_try][left_try] != ".":
if seats_map[down_try][left_try] == "#":
occupied += 1
break
else:
break
down_try += 1
left_try -= 1
# SW
if seat_position + 1 < len(seats_map[row]):
down_try = row + 1
right_try = seat_position + 1
while down_try < len(seats_map) and right_try < len(seats_map[row]):
if seats_map[down_try][right_try] != ".":
if seats_map[down_try][right_try] == "#":
occupied += 1
break
else:
break
down_try += 1
right_try += 1
# East
left_first_try = seat_position - 1
while left_first_try >= 0:
if seats_map[row][left_first_try] == "#":
occupied += 1
break
elif seats_map[row][left_first_try] == "L":
break
left_first_try -= 1
# West
right_first_try = seat_position + 1
while right_first_try < len(seats_map[row]):
if seats_map[row][right_first_try] == "#":
occupied += 1
break
elif seats_map[row][right_first_try] == "L":
break
right_first_try += 1
# Update setting status
if seats_map[row][seat_position] == "L" and occupied == 0:
aux_seats_map[row][seat_position] = "#"
internal_stable = False
continue
if seats_map[row][seat_position] == "#" and occupied >= 5:
aux_seats_map[row][seat_position] = "L"
internal_stable = False
continue
if internal_stable:
# If we get here it means no seats have changed, it's stable
seats_map = aux_seats_map
seats_stable = True
# Count the seats
return count_occupied_seats(seats_map)
def count_occupied_seats(puzzle: list) -> int:
occupied_seats = 0
for row in puzzle:
for seat in row:
if seat == "#":
occupied_seats += 1
return occupied_seats
if __name__ == '__main__':
puzzle_input = reader.read_file('adventofcode/inputs/day11.txt')
solve_part_one(puzzle_input)
solve_part_two(puzzle_input)
| 40.85
| 92
| 0.401914
| 866
| 8,987
| 3.891455
| 0.099307
| 0.178042
| 0.104451
| 0.061721
| 0.880415
| 0.866172
| 0.833828
| 0.744214
| 0.717804
| 0.573591
| 0
| 0.021488
| 0.518415
| 8,987
| 219
| 93
| 41.03653
| 0.757163
| 0.039502
| 0
| 0.697143
| 0
| 0
| 0.00837
| 0.003371
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017143
| false
| 0
| 0.011429
| 0
| 0.045714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b741acce954698c96769fd78fe934510ae2f593a
| 9,552
|
py
|
Python
|
tests/test_attention.py
|
ZeroDesigner/alphafold2
|
25255ab69314480316e5dc978e2cac1d2c8aa9d1
|
[
"MIT"
] | 1
|
2022-01-21T04:58:18.000Z
|
2022-01-21T04:58:18.000Z
|
tests/test_attention.py
|
ZeroDesigner/alphafold2
|
25255ab69314480316e5dc978e2cac1d2c8aa9d1
|
[
"MIT"
] | null | null | null |
tests/test_attention.py
|
ZeroDesigner/alphafold2
|
25255ab69314480316e5dc978e2cac1d2c8aa9d1
|
[
"MIT"
] | null | null | null |
import torch
from alphafold2_pytorch.alphafold2 import Alphafold2
from alphafold2_pytorch.utils import *
def test_main():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 128))
msa = torch.randint(0, 21, (2, 5, 64))
mask = torch.ones_like(seq).bool()
msa_mask = torch.ones_like(msa).bool()
distogram = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert True
def test_no_msa():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 128))
mask = torch.ones_like(seq).bool()
distogram = model(
seq,
mask = mask
)
assert True
def test_anglegrams():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_angles = True
)
seq = torch.randint(0, 21, (2, 128))
msa = torch.randint(0, 21, (2, 5, 64))
mask = torch.ones_like(seq).bool()
msa_mask = torch.ones_like(msa).bool()
distogram, theta, phi, omega = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert True
def test_msa_tie_row_attn():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
msa_tie_row_attn = True
)
seq = torch.randint(0, 21, (2, 128))
msa = torch.randint(0, 21, (2, 5, 64))
mask = torch.ones_like(seq).bool()
msa_mask = torch.ones_like(msa).bool()
distogram = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert True
def test_templates():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
attn_types = ('full', 'intra_attn', 'seq_only')
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 32))
msa_mask = torch.ones_like(msa).bool()
templates_seq = torch.randint(0, 21, (2, 2, 16))
templates_coors = torch.randn(2, 2, 16, 3)
templates_mask = torch.ones_like(templates_seq).bool()
distogram = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
templates_seq = templates_seq,
templates_coors = templates_coors,
templates_mask = templates_mask
)
def test_embeddings():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
embedds = torch.randn(2, 1, 16, 1280)
# without mask
distogram = model(
seq,
mask = mask,
embedds = embedds,
msa_mask = None
)
# with mask
embedds_mask = torch.ones_like(embedds[..., -1]).bool()
distogram = model(
seq,
mask = mask,
embedds = embedds,
msa_mask = embedds_mask
)
assert True
def test_coords_se3():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
num_backbone_atoms = 3,
structure_module_dim = 1,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
structure_module_knn = 2
)
seq = torch.randint(0, 21, (2, 8))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 8 * 14, 3), 'must output coordinates'
def test_coords_se3_with_global_nodes():
model = Alphafold2(
dim = 32,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
num_backbone_atoms = 3,
structure_module_dim = 1,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
structure_module_knn = 2,
structure_num_global_nodes = 2
)
seq = torch.randint(0, 21, (2, 8))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 8 * 14, 3), 'must output coordinates'
def test_edges_to_equivariant_network():
model = Alphafold2(
dim = 32,
depth = 1,
heads = 2,
dim_head = 32,
use_se3_transformer = False,
predict_coords = True,
predict_angles = True,
num_backbone_atoms = 3
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 32))
msa_mask = torch.ones_like(msa).bool()
coords, confidences = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask,
return_confidence = True
)
assert True, 'should run without errors'
def test_real_value_distance_with_coords():
model = Alphafold2(
dim = 32,
depth = 1,
heads = 2,
dim_head = 16,
predict_coords = True,
predict_real_value_distances = True,
num_backbone_atoms = 3,
structure_module_dim = 1,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
structure_module_knn = 2
)
seq = torch.randint(0, 21, (2, 8))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
assert coords.shape == (2, 8 * 14, 3), 'must output coordinates'
def test_coords_se3_backwards():
model = Alphafold2(
dim = 256,
depth = 2,
heads = 2,
dim_head = 32,
predict_coords = True,
num_backbone_atoms = 3,
structure_module_dim = 1,
structure_module_depth = 1,
structure_module_heads = 1,
structure_module_dim_head = 1,
structure_module_knn = 1
)
seq = torch.randint(0, 21, (2, 8))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 16))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
coords.sum().backward()
assert True, 'must be able to go backwards through MDS and center distogram'
def test_coords_En():
model = Alphafold2(
dim = 256,
depth = 2,
heads = 2,
dim_head = 32,
use_se3_transformer = False,
predict_coords = True,
num_backbone_atoms = 3
)
seq = torch.randint(0, 21, (2, 16))
mask = torch.ones_like(seq).bool()
msa = torch.randint(0, 21, (2, 5, 32))
msa_mask = torch.ones_like(msa).bool()
coords = model(
seq,
msa,
mask = mask,
msa_mask = msa_mask
)
# get masks : cloud is all points in prot. chain is all for which we have labels
cloud_mask = scn_cloud_mask(seq, boolean = True)
flat_cloud_mask = rearrange(cloud_mask, 'b l c -> b (l c)')
chain_mask = (mask.unsqueeze(-1) * cloud_mask)
flat_chain_mask = rearrange(chain_mask, 'b l c -> b (l c)')
# put in sidechainnet format
wrapper = torch.zeros(*cloud_mask.shape, 3).to(coords.device).type(coords.type())
wrapper[cloud_mask] = coords[flat_cloud_mask]
assert wrapper[chain_mask].shape == coords[flat_chain_mask].shape, 'must output coordinates'
def test_coords_En_backwards():
    """Gradients must flow back through the En-transformer coordinate head."""
    model = Alphafold2(
        dim = 256,
        depth = 2,
        heads = 2,
        dim_head = 32,
        use_se3_transformer = False,
        predict_coords = True,
        num_backbone_atoms = 3
    )
    seq = torch.randint(0, 21, (2, 16))
    msa = torch.randint(0, 21, (2, 5, 32))
    coords = model(
        seq,
        msa,
        mask = torch.ones_like(seq).bool(),
        msa_mask = torch.ones_like(msa).bool()
    )
    coords.sum().backward()
    assert True, 'must be able to go backwards through MDS and center distogram'
def test_confidence_En():
    """Per-coordinate confidences must be shaped like the coordinates."""
    model = Alphafold2(
        dim = 256,
        depth = 1,
        heads = 2,
        dim_head = 32,
        use_se3_transformer = False,
        predict_coords = True,
        num_backbone_atoms = 3
    )
    seq = torch.randint(0, 21, (2, 16))
    msa = torch.randint(0, 21, (2, 5, 32))
    coords, confidences = model(
        seq,
        msa,
        mask = torch.ones_like(seq).bool(),
        msa_mask = torch.ones_like(msa).bool(),
        return_confidence = True
    )
    assert coords.shape[:-1] == confidences.shape[:-1]
def test_reversible():
    """Reversible trunk must support a full forward/backward pass."""
    model = Alphafold2(
        dim = 32,
        depth = 2,
        heads = 2,
        dim_head = 32,
        reversible = True
    )
    seq = torch.randint(0, 21, (2, 128))
    msa = torch.randint(0, 21, (2, 5, 64))
    distogram = model(
        seq,
        msa,
        mask = torch.ones_like(seq).bool(),
        msa_mask = torch.ones_like(msa).bool()
    )
    distogram.sum().backward()
    assert True
| 23.240876
| 96
| 0.555486
| 1,226
| 9,552
| 4.126427
| 0.107667
| 0.074718
| 0.07709
| 0.10081
| 0.782368
| 0.774264
| 0.748369
| 0.744416
| 0.744416
| 0.72623
| 0
| 0.057969
| 0.329983
| 9,552
| 411
| 97
| 23.240876
| 0.7325
| 0.0134
| 0
| 0.760355
| 0
| 0
| 0.031104
| 0
| 0
| 0
| 0
| 0
| 0.04142
| 1
| 0.044379
| false
| 0
| 0.008876
| 0
| 0.053254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f8dccd479313b3e4fff06437382cc12c7c54b45
| 46
|
py
|
Python
|
routeros_ssh_connector/__init__.py
|
hosin211/routeros_ssh_connector
|
4728aff41c359b47b9a9c5b78b5689c9f7771a62
|
[
"MIT"
] | 6
|
2021-08-03T08:15:24.000Z
|
2022-01-21T17:43:44.000Z
|
routeros_ssh_connector/__init__.py
|
hosin211/routeros_ssh_connector
|
4728aff41c359b47b9a9c5b78b5689c9f7771a62
|
[
"MIT"
] | null | null | null |
routeros_ssh_connector/__init__.py
|
hosin211/routeros_ssh_connector
|
4728aff41c359b47b9a9c5b78b5689c9f7771a62
|
[
"MIT"
] | 1
|
2022-01-29T16:04:04.000Z
|
2022-01-29T16:04:04.000Z
|
from routeros_ssh_connector.connector import *
| 46
| 46
| 0.891304
| 6
| 46
| 6.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3fa03d7354b9949200aa201b513e9a27dffe978f
| 5,620
|
py
|
Python
|
stix2generator/test/test_stix21_registry.py
|
majacQ/cti-stix-generator
|
7465ecd29ef6caabf9f1b60ad45dad789c475028
|
[
"BSD-3-Clause"
] | null | null | null |
stix2generator/test/test_stix21_registry.py
|
majacQ/cti-stix-generator
|
7465ecd29ef6caabf9f1b60ad45dad789c475028
|
[
"BSD-3-Clause"
] | null | null | null |
stix2generator/test/test_stix21_registry.py
|
majacQ/cti-stix-generator
|
7465ecd29ef6caabf9f1b60ad45dad789c475028
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import pytest
import stix2
import stix2.exceptions
import stix2generator
import stix2generator.language.builder
import stix2generator.generation.object_generator
def get_stix21_spec_names():
    """
    Return the spec names from the STIX 2.1 registry.  The parametrized
    tests below need them to exercise every specification.
    """
    return stix2generator._get_registry("2.1").keys()
# Collected once at import time so pytest.mark.parametrize can consume it.
STIX21_SPEC_NAMES = get_stix21_spec_names()
@pytest.fixture(scope="module")
def generator_random_props():
    """
    Creates a generator which randomly includes or excludes properties.
    """
    cfg = stix2generator.generation.object_generator.Config(
        minimize_ref_properties=False
    )
    return stix2generator.create_object_generator(cfg, None, "2.1")
@pytest.fixture(scope="module")
def generator_min_props():
    """
    Creates a generator which omits all optional properties.
    """
    cfg = stix2generator.generation.object_generator.Config(
        minimize_ref_properties=False,
        optional_property_probability=0
    )
    return stix2generator.create_object_generator(cfg, None, "2.1")
@pytest.fixture(scope="module")
def generator_all_props():
    """
    Creates a generator which includes all optional properties.
    """
    cfg = stix2generator.generation.object_generator.Config(
        minimize_ref_properties=False,
        optional_property_probability=1
    )
    return stix2generator.create_object_generator(cfg, None, "2.1")
@pytest.mark.parametrize("spec_name", STIX21_SPEC_NAMES)
def test_generation_random_props(generator_random_props, spec_name, num_trials):
    for _trial in range(num_trials):
        obj_dict = generator_random_props.generate(spec_name)
        # Ensure json-serializability
        json.dumps(obj_dict, ensure_ascii=False)
        # Specs whose names start upper-case are STIX object specs; "helper"
        # specs are not parseable, so skip them.
        if not spec_name[0].isupper():
            continue
        try:
            stix2.parse(obj_dict, version="2.1")
        except stix2.exceptions.ParseError:
            # Maybe we can use this to mean this was an SCO?
            # Try a re-parse as an SCO. Need a better way to make the
            # distinction...
            stix2.parse_observable(obj_dict, version="2.1")
@pytest.mark.parametrize("spec_name", STIX21_SPEC_NAMES)
def test_generation_min_props(generator_min_props, spec_name):
    obj_dict = generator_min_props.generate(spec_name)
    # Ensure json-serializability
    json.dumps(obj_dict, ensure_ascii=False)
    # Specs whose names start upper-case are STIX object specs; "helper"
    # specs are not parseable, so do nothing further for them.
    if spec_name[0].isupper():
        try:
            stix2.parse(obj_dict, version="2.1")
        except stix2.exceptions.ParseError:
            # Maybe we can use this to mean this was an SCO?
            # Try a re-parse as an SCO. Need a better way to make the
            # distinction...
            stix2.parse_observable(obj_dict, version="2.1")
@pytest.mark.parametrize("spec_name", STIX21_SPEC_NAMES)
def test_generation_all_props(generator_all_props, spec_name):
    obj_dict = generator_all_props.generate(spec_name)
    # Ensure json-serializability
    json.dumps(obj_dict, ensure_ascii=False)
    # Specs whose names start upper-case are STIX object specs; "helper"
    # specs are not parseable, so do nothing further for them.
    if spec_name[0].isupper():
        try:
            stix2.parse(obj_dict, version="2.1")
        except stix2.exceptions.ParseError:
            # Maybe we can use this to mean this was an SCO?
            # Try a re-parse as an SCO. Need a better way to make the
            # distinction...
            stix2.parse_observable(obj_dict, version="2.1")
# Test "relationship" separately since it is lower-cased, but nevertheless
# parseable by stix2. I wanted to keep it all lower-case so people
# couldn't use it like an SDO/SCO in the prototyping language.
def test_generation_random_props_relationship(
    generator_random_props, num_trials
):
    """Generated relationships must serialize and parse, over many trials."""
    for _trial in range(num_trials):
        obj = generator_random_props.generate("relationship")
        json.dumps(obj, ensure_ascii=False)
        stix2.parse(obj, version="2.1")
def test_generation_min_props_relationship(generator_min_props):
    """A minimal relationship must serialize and parse."""
    obj = generator_min_props.generate("relationship")
    json.dumps(obj, ensure_ascii=False)
    stix2.parse(obj, version="2.1")
def test_generation_all_props_relationship(generator_all_props):
    """A fully-populated relationship must serialize and parse."""
    obj = generator_all_props.generate("relationship")
    json.dumps(obj, ensure_ascii=False)
    stix2.parse(obj, version="2.1")
# Similar for sightings.
def test_generation_random_props_sighting(
    generator_random_props, num_trials
):
    """Generated sightings must serialize and parse, over many trials."""
    for _trial in range(num_trials):
        obj = generator_random_props.generate("sighting")
        json.dumps(obj, ensure_ascii=False)
        stix2.parse(obj, version="2.1")
def test_generation_min_props_sighting(generator_min_props):
    """A minimal sighting must serialize and parse."""
    obj = generator_min_props.generate("sighting")
    json.dumps(obj, ensure_ascii=False)
    stix2.parse(obj, version="2.1")
def test_generation_all_props_sighting(generator_all_props):
    """A fully-populated sighting must serialize and parse."""
    obj = generator_all_props.generate("sighting")
    json.dumps(obj, ensure_ascii=False)
    stix2.parse(obj, version="2.1")
| 32.114286
| 80
| 0.711566
| 752
| 5,620
| 5.086436
| 0.176862
| 0.032941
| 0.037647
| 0.040784
| 0.818824
| 0.769412
| 0.744837
| 0.737516
| 0.737516
| 0.699869
| 0
| 0.018259
| 0.20089
| 5,620
| 174
| 81
| 32.298851
| 0.833445
| 0.241815
| 0
| 0.586957
| 0
| 0
| 0.036647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.141304
| false
| 0
| 0.076087
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fc3cab71b067e4586dd5d69d8364251d22bf5f7
| 1,428
|
py
|
Python
|
cycle_2018/migrations/0009_auto_20180222_1459.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 17
|
2018-03-27T15:09:58.000Z
|
2020-05-13T11:32:43.000Z
|
cycle_2018/migrations/0009_auto_20180222_1459.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 59
|
2018-03-21T17:08:15.000Z
|
2021-12-13T19:47:37.000Z
|
cycle_2018/migrations/0009_auto_20180222_1459.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 11
|
2018-09-11T23:18:32.000Z
|
2021-12-15T08:43:58.000Z
|
# Generated by Django 2.0.1 on 2018-02-22 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an identical 'status' column to filing and schedules A, B and E."""

    dependencies = [
        ('cycle_2018', '0008_schedulee'),
    ]

    # The same AddField is applied to four models; build the operations in
    # one comprehension instead of repeating the field definition.
    operations = [
        migrations.AddField(
            model_name=target_model,
            name='status',
            field=models.CharField(
                choices=[
                    ('ACTIVE', 'active'),
                    ('SUPERSEDED', 'superseded by amendment'),
                    ('COVERED', 'covered by periodic'),
                    ('MEMO', 'memo'),
                ],
                default='ACTIVE',
                max_length=50,
            ),
        )
        for target_model in ('filing', 'schedulea', 'scheduleb', 'schedulee')
    ]
| 42
| 197
| 0.588235
| 140
| 1,428
| 5.928571
| 0.314286
| 0.086747
| 0.110843
| 0.13012
| 0.753012
| 0.753012
| 0.753012
| 0.753012
| 0.753012
| 0.753012
| 0
| 0.028336
| 0.233894
| 1,428
| 33
| 198
| 43.272727
| 0.730347
| 0.031513
| 0
| 0.592593
| 1
| 0
| 0.304852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b20ddc17f3f99ef6cb191d7292f089f56babaf23
| 32,621
|
py
|
Python
|
src/models/pytorch/networks.py
|
AntonioGUJ/AirwaySegmentation_Keras
|
7da4c88dfde6f0dd2f8f181b2d3fd07dc2d28638
|
[
"MIT"
] | 15
|
2021-04-09T12:42:35.000Z
|
2022-03-22T09:01:57.000Z
|
src/models/pytorch/networks.py
|
id-b3/bronchinet
|
5acf5243da2a0e38041bbbf2ffd033291eff13a4
|
[
"MIT"
] | 13
|
2021-03-31T11:16:12.000Z
|
2022-02-10T06:11:16.000Z
|
src/models/pytorch/networks.py
|
id-b3/bronchinet
|
5acf5243da2a0e38041bbbf2ffd033291eff13a4
|
[
"MIT"
] | 9
|
2021-04-13T13:27:51.000Z
|
2022-02-25T07:03:25.000Z
|
from typing import Tuple, List, Dict, Union, Any
from torch.nn import Conv3d, MaxPool3d, Upsample, BatchNorm3d, Dropout3d, ReLU, LeakyReLU, Sigmoid
import torch.nn as nn
import torch
from common.exceptionmanager import catch_error_exception
from common.functionutil import ImagesUtil
from imageoperators.imageoperator import CropImage
from models.networks import UNetBase
# Names of the network classes defined in this module that callers may select.
LIST_AVAIL_NETWORKS = ['UNet3DOriginal',
                       'UNet3DGeneral',
                       'UNet3DPlugin',
                       ]
class UNet(UNetBase, nn.Module):
    """
    Bridge between ``UNetBase`` (framework-agnostic U-Net bookkeeping) and
    ``torch.nn.Module``; common base for the concrete 3D networks below.
    """

    def __init__(self,
                 size_image_in: Union[Tuple[int, int, int], Tuple[int, int]],
                 num_levels: int,
                 num_featmaps_in: int,
                 num_channels_in: int,
                 num_classes_out: int,
                 is_use_valid_convols: bool = False,
                 num_levels_valid_convols: int = UNetBase._num_levels_valid_convols_default,
                 ) -> None:
        super(UNet, self).__init__(size_image_in,
                                   num_levels,
                                   num_featmaps_in,
                                   num_channels_in,
                                   num_classes_out,
                                   is_use_valid_convols=is_use_valid_convols,
                                   num_levels_valid_convols=num_levels_valid_convols)
        nn.Module.__init__(self)

        # torch uses channels-first layouts, so convert the shapes computed
        # by the base class
        self._shape_input = ImagesUtil.get_shape_channels_first(self._shape_input)
        self._shape_output = ImagesUtil.get_shape_channels_first(self._shape_output)

    def get_network_input_args(self) -> Dict[str, Any]:
        # Subclasses return the kwargs needed to reconstruct themselves.
        raise NotImplementedError

    def _build_info_crop_where_merge(self) -> None:
        # For each skip-connection merge (one per 'upsample' operation),
        # record the spatial size to crop the encoder feature map to before
        # concatenation.  The list is reversed, presumably so it can be
        # indexed by decoding level — TODO confirm against callers.
        indexes_output_where_merge = [i for i, elem in enumerate(self._names_operations_layers_all)
                                      if elem == 'upsample']
        self._sizes_crop_where_merge = [self._sizes_output_all_layers[ind] for ind in indexes_output_where_merge][::-1]

    def _crop_image_2d(self, input: torch.Tensor, size_crop: Tuple[int, int]) -> torch.Tensor:
        # Crop the trailing two (spatial) dims of a channels-first tensor.
        size_input_image = input.shape[-2:]
        limits_out_image = self._get_limits_output_crop(size_input_image, size_crop)
        return CropImage._compute2d_channels_first(input, limits_out_image)

    def _crop_image_3d(self, input: torch.Tensor, size_crop: Tuple[int, int, int]) -> torch.Tensor:
        # Crop the trailing three (spatial) dims of a channels-first tensor.
        size_input_image = input.shape[-3:]
        limits_out_image = self._get_limits_output_crop(size_input_image, size_crop)
        return CropImage._compute3d_channels_first(input, limits_out_image)
class UNet3DOriginal(UNet):
    """
    Five-level 3D U-Net with explicitly named layers: per level two 3x3x3
    convolutions (padding 1), 2x2x2 max-pooling on the way down and nearest
    upsampling plus skip concatenation on the way up, ending in a 1x1x1
    classification convolution with sigmoid activation.
    """
    # Depth is fixed for this variant.
    _num_levels_fixed = 5

    def __init__(self,
                 size_image_in: Tuple[int, int, int],
                 num_featmaps_in: int = 16,
                 num_channels_in: int = 1,
                 num_classes_out: int = 1
                 ) -> None:
        super(UNet3DOriginal, self).__init__(size_image_in,
                                             self._num_levels_fixed,
                                             num_featmaps_in,
                                             num_channels_in,
                                             num_classes_out,
                                             is_use_valid_convols=False)
        self._build_model()

    def get_network_input_args(self) -> Dict[str, Any]:
        """Return the constructor kwargs needed to rebuild this network."""
        return {'size_image': self._size_image_in,
                'num_featmaps_in': self._num_featmaps_in,
                'num_channels_in': self._num_channels_in,
                'num_classes_out': self._num_classes_out}

    def _build_model(self) -> None:
        """Instantiate all layers; feature maps double at each deeper level."""
        # --- encoder ---
        num_featmaps_lev1 = self._num_featmaps_in
        self._convolution_down_lev1_1 = Conv3d(self._num_channels_in, num_featmaps_lev1, kernel_size=3, padding=1)
        self._convolution_down_lev1_2 = Conv3d(num_featmaps_lev1, num_featmaps_lev1, kernel_size=3, padding=1)
        self._pooling_down_lev1 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lev2 = 2 * num_featmaps_lev1
        self._convolution_down_lev2_1 = Conv3d(num_featmaps_lev1, num_featmaps_lev2, kernel_size=3, padding=1)
        self._convolution_down_lev2_2 = Conv3d(num_featmaps_lev2, num_featmaps_lev2, kernel_size=3, padding=1)
        self._pooling_down_lev2 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lev3 = 2 * num_featmaps_lev2
        self._convolution_down_lev3_1 = Conv3d(num_featmaps_lev2, num_featmaps_lev3, kernel_size=3, padding=1)
        self._convolution_down_lev3_2 = Conv3d(num_featmaps_lev3, num_featmaps_lev3, kernel_size=3, padding=1)
        self._pooling_down_lev3 = MaxPool3d(kernel_size=2, padding=0)

        num_featmaps_lev4 = 2 * num_featmaps_lev3
        self._convolution_down_lev4_1 = Conv3d(num_featmaps_lev3, num_featmaps_lev4, kernel_size=3, padding=1)
        self._convolution_down_lev4_2 = Conv3d(num_featmaps_lev4, num_featmaps_lev4, kernel_size=3, padding=1)
        self._pooling_down_lev4 = MaxPool3d(kernel_size=2, padding=0)

        # --- bottleneck ---
        num_featmaps_lev5 = 2 * num_featmaps_lev4
        self._convolution_down_lev5_1 = Conv3d(num_featmaps_lev4, num_featmaps_lev5, kernel_size=3, padding=1)
        self._convolution_down_lev5_2 = Conv3d(num_featmaps_lev5, num_featmaps_lev5, kernel_size=3, padding=1)
        self._upsample_up_lev5 = Upsample(scale_factor=2, mode='nearest')

        # --- decoder: inputs are upsampled features concatenated with skips ---
        num_feats_lev4pl5 = num_featmaps_lev4 + num_featmaps_lev5
        self._convolution_up_lev4_1 = Conv3d(num_feats_lev4pl5, num_featmaps_lev4, kernel_size=3, padding=1)
        self._convolution_up_lev4_2 = Conv3d(num_featmaps_lev4, num_featmaps_lev4, kernel_size=3, padding=1)
        self._upsample_up_lev4 = Upsample(scale_factor=2, mode='nearest')

        num_feats_lev3pl4 = num_featmaps_lev3 + num_featmaps_lev4
        self._convolution_up_lev3_1 = Conv3d(num_feats_lev3pl4, num_featmaps_lev3, kernel_size=3, padding=1)
        self._convolution_up_lev3_2 = Conv3d(num_featmaps_lev3, num_featmaps_lev3, kernel_size=3, padding=1)
        self._upsample_up_lev3 = Upsample(scale_factor=2, mode='nearest')

        num_feats_lev2pl3 = num_featmaps_lev2 + num_featmaps_lev3
        self._convolution_up_lev2_1 = Conv3d(num_feats_lev2pl3, num_featmaps_lev2, kernel_size=3, padding=1)
        self._convolution_up_lev2_2 = Conv3d(num_featmaps_lev2, num_featmaps_lev2, kernel_size=3, padding=1)
        self._upsample_up_lev2 = Upsample(scale_factor=2, mode='nearest')

        num_feats_lev1pl2 = num_featmaps_lev1 + num_featmaps_lev2
        self._convolution_up_lev1_1 = Conv3d(num_feats_lev1pl2, num_featmaps_lev1, kernel_size=3, padding=1)
        self._convolution_up_lev1_2 = Conv3d(num_featmaps_lev1, num_featmaps_lev1, kernel_size=3, padding=1)

        # 1x1x1 conv to class scores, then sigmoid
        self._classification_last = Conv3d(num_featmaps_lev1, self._num_classes_out, kernel_size=1, padding=0)
        self._activation_last = Sigmoid()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Encode, then decode with skip concatenations; return sigmoid map."""
        # NOTE(review): no hidden activations are applied between these
        # convolutions in this variant — confirm that is intended.
        hidden_nxt = self._convolution_down_lev1_1(input)
        hidden_nxt = self._convolution_down_lev1_2(hidden_nxt)
        hidden_skip_lev1 = hidden_nxt
        hidden_nxt = self._pooling_down_lev1(hidden_nxt)

        hidden_nxt = self._convolution_down_lev2_1(hidden_nxt)
        hidden_nxt = self._convolution_down_lev2_2(hidden_nxt)
        hidden_skip_lev2 = hidden_nxt
        hidden_nxt = self._pooling_down_lev2(hidden_nxt)

        hidden_nxt = self._convolution_down_lev3_1(hidden_nxt)
        hidden_nxt = self._convolution_down_lev3_2(hidden_nxt)
        hidden_skip_lev3 = hidden_nxt
        hidden_nxt = self._pooling_down_lev3(hidden_nxt)

        hidden_nxt = self._convolution_down_lev4_1(hidden_nxt)
        hidden_nxt = self._convolution_down_lev4_2(hidden_nxt)
        hidden_skip_lev4 = hidden_nxt
        hidden_nxt = self._pooling_down_lev4(hidden_nxt)

        hidden_nxt = self._convolution_down_lev5_1(hidden_nxt)
        hidden_nxt = self._convolution_down_lev5_2(hidden_nxt)
        hidden_nxt = self._upsample_up_lev5(hidden_nxt)

        # decoder: concatenate the skip on the channel dim before each pair
        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev4], dim=1)
        hidden_nxt = self._convolution_up_lev4_1(hidden_nxt)
        hidden_nxt = self._convolution_up_lev4_2(hidden_nxt)
        hidden_nxt = self._upsample_up_lev4(hidden_nxt)

        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev3], dim=1)
        hidden_nxt = self._convolution_up_lev3_1(hidden_nxt)
        hidden_nxt = self._convolution_up_lev3_2(hidden_nxt)
        hidden_nxt = self._upsample_up_lev3(hidden_nxt)

        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev2], dim=1)
        hidden_nxt = self._convolution_up_lev2_1(hidden_nxt)
        hidden_nxt = self._convolution_up_lev2_2(hidden_nxt)
        hidden_nxt = self._upsample_up_lev2(hidden_nxt)

        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev1], dim=1)
        hidden_nxt = self._convolution_up_lev1_1(hidden_nxt)
        hidden_nxt = self._convolution_up_lev1_2(hidden_nxt)

        output = self._activation_last(self._classification_last(hidden_nxt))
        return output
class UNet3DGeneral(UNet):
    """
    Configurable 3D U-Net: depth, feature maps, convolutions per level,
    kernel/pooling sizes, dropout and batch-normalization can all be set
    globally or per level.

    Review fixes applied (flagged inline with FIX):
    - ``super(UNet, ...)`` skipped ``UNet.__init__`` / ``nn.Module.__init__``,
      so assigning submodules would fail (siblings call their own class).
    - Default feature-map progression assigned past the end of a length-1
      list (IndexError).
    - Second and later convolutions per level used the wrong input channels.
    - Decoder input channels used levels ``i-1 + i`` instead of ``i + i+1``
      (compare ``num_feats_lev4pl5`` in ``UNet3DOriginal``).
    - Layers stored in plain Python lists were never registered with
      ``nn.Module``; they now live in ``nn.ModuleList`` containers.
    - ``forward`` indexed the upsample list (filled deepest-first) directly
      by level, and hard-coded crop index ``3``.
    """
    _num_levels_default = 5
    _num_featmaps_in_default = 16
    _num_channels_in_default = 1
    _num_classes_out_default = 1
    _dropout_rate_default = 0.2
    _type_activate_hidden_default = 'relu'
    _type_activate_output_default = 'sigmoid'
    _num_convols_levels_down_default = 2
    _num_convols_levels_up_default = 2
    _sizes_kernel_convols_levels_down_default = (3, 3, 3)
    _sizes_kernel_convols_levels_up_default = (3, 3, 3)
    _sizes_pooling_levels_default = (2, 2, 2)

    def __init__(self,
                 size_image_in: Tuple[int, int, int],
                 num_levels: int = _num_levels_default,
                 num_featmaps_in: int = _num_featmaps_in_default,
                 num_channels_in: int = _num_channels_in_default,
                 num_classes_out: int = _num_classes_out_default,
                 is_use_valid_convols: bool = False,
                 type_activate_hidden: str = _type_activate_hidden_default,
                 type_activate_output: str = _type_activate_output_default,
                 num_featmaps_levels: List[int] = None,
                 num_convols_levels_down: Union[int, Tuple[int, ...]] = _num_convols_levels_down_default,
                 num_convols_levels_up: Union[int, Tuple[int, ...]] = _num_convols_levels_up_default,
                 sizes_kernel_convols_levels_down: Union[Tuple[int, int, int], List[Tuple[int, int, int]]] =
                 _sizes_kernel_convols_levels_down_default,
                 sizes_kernel_convols_levels_up: Union[Tuple[int, int, int], List[Tuple[int, int, int]]] =
                 _sizes_kernel_convols_levels_up_default,
                 sizes_pooling_levels: Union[Tuple[int, int, int], List[Tuple[int, int, int]]] =
                 _sizes_pooling_levels_default,
                 is_disable_convol_pooling_axialdim_lastlevel: bool = False,
                 is_use_dropout: bool = False,
                 dropout_rate: float = _dropout_rate_default,
                 is_use_dropout_levels_down: Union[bool, List[bool]] = True,
                 is_use_dropout_levels_up: Union[bool, List[bool]] = True,
                 is_use_batchnormalize=False,
                 is_use_batchnormalize_levels_down: Union[bool, List[bool]] = True,
                 is_use_batchnormalize_levels_up: Union[bool, List[bool]] = True
                 ) -> None:
        # FIX: was 'super(UNet, self)', which skipped UNet.__init__ and thus
        # nn.Module.__init__ — submodule assignment in _build_model would fail.
        super(UNet3DGeneral, self).__init__(size_image_in,
                                            num_levels,
                                            num_featmaps_in,
                                            num_channels_in,
                                            num_classes_out,
                                            is_use_valid_convols=is_use_valid_convols)
        self._type_activate_hidden = type_activate_hidden
        self._type_activate_output = type_activate_output

        if num_featmaps_levels:
            self._num_featmaps_levels = num_featmaps_levels
        else:
            # default: double featmaps after every pooling
            # FIX: was an indexed assignment into a length-1 list (IndexError)
            self._num_featmaps_levels = [self._num_featmaps_in]
            for i in range(1, self._num_levels):
                self._num_featmaps_levels.append(2 * self._num_featmaps_levels[i - 1])

        # Broadcast scalar per-level settings to one entry per level.
        if isinstance(num_convols_levels_down, int):
            self._num_convols_levels_down = [num_convols_levels_down] * self._num_levels
        else:
            self._num_convols_levels_down = num_convols_levels_down
        if isinstance(num_convols_levels_up, int):
            self._num_convols_levels_up = [num_convols_levels_up] * (self._num_levels - 1)
        else:
            self._num_convols_levels_up = num_convols_levels_up

        if isinstance(sizes_kernel_convols_levels_down, tuple):
            self._sizes_kernel_convols_levels_down = [sizes_kernel_convols_levels_down] * self._num_levels
        else:
            self._sizes_kernel_convols_levels_down = sizes_kernel_convols_levels_down
        if isinstance(sizes_kernel_convols_levels_up, tuple):
            self._sizes_kernel_convols_levels_up = [sizes_kernel_convols_levels_up] * (self._num_levels - 1)
        else:
            self._sizes_kernel_convols_levels_up = sizes_kernel_convols_levels_up

        if isinstance(sizes_pooling_levels, tuple):
            self._sizes_pooling_levels = [sizes_pooling_levels] * self._num_levels
        else:
            self._sizes_pooling_levels = sizes_pooling_levels
        self._sizes_upsample_levels = self._sizes_pooling_levels[:-1]

        if is_disable_convol_pooling_axialdim_lastlevel:
            # Make the deepest level 2D-like: no convolution / pooling along
            # the axial (first spatial) dimension.
            size_kernel_convol_lastlevel = self._sizes_kernel_convols_levels_down[-1]
            self._sizes_kernel_convols_levels_down[-1] = (1, size_kernel_convol_lastlevel[1],
                                                          size_kernel_convol_lastlevel[2])
            size_pooling_lastlevel = self._sizes_pooling_levels[-1]
            self._sizes_pooling_levels[-1] = (1, size_pooling_lastlevel[1], size_pooling_lastlevel[2])

        self._is_use_dropout = is_use_dropout
        if is_use_dropout:
            self._dropout_rate = dropout_rate
            if isinstance(is_use_dropout_levels_down, bool):
                self._is_use_dropout_levels_down = [is_use_dropout_levels_down] * self._num_levels
            else:
                self._is_use_dropout_levels_down = is_use_dropout_levels_down
            if isinstance(is_use_dropout_levels_up, bool):
                self._is_use_dropout_levels_up = [is_use_dropout_levels_up] * (self._num_levels - 1)
            else:
                self._is_use_dropout_levels_up = is_use_dropout_levels_up

        self._is_use_batchnormalize = is_use_batchnormalize
        if is_use_batchnormalize:
            if isinstance(is_use_batchnormalize_levels_down, bool):
                self._is_use_batchnormalize_levels_down = [is_use_batchnormalize_levels_down] * self._num_levels
            else:
                self._is_use_batchnormalize_levels_down = is_use_batchnormalize_levels_down
            if isinstance(is_use_batchnormalize_levels_up, bool):
                self._is_use_batchnormalize_levels_up = [is_use_batchnormalize_levels_up] * (self._num_levels - 1)
            else:
                self._is_use_batchnormalize_levels_up = is_use_batchnormalize_levels_up

        self._build_model()

    def get_network_input_args(self) -> Dict[str, Any]:
        """Return the constructor kwargs needed to rebuild this network."""
        return {'size_image_in': self._size_image_in,
                'num_levels': self._num_levels,
                'num_featmaps_in': self._num_featmaps_in,
                'num_channels_in': self._num_channels_in,
                'num_classes_out': self._num_classes_out,
                'is_use_valid_convols': self._is_use_valid_convols}

    def _build_model(self) -> None:
        """Instantiate all per-level layers from the stored configuration."""
        value_padding_convols = 0 if self._is_use_valid_convols else 1

        # FIX: plain Python lists do not register submodules with nn.Module
        # (their parameters would be invisible to optimizers / state_dict);
        # use nn.ModuleList containers instead.
        self._convolutions_levels_down = nn.ModuleList(nn.ModuleList() for _ in range(self._num_levels))
        self._convolutions_levels_up = nn.ModuleList(nn.ModuleList() for _ in range(self._num_levels - 1))
        self._poolings_levels_down = nn.ModuleList()
        self._upsamples_levels_up = nn.ModuleList()
        self._batchnormalize_levels_down = nn.ModuleList(nn.ModuleList() for _ in range(self._num_levels))
        self._batchnormalize_levels_up = nn.ModuleList(nn.ModuleList() for _ in range(self._num_levels - 1))

        # ENCODING LAYERS
        for i_lev in range(self._num_levels):
            num_featmaps_in_level = self._num_channels_in if i_lev == 0 else self._num_featmaps_levels[i_lev - 1]
            num_featmaps_out_level = self._num_featmaps_levels[i_lev]

            for i_con in range(self._num_convols_levels_down[i_lev]):
                # FIX: was 'in_level if i_con else in_level' (no-op); convs
                # after the first take the level's output channels as input.
                num_featmaps_in_convol = num_featmaps_out_level if i_con else num_featmaps_in_level
                num_featmaps_out_convol = num_featmaps_out_level
                new_convolution = Conv3d(num_featmaps_in_convol, num_featmaps_out_convol,
                                         kernel_size=self._sizes_kernel_convols_levels_down[i_lev],
                                         padding=value_padding_convols)
                self._convolutions_levels_down[i_lev].append(new_convolution)

                if self._is_use_batchnormalize and self._is_use_batchnormalize_levels_down[i_lev]:
                    new_batchnormalize = BatchNorm3d(num_featmaps_out_convol)
                    self._batchnormalize_levels_down[i_lev].append(new_batchnormalize)

            if (i_lev != self._num_levels - 1):
                new_pooling = MaxPool3d(kernel_size=self._sizes_pooling_levels[i_lev], padding=0)
                self._poolings_levels_down.append(new_pooling)

        # DECODING LAYERS (built from the deepest level outwards)
        for i_lev in range(self._num_levels - 2, -1, -1):
            # FIX: input is the upsampled deeper features concatenated with
            # the skip, i.e. levels i_lev and i_lev + 1 (was i_lev - 1 + i_lev).
            num_featmaps_in_level = self._num_featmaps_levels[i_lev] + self._num_featmaps_levels[i_lev + 1]
            num_featmaps_out_level = self._num_featmaps_levels[i_lev]

            new_upsample = Upsample(scale_factor=self._sizes_upsample_levels[i_lev], mode='nearest')
            self._upsamples_levels_up.append(new_upsample)

            for i_con in range(self._num_convols_levels_up[i_lev]):
                # FIX: same no-op conditional as in the encoder (see above).
                num_featmaps_in_convol = num_featmaps_out_level if i_con else num_featmaps_in_level
                num_featmaps_out_convol = num_featmaps_out_level
                new_convolution = Conv3d(num_featmaps_in_convol, num_featmaps_out_convol,
                                         kernel_size=self._sizes_kernel_convols_levels_up[i_lev],
                                         padding=value_padding_convols)
                self._convolutions_levels_up[i_lev].append(new_convolution)

                if self._is_use_batchnormalize and self._is_use_batchnormalize_levels_up[i_lev]:
                    new_batchnormalize = BatchNorm3d(num_featmaps_out_convol)
                    self._batchnormalize_levels_up[i_lev].append(new_batchnormalize)

        self._classification_last = Conv3d(self._num_featmaps_in, self._num_classes_out, kernel_size=1, padding=0)

        if self._is_use_dropout:
            self._dropout_all_levels = Dropout3d(self._dropout_rate, inplace=True)

        if self._type_activate_hidden == 'relu':
            self._activation_hidden = ReLU(inplace=True)
        elif self._type_activate_hidden == 'leaky_relu':
            self._activation_hidden = LeakyReLU(inplace=True)
        elif self._type_activate_hidden == 'none':
            def func_activation_none(input: torch.Tensor) -> torch.Tensor:
                return input
            self._activation_hidden = func_activation_none
        else:
            message = 'Type activation hidden not existing: \'%s\'' % (self._type_activate_hidden)
            catch_error_exception(message)

        if self._type_activate_output == 'sigmoid':
            self._activation_last = Sigmoid()
        elif self._type_activate_output == 'linear':
            def func_activation_linear(input: torch.Tensor) -> torch.Tensor:
                return input
            self._activation_last = func_activation_linear
        else:
            message = 'Type activation output not existing: \'%s\' ' % (self._type_activate_output)
            catch_error_exception(message)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Encode, then decode with per-level skip concatenations."""
        hidden_nxt = input
        hidden_skips_levels = []

        # ENCODING LAYERS
        for i_lev in range(self._num_levels):
            for i_con in range(self._num_convols_levels_down[i_lev]):
                hidden_nxt = self._activation_hidden(self._convolutions_levels_down[i_lev][i_con](hidden_nxt))
                if self._is_use_batchnormalize and self._is_use_batchnormalize_levels_down[i_lev]:
                    hidden_nxt = self._batchnormalize_levels_down[i_lev][i_con](hidden_nxt)

            if self._is_use_dropout and self._is_use_dropout_levels_down[i_lev]:
                hidden_nxt = self._dropout_all_levels(hidden_nxt)

            if (i_lev != self._num_levels - 1):
                hidden_skips_levels.append(hidden_nxt)
                hidden_nxt = self._poolings_levels_down[i_lev](hidden_nxt)

        # DECODING LAYERS
        for i_lev in range(self._num_levels - 2, -1, -1):
            # FIX: _upsamples_levels_up was appended for i_lev descending from
            # num_levels - 2, so translate the level into that list order.
            hidden_nxt = self._upsamples_levels_up[self._num_levels - 2 - i_lev](hidden_nxt)

            hidden_skip_this = hidden_skips_levels[i_lev]
            if self._is_use_valid_convols:
                # FIX: was hard-coded index 3; _sizes_crop_where_merge is
                # reversed in _build_info_crop_where_merge, so index by level.
                hidden_skip_this = self._crop_image_3d(hidden_skip_this, self._sizes_crop_where_merge[i_lev])
            hidden_nxt = torch.cat([hidden_nxt, hidden_skip_this], dim=1)

            for i_con in range(self._num_convols_levels_up[i_lev]):
                hidden_nxt = self._activation_hidden(self._convolutions_levels_up[i_lev][i_con](hidden_nxt))
                if self._is_use_batchnormalize and self._is_use_batchnormalize_levels_up[i_lev]:
                    hidden_nxt = self._batchnormalize_levels_up[i_lev][i_con](hidden_nxt)

            if self._is_use_dropout and self._is_use_dropout_levels_up[i_lev]:
                hidden_nxt = self._dropout_all_levels(hidden_nxt)

        output = self._activation_last(self._classification_last(hidden_nxt))
        return output
class UNet3DPlugin(UNet):
    """3D U-Net with a fixed depth of 5 resolution levels.

    The contracting path doubles the number of feature maps at each level;
    the expanding path upsamples and merges the corresponding skip tensor by
    channel concatenation. When valid (unpadded) convolutions are enabled,
    skip tensors are cropped before merging so spatial sizes match.
    """
    # Architecture constants fixed for this plugin variant.
    _num_levels_fixed = 5
    _num_levels_valid_convols_fixed = 3
    _num_featmaps_in_default = 16
    _num_channels_in_default = 1
    _num_classes_out_default = 1
    _dropout_rate_default = 0.2
    _type_activate_hidden_default = 'relu'
    _type_activate_output_default = 'sigmoid'

    def __init__(self,
                 size_image_in: Tuple[int, int, int],
                 num_featmaps_in: int = _num_featmaps_in_default,
                 num_channels_in: int = _num_channels_in_default,
                 num_classes_out: int = _num_classes_out_default,
                 is_use_valid_convols: bool = False,
                 is_valid_convols_deep_levels: bool = False
                 ) -> None:
        """
        :param size_image_in: spatial size of the input volume (dim0, dim1, dim2).
        :param num_featmaps_in: number of feature maps at the first level.
        :param num_channels_in: number of channels of the input volume.
        :param num_classes_out: number of output channels / classes.
        :param is_use_valid_convols: use valid (padding=0) convolutions in the
            shallow levels (1-3); skip tensors are then cropped before merging.
        :param is_valid_convols_deep_levels: use valid convolutions also in the
            two deepest levels (4-5).
        """
        super(UNet3DPlugin, self).__init__(size_image_in,
                                           self._num_levels_fixed,
                                           num_featmaps_in,
                                           num_channels_in,
                                           num_classes_out,
                                           is_use_valid_convols=is_use_valid_convols,
                                           num_levels_valid_convols=self._num_levels_valid_convols_fixed)
        self._type_activate_hidden = self._type_activate_hidden_default
        self._type_activate_output = self._type_activate_output_default
        self._is_valid_convols_deep_levels = is_valid_convols_deep_levels
        self._build_model()

    def get_network_input_args(self) -> Dict[str, Any]:
        """Return the constructor kwargs needed to re-create this network."""
        return {'size_image_in': self._size_image_in,
                'num_featmaps_in': self._num_featmaps_in,
                'num_channels_in': self._num_channels_in,
                'num_classes_out': self._num_classes_out,
                'is_use_valid_convols': self._is_use_valid_convols,
                # FIX: this key was previously omitted, so a network rebuilt
                # from these args silently lost the deep-levels padding choice
                # and fell back to the default (False).
                'is_valid_convols_deep_levels': self._is_valid_convols_deep_levels}

    def _build_model(self) -> None:
        """Instantiate all layers: 5 down levels, 4 up levels, 1x1x1 classifier."""
        # Shallow levels follow the general valid-convols flag; the two deepest
        # levels have their own flag so they can keep 'same' padding.
        value_padding = 0 if self._is_use_valid_convols else 1
        value_padding_deep_levels = 0 if self._is_valid_convols_deep_levels else 1

        # Contracting path: feature maps double at every level.
        num_featmaps_lev1 = self._num_featmaps_in
        self._convolution_down_lev1_1 = Conv3d(self._num_channels_in, num_featmaps_lev1, kernel_size=3,
                                               padding=value_padding)
        self._convolution_down_lev1_2 = Conv3d(num_featmaps_lev1, num_featmaps_lev1, kernel_size=3,
                                               padding=value_padding)
        self._pooling_down_lev1 = MaxPool3d(kernel_size=2, padding=0)
        num_featmaps_lev2 = 2 * num_featmaps_lev1
        self._convolution_down_lev2_1 = Conv3d(num_featmaps_lev1, num_featmaps_lev2, kernel_size=3,
                                               padding=value_padding)
        self._convolution_down_lev2_2 = Conv3d(num_featmaps_lev2, num_featmaps_lev2, kernel_size=3,
                                               padding=value_padding)
        self._pooling_down_lev2 = MaxPool3d(kernel_size=2, padding=0)
        num_featmaps_lev3 = 2 * num_featmaps_lev2
        self._convolution_down_lev3_1 = Conv3d(num_featmaps_lev2, num_featmaps_lev3, kernel_size=3,
                                               padding=value_padding)
        self._convolution_down_lev3_2 = Conv3d(num_featmaps_lev3, num_featmaps_lev3, kernel_size=3,
                                               padding=value_padding)
        self._pooling_down_lev3 = MaxPool3d(kernel_size=2, padding=0)
        num_featmaps_lev4 = 2 * num_featmaps_lev3
        self._convolution_down_lev4_1 = Conv3d(num_featmaps_lev3, num_featmaps_lev4, kernel_size=3,
                                               padding=value_padding_deep_levels)
        self._convolution_down_lev4_2 = Conv3d(num_featmaps_lev4, num_featmaps_lev4, kernel_size=3,
                                               padding=value_padding_deep_levels)
        self._pooling_down_lev4 = MaxPool3d(kernel_size=2, padding=0)
        num_featmaps_lev5 = 2 * num_featmaps_lev4
        self._convolution_down_lev5_1 = Conv3d(num_featmaps_lev4, num_featmaps_lev5, kernel_size=3,
                                               padding=value_padding_deep_levels)
        self._convolution_down_lev5_2 = Conv3d(num_featmaps_lev5, num_featmaps_lev5, kernel_size=3,
                                               padding=value_padding_deep_levels)

        # Expanding path: input channels of the first conv per level equal
        # upsampled channels plus the concatenated skip channels.
        self._upsample_up_lev5 = Upsample(scale_factor=2, mode='nearest')
        num_feats_lev4pl5 = num_featmaps_lev4 + num_featmaps_lev5
        self._convolution_up_lev4_1 = Conv3d(num_feats_lev4pl5, num_featmaps_lev4, kernel_size=3,
                                             padding=value_padding_deep_levels)
        self._convolution_up_lev4_2 = Conv3d(num_featmaps_lev4, num_featmaps_lev4, kernel_size=3,
                                             padding=value_padding_deep_levels)
        self._upsample_up_lev4 = Upsample(scale_factor=2, mode='nearest')
        num_feats_lev3pl4 = num_featmaps_lev3 + num_featmaps_lev4
        self._convolution_up_lev3_1 = Conv3d(num_feats_lev3pl4, num_featmaps_lev3, kernel_size=3,
                                             padding=value_padding)
        self._convolution_up_lev3_2 = Conv3d(num_featmaps_lev3, num_featmaps_lev3, kernel_size=3,
                                             padding=value_padding)
        self._upsample_up_lev3 = Upsample(scale_factor=2, mode='nearest')
        num_feats_lev2pl3 = num_featmaps_lev2 + num_featmaps_lev3
        self._convolution_up_lev2_1 = Conv3d(num_feats_lev2pl3, num_featmaps_lev2, kernel_size=3,
                                             padding=value_padding)
        self._convolution_up_lev2_2 = Conv3d(num_featmaps_lev2, num_featmaps_lev2, kernel_size=3,
                                             padding=value_padding)
        self._upsample_up_lev2 = Upsample(scale_factor=2, mode='nearest')
        num_feats_lev1pl2 = num_featmaps_lev1 + num_featmaps_lev2
        self._convolution_up_lev1_1 = Conv3d(num_feats_lev1pl2, num_featmaps_lev1, kernel_size=3,
                                             padding=value_padding)
        self._convolution_up_lev1_2 = Conv3d(num_featmaps_lev1, num_featmaps_lev1, kernel_size=3,
                                             padding=value_padding)

        # Final 1x1x1 convolution maps level-1 features to the output classes.
        self._classification_last = Conv3d(num_featmaps_lev1, self._num_classes_out, kernel_size=1, padding=0)

        if self._type_activate_hidden == 'relu':
            self._activation_hidden = ReLU(inplace=True)
        elif self._type_activate_hidden == 'leaky_relu':
            self._activation_hidden = LeakyReLU(inplace=True)
        elif self._type_activate_hidden == 'linear':
            def func_activation_linear(input: torch.Tensor) -> torch.Tensor:
                return input
            self._activation_hidden = func_activation_linear
        else:
            message = 'Type activation hidden not existing: \'%s\'' % (self._type_activate_hidden)
            catch_error_exception(message)

        if self._type_activate_output == 'sigmoid':
            self._activation_last = Sigmoid()
        elif self._type_activate_output == 'linear':
            def func_activation_linear(input: torch.Tensor) -> torch.Tensor:
                return input
            self._activation_last = func_activation_linear
        else:
            message = 'Type activation output not existing: \'%s\' ' % (self._type_activate_output)
            catch_error_exception(message)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Run the 5-level encoder-decoder and return the classified output.

        :param input: 5D tensor (batch, channels, dim0, dim1, dim2).
        :return: output tensor with `self._num_classes_out` channels.
        """
        # Contracting path, keeping a skip tensor per level before pooling.
        hidden_nxt = self._activation_hidden(self._convolution_down_lev1_1(input))
        hidden_nxt = self._activation_hidden(self._convolution_down_lev1_2(hidden_nxt))
        hidden_skip_lev1 = hidden_nxt
        hidden_nxt = self._pooling_down_lev1(hidden_nxt)
        hidden_nxt = self._activation_hidden(self._convolution_down_lev2_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_down_lev2_2(hidden_nxt))
        hidden_skip_lev2 = hidden_nxt
        hidden_nxt = self._pooling_down_lev2(hidden_nxt)
        hidden_nxt = self._activation_hidden(self._convolution_down_lev3_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_down_lev3_2(hidden_nxt))
        hidden_skip_lev3 = hidden_nxt
        hidden_nxt = self._pooling_down_lev3(hidden_nxt)
        hidden_nxt = self._activation_hidden(self._convolution_down_lev4_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_down_lev4_2(hidden_nxt))
        hidden_skip_lev4 = hidden_nxt
        hidden_nxt = self._pooling_down_lev4(hidden_nxt)
        hidden_nxt = self._activation_hidden(self._convolution_down_lev5_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_down_lev5_2(hidden_nxt))

        # Expanding path: upsample, crop the skip when valid convols shrank it,
        # concatenate along channels, then two convolutions per level.
        hidden_nxt = self._upsample_up_lev5(hidden_nxt)
        if self._is_use_valid_convols:
            hidden_skip_lev4 = self._crop_image_3d(hidden_skip_lev4, self._sizes_crop_where_merge[3])
        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev4], dim=1)
        hidden_nxt = self._activation_hidden(self._convolution_up_lev4_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_up_lev4_2(hidden_nxt))
        hidden_nxt = self._upsample_up_lev4(hidden_nxt)
        if self._is_use_valid_convols:
            hidden_skip_lev3 = self._crop_image_3d(hidden_skip_lev3, self._sizes_crop_where_merge[2])
        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev3], dim=1)
        hidden_nxt = self._activation_hidden(self._convolution_up_lev3_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_up_lev3_2(hidden_nxt))
        hidden_nxt = self._upsample_up_lev3(hidden_nxt)
        if self._is_use_valid_convols:
            hidden_skip_lev2 = self._crop_image_3d(hidden_skip_lev2, self._sizes_crop_where_merge[1])
        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev2], dim=1)
        hidden_nxt = self._activation_hidden(self._convolution_up_lev2_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_up_lev2_2(hidden_nxt))
        hidden_nxt = self._upsample_up_lev2(hidden_nxt)
        if self._is_use_valid_convols:
            hidden_skip_lev1 = self._crop_image_3d(hidden_skip_lev1, self._sizes_crop_where_merge[0])
        hidden_nxt = torch.cat([hidden_nxt, hidden_skip_lev1], dim=1)
        hidden_nxt = self._activation_hidden(self._convolution_up_lev1_1(hidden_nxt))
        hidden_nxt = self._activation_hidden(self._convolution_up_lev1_2(hidden_nxt))

        output = self._activation_last(self._classification_last(hidden_nxt))
        return output
| 53.56486
| 119
| 0.67058
| 4,135
| 32,621
| 4.7289
| 0.041838
| 0.085507
| 0.048328
| 0.041424
| 0.881763
| 0.839061
| 0.783318
| 0.76235
| 0.751662
| 0.712335
| 0
| 0.024037
| 0.257748
| 32,621
| 608
| 120
| 53.652961
| 0.783546
| 0.003311
| 0
| 0.504032
| 0
| 0
| 0.017566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042339
| false
| 0
| 0.016129
| 0.014113
| 0.133065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b7e6db9c4273e4fc7abb8446c256bf64257ad556
| 7,234
|
py
|
Python
|
tests/test_case_when.py
|
MojixCoder/tortoise-orm
|
2f6396815a603f515d48648ebf339ffad4d15176
|
[
"Apache-2.0"
] | 2,847
|
2018-08-27T12:02:21.000Z
|
2022-03-31T01:30:40.000Z
|
tests/test_case_when.py
|
MojixCoder/tortoise-orm
|
2f6396815a603f515d48648ebf339ffad4d15176
|
[
"Apache-2.0"
] | 983
|
2018-08-24T16:42:41.000Z
|
2022-03-30T05:14:49.000Z
|
tests/test_case_when.py
|
MojixCoder/tortoise-orm
|
2f6396815a603f515d48648ebf339ffad4d15176
|
[
"Apache-2.0"
] | 323
|
2018-09-04T23:38:42.000Z
|
2022-03-31T06:49:17.000Z
|
from tests.testmodels import IntFields
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.expressions import Case, F, Q, When
from tortoise.functions import Coalesce
class TestCaseWhen(test.TestCase):
    """SQL-rendering tests for Case/When annotations.

    Every test checks two expected strings: one for the MySQL dialect
    (backtick identifier quoting) and one for all other dialects
    (double-quote identifier quoting).
    """

    async def setUp(self):
        self.intfields = [await IntFields.create(intnum=val) for val in range(10)]
        self.db = Tortoise.get_connection("models")

    def _render_sql(self, category):
        # Render the annotated queryset to its SQL string.
        return IntFields.all().annotate(category=category).values("intnum", "category").sql()

    def _assert_sql(self, sql, mysql_expected, default_expected):
        # Compare against the expected string for the active dialect.
        if self.db.schema_generator.DIALECT == "mysql":
            self.assertEqual(sql, mysql_expected)
        else:
            self.assertEqual(sql, default_expected)

    async def test_single_when(self):
        category = Case(When(intnum__gte=8, then="big"), default="default")
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN 'big' ELSE 'default' END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN 'big' ELSE 'default' END \"category\" FROM \"intfields\"",
        )

    async def test_multi_when(self):
        category = Case(
            When(intnum__gte=8, then="big"), When(intnum__lte=2, then="small"), default="default"
        )
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN 'big' WHEN `intnum`<=2 THEN 'small' ELSE 'default' END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN 'big' WHEN \"intnum\"<=2 THEN 'small' ELSE 'default' END \"category\" FROM \"intfields\"",
        )

    async def test_q_object_when(self):
        category = Case(When(Q(intnum__gt=2, intnum__lt=8), then="middle"), default="default")
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>2 AND `intnum`<8 THEN 'middle' ELSE 'default' END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">2 AND \"intnum\"<8 THEN 'middle' ELSE 'default' END \"category\" FROM \"intfields\"",
        )

    async def test_F_then(self):
        category = Case(When(intnum__gte=8, then=F("intnum_null")), default="default")
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN `intnum_null` ELSE 'default' END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN \"intnum_null\" ELSE 'default' END \"category\" FROM \"intfields\"",
        )

    async def test_AE_then(self):
        # AE: ArithmeticExpression
        category = Case(When(intnum__gte=8, then=F("intnum") + 1), default="default")
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN `intnum`+1 ELSE 'default' END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN \"intnum\"+1 ELSE 'default' END \"category\" FROM \"intfields\"",
        )

    async def test_func_then(self):
        category = Case(When(intnum__gte=8, then=Coalesce("intnum_null", 10)), default="default")
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN COALESCE(`intnum_null`,10) ELSE 'default' END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN COALESCE(\"intnum_null\",10) ELSE 'default' END \"category\" FROM \"intfields\"",
        )

    async def test_F_default(self):
        category = Case(When(intnum__gte=8, then="big"), default=F("intnum_null"))
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN 'big' ELSE `intnum_null` END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN 'big' ELSE \"intnum_null\" END \"category\" FROM \"intfields\"",
        )

    async def test_AE_default(self):
        # AE: ArithmeticExpression
        category = Case(When(intnum__gte=8, then=8), default=F("intnum") + 1)
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN 8 ELSE `intnum`+1 END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN 8 ELSE \"intnum\"+1 END \"category\" FROM \"intfields\"",
        )

    async def test_func_default(self):
        category = Case(When(intnum__gte=8, then=8), default=Coalesce("intnum_null", 10))
        self._assert_sql(
            self._render_sql(category),
            "SELECT `intnum` `intnum`,CASE WHEN `intnum`>=8 THEN 8 ELSE COALESCE(`intnum_null`,10) END `category` FROM `intfields`",
            "SELECT \"intnum\" \"intnum\",CASE WHEN \"intnum\">=8 THEN 8 ELSE COALESCE(\"intnum_null\",10) END \"category\" FROM \"intfields\"",
        )

    async def test_case_when_in_where(self):
        # A Case annotation can also be referenced inside the WHERE clause.
        category = Case(
            When(intnum__gte=8, then="big"), When(intnum__lte=2, then="small"), default="middle"
        )
        sql = (
            IntFields.all()
            .annotate(category=category)
            .filter(category__in=["big", "small"])
            .values("intnum")
            .sql()
        )
        self._assert_sql(
            sql,
            "SELECT `intnum` `intnum` FROM `intfields` WHERE CASE WHEN `intnum`>=8 THEN 'big' WHEN `intnum`<=2 THEN 'small' ELSE 'middle' END IN ('big','small')",
            "SELECT \"intnum\" \"intnum\" FROM \"intfields\" WHERE CASE WHEN \"intnum\">=8 THEN 'big' WHEN \"intnum\"<=2 THEN 'small' ELSE 'middle' END IN ('big','small')",
        )
| 54.390977
| 186
| 0.641968
| 896
| 7,234
| 5.066964
| 0.079241
| 0.077093
| 0.089427
| 0.101322
| 0.896476
| 0.884802
| 0.870925
| 0.870925
| 0.870925
| 0.837004
| 0
| 0.01141
| 0.212469
| 7,234
| 132
| 187
| 54.80303
| 0.785501
| 0.006774
| 0
| 0.46789
| 0
| 0.119266
| 0.358257
| 0.014481
| 0
| 0
| 0
| 0
| 0.091743
| 1
| 0
| false
| 0
| 0.045872
| 0
| 0.055046
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d8032c98304a1e066abdd9a808af2b54d8f5eef
| 1,180
|
py
|
Python
|
wechatpayv3/merchantrisk.py
|
MacGuffinLife/wechatpayv3
|
964abc59604fae10e68c9735b2af1a242772ab9d
|
[
"MIT"
] | null | null | null |
wechatpayv3/merchantrisk.py
|
MacGuffinLife/wechatpayv3
|
964abc59604fae10e68c9735b2af1a242772ab9d
|
[
"MIT"
] | null | null | null |
wechatpayv3/merchantrisk.py
|
MacGuffinLife/wechatpayv3
|
964abc59604fae10e68c9735b2af1a242772ab9d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .type import RequestType
def merchantrisk_callback_create(self, notify_url=None):
    """Register the merchant-violation notification callback address (POST).

    :param notify_url: notification address, e.g. 'https://www.weixin.qq.com/wxpay/pay.php'
    """
    path = '/v3/merchant-risk-manage/violation-notifications'
    # Only include the URL in the payload when the caller provided one.
    params = {'notify_url': notify_url} if notify_url else {}
    return self._core.request(path, method=RequestType.POST, data=params)
def merchantrisk_callback_query(self):
    """Fetch the currently configured merchant-violation notification callback address (GET)."""
    return self._core.request('/v3/merchant-risk-manage/violation-notifications')
def merchantrisk_callback_update(self, notify_url=None):
    """Modify the merchant-violation notification callback address (PUT).

    :param notify_url: notification address, e.g. 'https://www.weixin.qq.com/wxpay/pay.php'
    """
    path = '/v3/merchant-risk-manage/violation-notifications'
    # Only include the URL in the payload when the caller provided one.
    params = {'notify_url': notify_url} if notify_url else {}
    return self._core.request(path, method=RequestType.PUT, data=params)
def merchantrisk_callback_delete(self):
    """Remove the merchant-violation notification callback address (DELETE).

    NOTE(review): the original docstring said "query"; the request method is
    DELETE, so this documents the actual operation.
    """
    return self._core.request('/v3/merchant-risk-manage/violation-notifications',
                              method=RequestType.DELETE)
| 29.5
| 73
| 0.7
| 144
| 1,180
| 5.583333
| 0.333333
| 0.11194
| 0.114428
| 0.089552
| 0.782338
| 0.700249
| 0.700249
| 0.700249
| 0.700249
| 0.700249
| 0
| 0.005025
| 0.15678
| 1,180
| 39
| 74
| 30.25641
| 0.803015
| 0.189831
| 0
| 0.526316
| 0
| 0
| 0.231189
| 0.209378
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.052632
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.