hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
a444dbdd7b9b2139a6a086ee04f80593283112d2
10,480
py
Python
laxy_backend/data/genomics/genomes.py
MonashBioinformaticsPlatform/laxy
fa9cfc3d9b2738ec0b9f471ddf4a4235cb6eb594
[ "Apache-2.0" ]
1
2020-11-19T15:10:42.000Z
2020-11-19T15:10:42.000Z
laxy_backend/data/genomics/genomes.py
MonashBioinformaticsPlatform/laxy
fa9cfc3d9b2738ec0b9f471ddf4a4235cb6eb594
[ "Apache-2.0" ]
177
2018-10-28T23:01:24.000Z
2022-02-26T06:35:29.000Z
laxy_backend/data/genomics/genomes.py
MonashBioinformaticsPlatform/laxy
fa9cfc3d9b2738ec0b9f471ddf4a4235cb6eb594
[ "Apache-2.0" ]
2
2019-03-14T10:06:19.000Z
2020-08-24T19:41:28.000Z
# This maps reference identifiers, sent via web API requests, to a relative path containing # the reference genome (iGenomes directory structure), like {id: path}. # TODO: This should be a default config somewhere, pipeline/plugin specific. # Each compute resource should be able to override this setting. # For Python backend, validation REFERENCE_GENOME_MAPPINGS = { # Using externally provided genome files instead of this # "Acinetobacter_baumannii/Custom/ATCC19606": "Acinetobacter_baumannii/Custom/ATCC19606", # "Acinetobacter_baumannii/Custom/AB307-0294": "Acinetobacter_baumannii/Custom/AB307-0294", "Aedes_aegypti/NCBI/GCF_002204515.2_AaegL5.0": "Aedes_aegypti/NCBI/GCF_002204515.2_AaegL5.0", "Aedes_aegypti/VectorBase/AaegL5.2": "Aedes_aegypti/VectorBase/AaegL5.2", "Arabidopsis_thaliana/Ensembl/TAIR10": "Arabidopsis_thaliana/Ensembl/TAIR10", "Arabidopsis_thaliana/Ensembl/TAIR9": "Arabidopsis_thaliana/Ensembl/TAIR9", "Arabidopsis_thaliana/NCBI/TAIR10": "Arabidopsis_thaliana/NCBI/TAIR10", "Arabidopsis_thaliana/NCBI/build9.1": "Arabidopsis_thaliana/NCBI/build9.1", "Bacillus_cereus_ATCC_10987/NCBI/2004-02-13": "Bacillus_cereus_ATCC_10987/NCBI/2004-02-13", "Bacillus_subtilis_168/Ensembl/EB2": "Bacillus_subtilis_168/Ensembl/EB2", "Bos_taurus/Ensembl/Btau_4.0": "Bos_taurus/Ensembl/Btau_4.0", "Bos_taurus/Ensembl/UMD3.1": "Bos_taurus/Ensembl/UMD3.1", "Bos_taurus/NCBI/Btau_4.2": "Bos_taurus/NCBI/Btau_4.2", "Bos_taurus/NCBI/Btau_4.6.1": "Bos_taurus/NCBI/Btau_4.6.1", "Bos_taurus/NCBI/UMD_3.1": "Bos_taurus/NCBI/UMD_3.1", "Bos_taurus/NCBI/UMD_3.1.1": "Bos_taurus/NCBI/UMD_3.1.1", "Bos_taurus/UCSC/bosTau4": "Bos_taurus/UCSC/bosTau4", "Bos_taurus/UCSC/bosTau6": "Bos_taurus/UCSC/bosTau6", "Bos_taurus/UCSC/bosTau7": "Bos_taurus/UCSC/bosTau7", "Bos_taurus/UCSC/bosTau8": "Bos_taurus/UCSC/bosTau8", "Caenorhabditis_elegans/Ensembl/WBcel215": "Caenorhabditis_elegans/Ensembl/WBcel215", "Caenorhabditis_elegans/Ensembl/WBcel235": "Caenorhabditis_elegans/Ensembl/WBcel235", 
"Caenorhabditis_elegans/Ensembl/WS210": "Caenorhabditis_elegans/Ensembl/WS210", "Caenorhabditis_elegans/Ensembl/WS220": "Caenorhabditis_elegans/Ensembl/WS220", "Caenorhabditis_elegans/NCBI/WS190": "Caenorhabditis_elegans/NCBI/WS190", "Caenorhabditis_elegans/NCBI/WS195": "Caenorhabditis_elegans/NCBI/WS195", "Caenorhabditis_elegans/UCSC/ce10": "Caenorhabditis_elegans/UCSC/ce10", "Caenorhabditis_elegans/UCSC/ce6": "Caenorhabditis_elegans/UCSC/ce6", "Canis_familiaris/Ensembl/BROADD2": "Canis_familiaris/Ensembl/BROADD2", "Canis_familiaris/Ensembl/CanFam3.1": "Canis_familiaris/Ensembl/CanFam3.1", "Canis_familiaris/NCBI/build2.1": "Canis_familiaris/NCBI/build2.1", "Canis_familiaris/NCBI/build3.1": "Canis_familiaris/NCBI/build3.1", "Canis_familiaris/UCSC/canFam2": "Canis_familiaris/UCSC/canFam2", "Canis_familiaris/UCSC/canFam3": "Canis_familiaris/UCSC/canFam3", "Chelonia_mydas/NCBI/CheMyd_1.0": "Chelonia_mydas/NCBI/CheMyd_1.0", "Danio_rerio/Ensembl/GRCz11.97-noalt": "Danio_rerio/Ensembl/GRCz11.97-noalt", # "Danio_rerio/Ensembl/GRCz11.97": "Danio_rerio/Ensembl/GRCz11.97", "Danio_rerio/Ensembl/GRCz10": "Danio_rerio/Ensembl/GRCz10", "Danio_rerio/Ensembl/Zv9": "Danio_rerio/Ensembl/Zv9", "Danio_rerio/NCBI/GRCz10": "Danio_rerio/NCBI/GRCz10", "Danio_rerio/NCBI/Zv9": "Danio_rerio/NCBI/Zv9", "Danio_rerio/UCSC/danRer10": "Danio_rerio/UCSC/danRer10", "Danio_rerio/UCSC/danRer7": "Danio_rerio/UCSC/danRer7", "Drosophila_melanogaster/Ensembl/BDGP5": "Drosophila_melanogaster/Ensembl/BDGP5", "Drosophila_melanogaster/Ensembl/BDGP5.25": "Drosophila_melanogaster/Ensembl/BDGP5.25", "Drosophila_melanogaster/Ensembl/BDGP6": "Drosophila_melanogaster/Ensembl/BDGP6", "Drosophila_melanogaster/NCBI/build4.1": "Drosophila_melanogaster/NCBI/build4.1", "Drosophila_melanogaster/NCBI/build5": "Drosophila_melanogaster/NCBI/build5", "Drosophila_melanogaster/NCBI/build5.3": "Drosophila_melanogaster/NCBI/build5.3", "Drosophila_melanogaster/NCBI/build5.41": "Drosophila_melanogaster/NCBI/build5.41", 
"Drosophila_melanogaster/UCSC/dm3": "Drosophila_melanogaster/UCSC/dm3", "Drosophila_melanogaster/UCSC/dm6": "Drosophila_melanogaster/UCSC/dm6", "Enterobacteriophage_lambda/NCBI/1993-04-28": "Enterobacteriophage_lambda/NCBI/1993-04-28", "Equus_caballus/Ensembl/EquCab2": "Equus_caballus/Ensembl/EquCab2", "Equus_caballus/NCBI/EquCab2.0": "Equus_caballus/NCBI/EquCab2.0", "Equus_caballus/UCSC/equCab2": "Equus_caballus/UCSC/equCab2", # Deprecated iGenomes versions in favor of Ensembl which has gff3 annotations # "Escherichia_coli_K_12_DH10B/Ensembl/EB1": "Escherichia_coli_K_12_DH10B/Ensembl/EB1", # "Escherichia_coli_K_12_DH10B/NCBI/2008-03-17": "Escherichia_coli_K_12_DH10B/NCBI/2008-03-17", # "Escherichia_coli_K_12_MG1655/NCBI/2001-10-15": "Escherichia_coli_K_12_MG1655/NCBI/2001-10-15", "Escherichia_coli/Ensembl/GCA_000019425.1__release-46": "Escherichia_coli/Ensembl/GCA_000019425.1__release-46", "Escherichia_coli/Ensembl/GCA_000005845.2__release-46": "Escherichia_coli/Ensembl/GCA_000005845.2__release-46", "Gallus_gallus/Ensembl/Galgal4": "Gallus_gallus/Ensembl/Galgal4", "Gallus_gallus/Ensembl/WASHUC2": "Gallus_gallus/Ensembl/WASHUC2", "Gallus_gallus/NCBI/build2.1": "Gallus_gallus/NCBI/build2.1", "Gallus_gallus/NCBI/build3.1": "Gallus_gallus/NCBI/build3.1", "Gallus_gallus/UCSC/galGal3": "Gallus_gallus/UCSC/galGal3", "Gallus_gallus/UCSC/galGal4": "Gallus_gallus/UCSC/galGal4", "Glycine_max/Ensembl/Gm01": "Glycine_max/Ensembl/Gm01", "Homo_sapiens/Ensembl/GRCh38": "Homo_sapiens/Ensembl/GRCh38", "Homo_sapiens/Ensembl/GRCh37": "Homo_sapiens/Ensembl/GRCh37", "Homo_sapiens/NCBI/GRCh38": "Homo_sapiens/NCBI/GRCh38", "Homo_sapiens/NCBI/GRCh38Decoy": "Homo_sapiens/NCBI/GRCh38Decoy", "Homo_sapiens/NCBI/build36.3": "Homo_sapiens/NCBI/build36.3", "Homo_sapiens/NCBI/build37.1": "Homo_sapiens/NCBI/build37.1", "Homo_sapiens/NCBI/build37.2": "Homo_sapiens/NCBI/build37.2", "Homo_sapiens/UCSC/hg18": "Homo_sapiens/UCSC/hg18", "Homo_sapiens/UCSC/hg19": "Homo_sapiens/UCSC/hg19", 
"Homo_sapiens/UCSC/hg38": "Homo_sapiens/UCSC/hg38", "Macaca_mulatta/Ensembl/Mmul_1": "Macaca_mulatta/Ensembl/Mmul_1", "Mus_musculus/Ensembl/GRCm38": "Mus_musculus/Ensembl/GRCm38", "Mus_musculus/Ensembl/NCBIM37": "Mus_musculus/Ensembl/NCBIM37", "Mus_musculus/NCBI/GRCm38": "Mus_musculus/NCBI/GRCm38", "Mus_musculus/NCBI/build37.1": "Mus_musculus/NCBI/build37.1", "Mus_musculus/NCBI/build37.2": "Mus_musculus/NCBI/build37.2", "Mus_musculus/UCSC/mm10": "Mus_musculus/UCSC/mm10", "Mus_musculus/UCSC/mm9": "Mus_musculus/UCSC/mm9", "Mycobacterium_tuberculosis_H37RV/Ensembl/H37Rv.EB1": "Mycobacterium_tuberculosis_H37RV/Ensembl/H37Rv.EB1", "Mycobacterium_tuberculosis_H37RV/NCBI/2001-09-07": "Mycobacterium_tuberculosis_H37RV/NCBI/2001-09-07", "Oryza_sativa_japonica/Ensembl/IRGSP-1.0": "Oryza_sativa_japonica/Ensembl/IRGSP-1.0", "Oryza_sativa_japonica/Ensembl/MSU6": "Oryza_sativa_japonica/Ensembl/MSU6", "Pan_troglodytes/Ensembl/CHIMP2.1": "Pan_troglodytes/Ensembl/CHIMP2.1", "Pan_troglodytes/Ensembl/CHIMP2.1.4": "Pan_troglodytes/Ensembl/CHIMP2.1.4", "Pan_troglodytes/NCBI/build2.1": "Pan_troglodytes/NCBI/build2.1", "Pan_troglodytes/NCBI/build3.1": "Pan_troglodytes/NCBI/build3.1", "Pan_troglodytes/UCSC/panTro2": "Pan_troglodytes/UCSC/panTro2", "Pan_troglodytes/UCSC/panTro3": "Pan_troglodytes/UCSC/panTro3", "Pan_troglodytes/UCSC/panTro4": "Pan_troglodytes/UCSC/panTro4", "PhiX/Illumina/RTA": "PhiX/Illumina/RTA", "PhiX/NCBI/1993-04-28": "PhiX/NCBI/1993-04-28", "Plasmodium_falciparum/PlasmoDB/3D7-release-39": "Plasmodium_falciparum/PlasmoDB/3D7-release-39", "Pseudomonas_aeruginosa_PAO1/NCBI/2000-09-13": "Pseudomonas_aeruginosa_PAO1/NCBI/2000-09-13", "Rattus_norvegicus/Ensembl/RGSC3.4": "Rattus_norvegicus/Ensembl/RGSC3.4", "Rattus_norvegicus/Ensembl/Rnor_5.0": "Rattus_norvegicus/Ensembl/Rnor_5.0", "Rattus_norvegicus/Ensembl/Rnor_6.0": "Rattus_norvegicus/Ensembl/Rnor_6.0", "Rattus_norvegicus/NCBI/RGSC_v3.4": "Rattus_norvegicus/NCBI/RGSC_v3.4", "Rattus_norvegicus/NCBI/Rnor_5.0": 
"Rattus_norvegicus/NCBI/Rnor_5.0", "Rattus_norvegicus/NCBI/Rnor_6.0": "Rattus_norvegicus/NCBI/Rnor_6.0", "Rattus_norvegicus/UCSC/rn4": "Rattus_norvegicus/UCSC/rn4", "Rattus_norvegicus/UCSC/rn5": "Rattus_norvegicus/UCSC/rn5", "Rattus_norvegicus/UCSC/rn6": "Rattus_norvegicus/UCSC/rn6", "Rhodobacter_sphaeroides_2.4.1/NCBI/2005-10-07": "Rhodobacter_sphaeroides_2.4.1/NCBI/2005-10-07", "Saccharomyces_cerevisiae/Ensembl/EF2": "Saccharomyces_cerevisiae/Ensembl/EF2", "Saccharomyces_cerevisiae/Ensembl/EF3": "Saccharomyces_cerevisiae/Ensembl/EF3", "Saccharomyces_cerevisiae/Ensembl/EF4": "Saccharomyces_cerevisiae/Ensembl/EF4", "Saccharomyces_cerevisiae/Ensembl/R64-1-1": "Saccharomyces_cerevisiae/Ensembl/R64-1-1", "Saccharomyces_cerevisiae/NCBI/build2.1": "Saccharomyces_cerevisiae/NCBI/build2.1", "Saccharomyces_cerevisiae/NCBI/build3.1": "Saccharomyces_cerevisiae/NCBI/build3.1", "Saccharomyces_cerevisiae/UCSC/sacCer2": "Saccharomyces_cerevisiae/UCSC/sacCer2", "Saccharomyces_cerevisiae/UCSC/sacCer3": "Saccharomyces_cerevisiae/UCSC/sacCer3", "Schizosaccharomyces_pombe/Ensembl/EF1": "Schizosaccharomyces_pombe/Ensembl/EF1", "Schizosaccharomyces_pombe/Ensembl/EF2": "Schizosaccharomyces_pombe/Ensembl/EF2", "Sorangium_cellulosum_So_ce_56/NCBI/2007-11-27": "Sorangium_cellulosum_So_ce_56/NCBI/2007-11-27", "Sorghum_bicolor/Ensembl/Sbi1": "Sorghum_bicolor/Ensembl/Sbi1", "Staphylococcus_aureus_NCTC_8325/NCBI/2006-02-13": "Staphylococcus_aureus_NCTC_8325/NCBI/2006-02-13", "Sus_scrofa/Ensembl/Sscrofa10.2": "Sus_scrofa/Ensembl/Sscrofa10.2", "Sus_scrofa/Ensembl/Sscrofa9": "Sus_scrofa/Ensembl/Sscrofa9", "Sus_scrofa/NCBI/Sscrofa10": "Sus_scrofa/NCBI/Sscrofa10", "Sus_scrofa/NCBI/Sscrofa10.2": "Sus_scrofa/NCBI/Sscrofa10.2", "Sus_scrofa/NCBI/Sscrofa9.2": "Sus_scrofa/NCBI/Sscrofa9.2", "Sus_scrofa/UCSC/susScr2": "Sus_scrofa/UCSC/susScr2", "Sus_scrofa/UCSC/susScr3": "Sus_scrofa/UCSC/susScr3", "Zea_mays/Ensembl/AGPv2": "Zea_mays/Ensembl/AGPv2", "Zea_mays/Ensembl/AGPv3": "Zea_mays/Ensembl/AGPv3", }
73.286713
115
0.769275
1,374
10,480
5.612809
0.195779
0.02334
0.01945
0.010892
0.874092
0.83584
0.761151
0.408843
0.25778
0.136281
0
0.078978
0.085401
10,480
142
116
73.802817
0.725613
0.093989
0
0
0
0
0.831136
0.819112
0
0
0
0.007042
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a47864ac762ecd700da43a5be124c58386358019
103
py
Python
WeatherPy.ipynb/api_keys.py
nehuffman13/python-api-challenge
8e106348def073cb1e1261fbd5cd94745ee75054
[ "ADSL" ]
null
null
null
WeatherPy.ipynb/api_keys.py
nehuffman13/python-api-challenge
8e106348def073cb1e1261fbd5cd94745ee75054
[ "ADSL" ]
null
null
null
WeatherPy.ipynb/api_keys.py
nehuffman13/python-api-challenge
8e106348def073cb1e1261fbd5cd94745ee75054
[ "ADSL" ]
null
null
null
# OpenWeatherMap API Key weather_api_key = "insert api key" # Google API Key g_key = "insert api key"
17.166667
34
0.737864
17
103
4.294118
0.411765
0.410959
0.328767
0.410959
0
0
0
0
0
0
0
0
0.184466
103
5
35
20.6
0.869048
0.359223
0
0
0
0
0.444444
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f107faf9c816c62c3d41450a20f8cd7dc7434bf7
757
py
Python
rmp/rmptest.py
CrazyWcY/HandyDelivery-Flask
35951ad0fc4ccd18fbb01d58af85a9981c939ec9
[ "MIT" ]
null
null
null
rmp/rmptest.py
CrazyWcY/HandyDelivery-Flask
35951ad0fc4ccd18fbb01d58af85a9981c939ec9
[ "MIT" ]
null
null
null
rmp/rmptest.py
CrazyWcY/HandyDelivery-Flask
35951ad0fc4ccd18fbb01d58af85a9981c939ec9
[ "MIT" ]
null
null
null
import requests, json # get url = 'http://202.120.40.87:14642/rmp-resource-service/project/5fe7edf32ef44e00153874ff/resource/book/' r = requests.get(url=url) print(r.text) # post url = 'http://202.120.40.87:14642/rmp-resource-service/project/5fe7edf32ef44e00153874ff/resource/book/' headers = { 'Content-Type': 'application/json', 'passwd': 'lxr123456', } data = {'ID':1, 'name':'test'} r = requests.post(url=url, headers=headers, data=json.dumps(data)) print(r.text) # put url = 'http://202.120.40.87:14642/rmp-resource-service/project/5fe7edf32ef44e00153874ff/resource/book/' headers = { 'Content-Type': 'application/json', } data = {'ID':2, 'name':'test'} r = requests.put(url=url+'1', headers=headers, data=json.dumps(data)) print(r.text)
30.28
103
0.708058
107
757
5.009346
0.327103
0.039179
0.05597
0.072761
0.740672
0.740672
0.740672
0.740672
0.740672
0.587687
0
0.139738
0.09247
757
25
104
30.28
0.640466
0.015852
0
0.526316
0
0.157895
0.508086
0
0
0
0
0
0
1
0
false
0.052632
0.052632
0
0.052632
0.157895
0
0
0
null
0
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
f18038a4fbe1a241bc301b17359c5231b1583353
131
py
Python
twilioquest/python/codepath/salutation.py
greysondn/gamesolutions
6cb365d24874cc8957f2b92ab448efc062916492
[ "MIT" ]
null
null
null
twilioquest/python/codepath/salutation.py
greysondn/gamesolutions
6cb365d24874cc8957f2b92ab448efc062916492
[ "MIT" ]
null
null
null
twilioquest/python/codepath/salutation.py
greysondn/gamesolutions
6cb365d24874cc8957f2b92ab448efc062916492
[ "MIT" ]
null
null
null
# TwilioQuest version 3.1.26 # Works in: # 3.1.26 # Your first line of Python code is below! print("For the glory of Python!")
18.714286
42
0.679389
24
131
3.708333
0.791667
0.044944
0.089888
0
0
0
0
0
0
0
0
0.076923
0.206107
131
6
43
21.833333
0.778846
0.664122
0
0
0
0
0.615385
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
f18cc5908077da6a2fbeac9045ca2d2816d8c340
1,820
py
Python
tests/test_pwl/test_library/test_tube.py
HySynth/HySynth
b33b34ff5db138b5d007d0dcd32d53c9f8964b62
[ "MIT" ]
4
2021-03-05T11:18:35.000Z
2021-12-09T18:51:32.000Z
tests/test_pwl/test_library/test_tube.py
HySynth/HySynth
b33b34ff5db138b5d007d0dcd32d53c9f8964b62
[ "MIT" ]
null
null
null
tests/test_pwl/test_library/test_tube.py
HySynth/HySynth
b33b34ff5db138b5d007d0dcd32d53c9f8964b62
[ "MIT" ]
null
null
null
import pytest import ppl from numpy.testing import assert_allclose from hysynth.pwl.library import tube def test_tube(): t = ppl.Variable(0) x1 = ppl.Variable(1) x2 = ppl.Variable(2) # 1D zigzag function f = [[0., 0.], [1., 1.], [2., 0.], [3., 1.]] actual_tube = tube(f, delta=0.1) et1 = ppl.NNC_Polyhedron(2, 'universe') et1.add_constraint(-t >= -1) et1.add_constraint(1*t >= 0) et1.add_constraint(-10*t + 10*x1 >= -1) et1.add_constraint(10*t - 10*x1 >= -1) et2 = ppl.NNC_Polyhedron(2, 'universe') et2.add_constraint(-t >= -2) et2.add_constraint(1*t >= 1) et2.add_constraint(-10*t - 10*x1 >= -21) et2.add_constraint(10*t + 10*x1 >= 19) et3 = ppl.NNC_Polyhedron(2, 'universe') et3.add_constraint(-t >= -3) et3.add_constraint(1*t >= 2) et3.add_constraint(-10*t + 10*x1 >= -21) et3.add_constraint(10*t - 10*x1 >= 19) expected_tube = [et1, et2, et3] for i in range(len(f)-1): assert actual_tube[i] == expected_tube[i] # 2D zigzag function f = [[0., 0., 1.], [1., 1., 0.], [2., 0., 1.]] actual_tube = tube(f, delta=0.1) et1 = ppl.NNC_Polyhedron(3, 'universe') et1.add_constraint(-t >= -1) et1.add_constraint(1*t >= 0) et1.add_constraint(-10*t + 10*x1 >= -1) et1.add_constraint(10*t - 10*x1 >= -1) et1.add_constraint(10*t + 10*x2 >= 9) et1.add_constraint(-10*t - 10*x2 >= -11) et2 = ppl.NNC_Polyhedron(3, 'universe') et2.add_constraint(-t >= -2) et2.add_constraint(1*t >= 1) et2.add_constraint(-10*t - 10*x1 >= -21) et2.add_constraint(10*t + 10*x1 >= 19) et2.add_constraint(10*t - 10*x2 >= 9) et2.add_constraint(-10*t + 10*x2 >= -11) expected_tube = [et1, et2] for i in range(len(f)-1): assert actual_tube[i] == expected_tube[i]
29.354839
50
0.59011
304
1,820
3.404605
0.157895
0.301449
0.202899
0.216425
0.771981
0.713043
0.713043
0.553623
0.553623
0.553623
0
0.124031
0.22033
1,820
61
51
29.836066
0.605356
0.02033
0
0.468085
0
0
0.022472
0
0
0
0
0
0.06383
1
0.021277
false
0
0.085106
0
0.106383
0
0
0
0
null
1
1
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
74ce00038d59e6da7f26390a7f2e7bbe43eb49a3
59
py
Python
pydens/evaluation/__init__.py
zkurtz/pydens
0a38020daa745621e47602b4f2583b76d60b6591
[ "MIT" ]
6
2019-05-06T15:05:20.000Z
2021-06-29T07:20:35.000Z
pydens/evaluation/__init__.py
zkurtz/pydens
0a38020daa745621e47602b4f2583b76d60b6591
[ "MIT" ]
1
2019-04-23T18:39:28.000Z
2019-05-05T14:38:58.000Z
pydens/evaluation/__init__.py
zkurtz/pydens
0a38020daa745621e47602b4f2583b76d60b6591
[ "MIT" ]
3
2019-06-23T22:05:05.000Z
2022-02-01T13:34:49.000Z
from .evaluate import Evaluation from .binary import Binary
29.5
32
0.847458
8
59
6.25
0.625
0
0
0
0
0
0
0
0
0
0
0
0.118644
59
2
33
29.5
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
74e5e61454b790d57b68d192c671912d11e23b61
2,367
py
Python
REDSI_1160929_1161573/boost_1_67_0/libs/metaparse/tools/benchmark/chars.py
Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo
eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8
[ "MIT" ]
32
2019-02-27T06:57:07.000Z
2021-08-29T10:56:19.000Z
REDSI_1160929_1161573/boost_1_67_0/libs/metaparse/tools/benchmark/chars.py
Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo
eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8
[ "MIT" ]
1
2019-03-04T11:21:00.000Z
2019-05-24T01:36:31.000Z
REDSI_1160929_1161573/boost_1_67_0/libs/metaparse/tools/benchmark/chars.py
Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo
eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8
[ "MIT" ]
5
2019-08-20T13:45:04.000Z
2022-03-01T18:23:49.000Z
CHARS={' ': 22284371, '\xa3': 2, '$': 4917, '\xa7': 3, '(': 898226, '\xab': 2, ',': 2398845, '\xaf': 2, '0': 624709, '\xb3': 5, '4': 402093, '\xb7': 2, '8': 274327, '\xbb': 2, '<': 906955, '\xbf': 2, '@': 16983, '\xc3': 13, 'D': 291316, '\xc7': 2, 'H': 146671, '\xcb': 2, 'L': 404004, '\xcf': 2, 'P': 717827, '\xd3': 2, 'T': 1426865, '\xd7': 2, 'X': 80953, '\xdb': 2, '\\': 80171, '\xdf': 5, '`': 12213, '\xe3': 2, 'd': 1713185, '\xe7': 2, 'h': 787023, '\xeb': 2, 'l': 2141123, '\xef': 2, 'p': 3018561, '\xf3': 2, 't': 5917113, '\xf7': 2, 'x': 383286, '\xfb': 2, '|': 18625, '\xff': 2, '\x80': 20, '\x9c': 10, '#': 242175, '\xa4': 2, "'": 24359, '\xa8': 2, '+': 62328, '\xac': 2, '/': 1496052, '\xb0': 2, '3': 522407, '\xb4': 2, '7': 281951, '\xb8': 2, ';': 938670, '\xbc': 2, '?': 6554, '\xc0': 2, 'C': 430333, '\xc4': 2, 'G': 143243, '\xc8': 2, 'K': 90732, '\xcc': 2, 'O': 875785, '\xd0': 2, 'S': 702347, '\xd4': 2, 'W': 52216, '\xd8': 2, '[': 66305, '\xdc': 2, '_': 2992229, '\xe0': 2, 'c': 2083806, '\xe4': 2, 'g': 684087, '\xe8': 2, 'k': 165087, '\xec': 2, 'o': 3158786, '\xf0': 2, 's': 2967238, '\xf4': 2, 'w': 247018, '\xf8': 3, '{': 243686, '\xfc': 2, '\n': 2276992, '\x9d': 10, '\xa1': 2, '"': 50327, '\xa5': 2, '&': 418128, '\xa9': 4, '*': 332039, '\xad': 5, '.': 391026, '\xb1': 5, '2': 823421, '\xb5': 2, '6': 322046, '\xb9': 2, ':': 2683679, '\xbd': 2, '>': 915244, '\xc1': 2, 'B': 412447, '\xc5': 2, 'F': 174215, '\xc9': 2, 'J': 11028, '\xcd': 2, 'N': 431761, '\xd1': 2, 'R': 370532, '\xd5': 2, 'V': 120889, '\xd9': 2, 'Z': 14849, '\xdd': 2, '^': 1667, '\xe1': 2, 'b': 645436, '\xe5': 2, 'f': 1305489, '\xe9': 30, 'j': 31303, '\xed': 3, 'n': 3384988, '\xf1': 2, 'r': 2870950, '\xf5': 2, 'v': 519257, '\xf9': 2, 'z': 96213, '\xfd': 2, '~': 13463, '\t': 2920, '\r': 2276968, '!': 72758, '\xa2': 2, '%': 7081, '\xa6': 2, ')': 899122, '\xaa': 2, '-': 325139, '\xae': 2, '1': 1292007, '\xb2': 2, '5': 326024, '\xb6': 2, '9': 258472, '\xba': 4, '=': 626629, '\xbe': 2, 'A': 1040447, '\xc2': 
2, 'E': 657368, '\xc6': 2, 'I': 569518, '\xca': 2, 'M': 211683, '\xce': 2, 'Q': 21541, '\xd2': 2, 'U': 218558, '\xd6': 2, 'Y': 64741, '\xda': 2, ']': 65379, '\xde': 2, 'a': 4007230, '\xe2': 22, 'e': 7280723, '\xe6': 2, 'i': 2971166, '\xea': 2, 'm': 1989243, '\xee': 2, 'q': 63623, '\xf2': 2, 'u': 1297465, '\xf6': 30, 'y': 1819692, '\xfa': 2, '}': 242894, '\xfe': 2}
1,183.5
2,366
0.445289
360
2,367
2.925
0.661111
0.003799
0
0
0
0
0
0
0
0
0
0.386214
0.166455
2,367
1
2,367
2,367
0.147491
0
0
0
0
0
0.208791
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
74f0a82fb60962436afbbf8704c6108e5b689a04
32,717
py
Python
test/test_py_seqcomparer.py
davidhwyllie/findNeighbour4
d42e10711e59e93ebf0e798fbb1598929f662c9c
[ "MIT" ]
null
null
null
test/test_py_seqcomparer.py
davidhwyllie/findNeighbour4
d42e10711e59e93ebf0e798fbb1598929f662c9c
[ "MIT" ]
14
2021-11-26T14:43:25.000Z
2022-03-22T00:39:17.000Z
test/test_py_seqcomparer.py
davidhwyllie/findNeighbour4
d42e10711e59e93ebf0e798fbb1598929f662c9c
[ "MIT" ]
null
null
null
""" tests py_seqComparer.py A component of the findNeighbour4 system for bacterial relatedness monitoring Copyright (C) 2021 David Wyllie david.wyllie@phe.gov.uk repo: https://github.com/davidhwyllie/findNeighbour4 This program is free software: you can redistribute it and/or modify it under the terms of the MIT License as published by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file. """ import unittest import json from Bio import SeqIO from findn.py_seqComparer import py_seqComparer class test_py_seqComparer_51(unittest.TestCase): """tests mcompare""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) n = 0 originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) res = sc.mcompare(guids[0]) # defaults to sample size 30 self.assertEqual(len(res), len(originals) - 1) class test_py_seqComparer_ec(unittest.TestCase): """tests exact comparison""" def runTest(self): # generate compressed sequences refSeq = "G" * 30000 sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=1000) obj1 = json.loads( """{ "A": [], "C": [], "G": [], "invalid": 0, "M": [], "N": [], "T": [], "U": [] }""" ) obj2 = json.loads( """{ "A": [], "C": [], "G": [ 23402 ], "invalid": 0, "M": {}, "N": [ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394 ], "T": [ 28931, 203, 29644, 6285, 21613, 240, 19184, 10448, 22226, 27768 ], "U": [ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394 ] }""" ) sc.persist(obj1, "guid1") sc.persist(obj2, "guid2") dist = sc.compare("guid1", "guid2") self.assertEqual(dist, 11) class test_py_seqComparer_49(unittest.TestCase): """tests reporting on stored contents""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=1e8, 
reference=refSeq, snpCeiling=10) # need > 30 sequences originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guid_names = [] n = 0 for original in originals: n += 1 c = sc.compress(original) this_guid = "{0}-{1}".format(original, n) sc.persist(c, guid=this_guid) guid_names.append(this_guid) res = sc.summarise_stored_items() self.assertTrue(isinstance(res, dict)) self.assertEqual(set(res.keys()), set(["server|scstat|nSeqs"])) class test_py_seqComparer_48(unittest.TestCase): """tests computations of p values from exact bionomial test""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) # need > 30 sequences originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guid_names = [] n = 0 for original in originals: n += 1 c = sc.compress(original) this_guid = "{0}-{1}".format(original, n) sc.persist(c, guid=this_guid) guid_names.append(this_guid) class test_py_seqComparer_46a(unittest.TestCase): """tests estimate_expected_unk, a function estimating the number of Ns in sequences by sampling""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) n = 0 originals = [ 
"AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) res = sc.estimate_expected_unk() # defaults to sample size 30 self.assertEqual(res, None) # analyse the last two res = sc.estimate_expected_unk(sample_size=2, exclude_guids=guids[0:5]) self.assertEqual(res, 1.5) # analyse the first two res = sc.estimate_expected_unk(sample_size=2, exclude_guids=guids[2:7]) self.assertEqual(res, 1) class test_py_seqComparer_46b(unittest.TestCase): """tests estimate_expected_unk, a function estimating the number of Ns in sequences by sampling""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=3, reference=refSeq, snpCeiling=10) n = 0 originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTGGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) res = sc.estimate_expected_unk() # defaults to sample size 30 self.assertEqual(res, None) # analyse them all res = sc.estimate_expected_unk(sample_size=7, exclude_guids=[]) self.assertEqual(res, 1) # analyse them all res = sc.estimate_expected_unk(sample_size=6, exclude_guids=[]) self.assertEqual(res, 1) class test_py_seqComparer_46c(unittest.TestCase): """tests estimate_expected_unk_sites, a function estimating the number of Ns in sequences by sampling""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) n = 0 originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) # analyse nothing res = sc.estimate_expected_unk_sites( sample_size=2, 
sites=set([]), exclude_guids=guids[0:5] ) self.assertEqual(res, 0) # analyse the last two res = sc.estimate_expected_unk_sites( sample_size=2, sites=set([0, 1, 2, 3, 4, 5]), exclude_guids=guids[0:5] ) self.assertEqual(res, 1.5) # analyse the first two res = sc.estimate_expected_unk_sites( sample_size=2, sites=set([0, 1, 2, 3, 4, 5]), exclude_guids=guids[2:7] ) self.assertEqual(res, 1) class test_py_seqComparer_45a(unittest.TestCase): """tests the generation of multiple alignments of variant sites.""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guid_names = [] n = 0 for original in originals: n += 1 c = sc.compress(original) this_guid = "{0}-{1}".format(original, n) sc.persist(c, guid=this_guid) guid_names.append(this_guid) res = sc.multi_sequence_alignment(guid_names) self.assertEqual(len(res.valid_guids), 7) self.assertEqual(res.variant_positions, [0, 1, 2, 3]) class test_py_seqComparer_45b(unittest.TestCase): """tests the generation of multiple alignments of variant sites.""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = py_seqComparer(maxNs=6, reference=refSeq, snpCeiling=10) originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTGGN", ] guid_names = [] n = 0 for original in originals: n += 1 c = sc.compress(original) this_guid = "{0}-{1}".format(original, n) sc.persist(c, guid=this_guid) guid_names.append(this_guid) res = sc.multi_sequence_alignment(guid_names) self.assertEqual(len(res.valid_guids), 7) self.assertEqual(res.variant_positions, [0, 1, 2, 3]) class test_py_seqComparer_1(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) self.assertEqual(sc.reference, refSeq) class test_py_seqComparer_2(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = 
py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) with self.assertRaises(TypeError): retVal = sc.compress(sequence="AC") self.assertTrue(retVal is not None) class test_py_seqComparer_3(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = sc.compress(sequence="ACTG") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([]), "U": set([]), "M": {}, "invalid": 0, }, ) class test_py_seqComparer_3b(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = sc.compress(sequence="ACTQ") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([]), "U": set([3]), "M": {3: "Q"}, "invalid": 0, }, ) class test_py_seqComparer_3c(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = sc.compress(sequence="NYTQ") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([0]), "U": set([0, 1, 3]), "M": {1: "Y", 3: "Q"}, "invalid": 0, }, ) class test_py_seqComparer_4(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = sc.compress(sequence="ACTN") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([3]), "M": {}, "U": set([3]), "invalid": 0, }, ) class test_py_seqComparer_5(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = sc.compress(sequence="ACT-") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([3]), "M": {}, "U": set([3]), "invalid": 0, }, ) class test_py_seqComparer_6(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = 
sc.compress(sequence="TCT-") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([0]), "N": set([3]), "M": {}, "U": set([3]), "invalid": 0, }, ) class test_py_seqComparer_7(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = sc.compress(sequence="ATT-") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([1]), "N": set([3]), "M": {}, "U": set([3]), "invalid": 0, }, ) class test_py_seqComparer_6b(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) originals = [ "AAAA", "CCCC", "TTTT", "GGGG", "NNNN", "ACTG", "ACTC", "TCTN", "NYTQ", "QRST", ] for original in originals: compressed_sequence = sc.compress(sequence=original) roundtrip = sc.uncompress(compressed_sequence) self.assertEqual(original, roundtrip) class test_py_seqComparer_6c(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) originals = ["NNNN"] for original in originals: compressed_sequence = sc.compress(sequence=original) roundtrip = sc.uncompress(compressed_sequence) self.assertEqual(original, roundtrip) class test_py_seqComparer_6d(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=3, snpCeiling=20, reference=refSeq) originals = ["NNNN"] for original in originals: compressed_sequence = sc.compress(sequence=original) with self.assertRaises(ValueError): sc.uncompress(compressed_sequence) class test_py_seqComparer_16(unittest.TestCase): """tests the comparison of two sequences where both differ from the reference.""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) seq1 = sc.compress("AAAA") seq2 = sc.compress("CCCC") self.assertEqual(sc.countDifferences(seq1, seq2), 4) class test_py_seqComparer_16b(unittest.TestCase): """tests the 
comparison of two sequences where both differ from the reference.""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) seq1 = sc.compress("AAAA") seq2 = sc.compress("RRCC") self.assertEqual(sc.countDifferences(seq1, seq2), 2) class test_py_seqComparer_16c(unittest.TestCase): """tests the comparison of two sequences where both differ from the reference.""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) seq1 = sc.compress("AAAA") seq2 = sc.compress("RRNN") self.assertEqual(sc.countDifferences(seq1, seq2), 0) class test_py_seqComparer_17(unittest.TestCase): """tests the comparison of two sequences where one is invalid""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = py_seqComparer(maxNs=3, reference=refSeq, snpCeiling=10) seq1 = sc.compress("AAAA") seq2 = sc.compress("NNNN") self.assertEqual(sc.countDifferences(seq1, seq2), None) class test_py_seqComparer_cmp(unittest.TestCase): """tests the comparison of two sequences where both differ from the reference.""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) seq1 = sc.compress("AAAA") seq2 = sc.compress("CCCC") sc.persist(seq1, "s1") sc.persist(seq2, "s2") self.assertEqual(sc.compare("s1", "s2"), 4) with self.assertRaises(KeyError): sc.compare("s1", "not_there") class test_py_seqComparer_saveload3(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) compressedObj = sc.compress(sequence="ACTT") sc.persist(compressedObj, "one") retVal = sc.load(guid="one") self.assertEqual(compressedObj, retVal) class test_py_seqComparer_save_remove(unittest.TestCase): def runTest(self): refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) compressedObj = 
sc.compress(sequence="ACTT") sc.persist(compressedObj, "one") retVal = sc.iscachedinram(guid="one") self.assertEqual(True, retVal) sc.remove("one") retVal = sc.iscachedinram(guid="one") self.assertEqual(False, retVal) class test_py_seqComparer_24(unittest.TestCase): """tests N compression""" def runTest(self): refSeq = "ACTGTTAATTTTTTTTTGGGGGGGGGGGGAA" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) retVal = sc.compress(sequence="ACTGTTAANNNNNNNNTGGGGGGGGGGGGAA") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "M": {}, "N": set([8, 9, 10, 11, 12, 13, 14, 15]), "U": set([8, 9, 10, 11, 12, 13, 14, 15]), "invalid": 0, }, ) retVal = sc.compress(sequence="NNTGTTAANNNNNNNNTGGGGGGGGGGGGAA") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "M": {}, "N": set([0, 1, 8, 9, 10, 11, 12, 13, 14, 15]), "U": set([0, 1, 8, 9, 10, 11, 12, 13, 14, 15]), "invalid": 0, }, ) class test_py_seqComparer_29(unittest.TestCase): """tests _setStats""" def runTest(self): refSeq = "ACTGTTAATTTTTTTTTGGGGGGGGGGGGAA" sc = py_seqComparer(maxNs=1e8, snpCeiling=20, reference=refSeq) compressedObj1 = sc.compress(sequence="GGGGTTAANNNNNNNNNGGGGGAAAAGGGAA") compressedObj2 = sc.compress(sequence="ACTGTTAATTTTTTTTTNNNNNNNNNNNNNN") (n1, n2, nall, rv1, rv2, retVal) = sc._setStats( compressedObj1["N"], compressedObj2["N"] ) self.assertEqual( retVal, set( [ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, ] ), ) compressedObj1 = sc.compress(sequence="GGGGTTAANNNNNNNNTGGGGGAAAAGGGAA") compressedObj2 = sc.compress(sequence="ACTGTTAATTTTTTTTTNNNNNNNNNNNNNN") (n1, n2, nall, rv1, rv2, retVal) = sc._setStats( compressedObj1["N"], compressedObj2["N"] ) self.assertEqual( retVal, set( [ 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, ] ), ) compressedObj1 = sc.compress(sequence="NNNGTTAANNNNNNNNTGGGGGAAAAGGGAA") compressedObj2 = 
sc.compress(sequence="ACTGTTAATTTTTTTTTNNNNNNNNNNNNNN") (n1, n2, nall, rv1, rv2, retVal) = sc._setStats( compressedObj1["N"], compressedObj2["N"] ) self.assertEqual( retVal, set( [ 0, 1, 2, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, ] ), ) compressedObj1 = sc.compress(sequence="NNNGTTAANNNNNNNNTGGGGGAAAAGGGAA") compressedObj2 = sc.compress(sequence="ACTNNNNNTTTTTTTTTNNNNNNNNNNNNNN") (n1, n2, nall, rv1, rv2, retVal) = sc._setStats( compressedObj1["N"], compressedObj2["N"] ) self.assertEqual( retVal, set( [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, ] ), ) compressedObj1 = sc.compress(sequence="NNNGTTAANNNNNNNNTGGGGGAAAAGGGAA") compressedObj2 = sc.compress(sequence="ACTNNNNNTTTTTTTTTQQQQQQQQQQQQQQ") (n1, n2, nall, rv1, rv2, retVal) = sc._setStats( compressedObj1["N"], compressedObj2["N"] ) self.assertEqual( retVal, set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) ) (n1, n2, nall, rv1, rv2, retVal) = sc._setStats( compressedObj1["M"], compressedObj2["M"] ) self.assertEqual( retVal, set([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]) ) compressedObj1 = sc.compress(sequence="qqqGTTAAqqqqqqqqTGGGGGAAAAGGGAA") compressedObj2 = sc.compress(sequence="ACTqqqqqTTTTTTTTTqqqqqqqqqqqqqq") (n1, n2, nall, rv1, rv2, retVal) = sc._setStats( compressedObj1["M"], compressedObj2["M"] ) self.assertEqual( retVal, set( [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, ] ), ) class test_py_seqComparer_37(unittest.TestCase): """tests the loading of an exclusion file""" def runTest(self): # default exclusion file refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=1) self.assertEqual( sc.excluded_hash(), "Excl 0 nt [d751713988987e9331980363e24189ce]" ) class test_py_seqComparer_38(unittest.TestCase): """tests the loading of an exclusion file""" def runTest(self): # no exclusion 
file refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=1) self.assertEqual( sc.excluded_hash(), "Excl 0 nt [d751713988987e9331980363e24189ce]" ) class test_py_seqComparer_40(unittest.TestCase): """tests the computation of a hash of a compressed object""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) compressed_sequence = sc.compress(sequence="TTAA") res = sc.compressed_sequence_hash(compressed_sequence) self.assertEqual(res, "da8785691df5858b0b847db59bdefd11") class test_py_seqComparer_45(unittest.TestCase): """tests insertion of large sequences""" def runTest(self): inputfile = "reference/NC_000962.fasta" with open(inputfile, "rt") as f: for record in SeqIO.parse(f, "fasta"): goodseq = str(record.seq) badseq = "".join("N" * len(goodseq)) originalseq = list(str(record.seq)) sc = py_seqComparer(maxNs=1e8, reference=record.seq, snpCeiling=100) n_pre = 0 guids_inserted = list() for i in range(1, 4): # 40 seq = originalseq if i % 5 == 0: is_mixed = True guid_to_insert = "mixed_{0}".format(n_pre + i) else: is_mixed = False guid_to_insert = "nomix_{0}".format(n_pre + i) # make i mutations at position 500,000 offset = 500000 nVariants = 0 for j in range(i): mutbase = offset + j ref = seq[mutbase] if is_mixed is False: nVariants += 1 if not ref == "T": seq[mutbase] = "T" if not ref == "A": seq[mutbase] = "A" if is_mixed is True: seq[mutbase] = "N" seq = "".join(seq) if i % 11 == 0: seq = badseq # invalid guids_inserted.append(guid_to_insert) if not is_mixed: # print("Adding TB sequence {2} of {0} bytes with {1} Ns and {3} variants relative to ref.".format(len(seq), seq.count('N'), guid_to_insert, nVariants)) pass else: # print("Adding mixed TB sequence {2} of {0} bytes with {1} Ns relative to ref.".format(len(seq), seq.count('N'), guid_to_insert)) pass self.assertEqual(len(seq), 4411532) # check it's the right sequence c = sc.compress(seq) sc.persist(c, 
guid=guid_to_insert) class test_py_seqComparer_47(unittest.TestCase): """tests raise_error""" def runTest(self): # generate compressed sequences refSeq = "GGGGGGGGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) with self.assertRaises(ZeroDivisionError): sc.raise_error("token") class test_py_seqComparer_47dist(unittest.TestCase): """tests distmat, a function yielding a distance matrix.""" def runTest(self): # generate compressed sequences refSeq = "GGGGGGGGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) originals = ["AAACACTGACTG", "CCCCACTGACTG", "TTTCACTGACTG"] for original in originals: c = sc.compress(original) sc.persist(c, guid=original) n = 0 for item in sc.distmat(half=False, diagonal=True): n += 1 l_originals = len(originals) self.assertEqual(n, l_originals * l_originals) n = 0 for item in sc.distmat(half=False, diagonal=False): n += 1 l_originals = len(originals) self.assertEqual(n, (l_originals * l_originals) - l_originals) n = 0 for item in sc.distmat(half=True, diagonal=False): n += 1 l_originals = len(originals) self.assertEqual(n, (l_originals * (l_originals - 1) / 2)) class test_py_seqComparer_50(unittest.TestCase): """tests estimate_expected_proportion, a function computing the proportion of Ns expected based on the median Ns in a list of sequences""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = py_seqComparer(maxNs=1e8, reference=refSeq, snpCeiling=10) res = sc.estimate_expected_proportion([]) self.assertTrue(res is None) res = sc.estimate_expected_proportion(["AA", "AA"]) self.assertTrue(res is None) res = sc.estimate_expected_proportion(["AA", "AA", "AA"]) self.assertTrue(res is not None) self.assertTrue(res == 0) res = sc.estimate_expected_proportion(["AAN", "AAN", "AAN"]) self.assertTrue(res is not None) self.assertAlmostEqual(res, 1 / 3)
28.059177
168
0.475044
3,088
32,717
4.937824
0.128238
0.065648
0.026692
0.053384
0.774265
0.747967
0.726128
0.716619
0.698059
0.689664
0
0.052233
0.410154
32,717
1,165
169
28.083262
0.7379
0.089006
0
0.713793
0
0
0.071681
0.021546
0
0
0
0
0.071264
1
0.042529
false
0.002299
0.004598
0
0.089655
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
74f792b39c469ecd7c60b8c7a62e18d5a31f28a5
2,393
py
Python
cifarconv/networks.py
szymanskir/CIFAR-10-CNN
cbec78915f2a635dfd853d9f04dc7605e8d52789
[ "MIT" ]
null
null
null
cifarconv/networks.py
szymanskir/CIFAR-10-CNN
cbec78915f2a635dfd853d9f04dc7605e8d52789
[ "MIT" ]
10
2020-01-28T22:40:15.000Z
2022-03-11T23:44:38.000Z
cifarconv/networks.py
szymanskir/CIFAR-10-CNN
cbec78915f2a635dfd853d9f04dc7605e8d52789
[ "MIT" ]
null
null
null
from keras.models import Sequential from keras.layers import ( GlobalAveragePooling2D, Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, ) from keras.layers.normalization import BatchNormalization def create_lenet5(input_shape): model = Sequential() model.add(Conv2D(6, (5, 5), padding="same", input_shape=input_shape)) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Activation("relu")) model.add(Conv2D(16, (5, 5), padding="same")) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(120)) model.add(Activation("relu")) model.add(Dense(84)) model.add(Activation("relu")) model.add(Dense(10)) model.add(Activation("softmax")) return model def create_allcnn(input_shape): model = Sequential() model.add(Dropout(0.2, input_shape=input_shape)) model.add(Conv2D(96, (3, 3), padding="same", kernel_initializer="he_normal")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(96, (3, 3), padding="same", kernel_initializer="he_normal")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add( Conv2D( 96, (3, 3), strides=(2, 2), padding="same", kernel_initializer="he_normal" ) ) model.add(Dropout(0.5)) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(192, (3, 3), padding="same", kernel_initializer="he_normal")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add( Conv2D( 192, (3, 3), strides=(2, 2), padding="same", kernel_initializer="he_normal" ) ) model.add(Dropout(0.5)) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(192, (3, 3), padding="same", kernel_initializer="he_normal")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(192, (1, 1), kernel_initializer="he_normal")) model.add(Activation("relu")) model.add(BatchNormalization()) model.add(Conv2D(10, (1, 1), kernel_initializer="he_normal")) model.add(Activation("relu")) 
model.add(BatchNormalization()) model.add(GlobalAveragePooling2D()) model.add(Activation("softmax")) return model
30.679487
87
0.658588
293
2,393
5.290102
0.16041
0.221935
0.174194
0.184516
0.803871
0.803871
0.674839
0.629677
0.629677
0.629677
0
0.041013
0.174676
2,393
77
88
31.077922
0.743797
0
0
0.573529
0
0
0.071041
0
0
0
0
0
0
1
0.029412
false
0
0.044118
0
0.102941
0
0
0
0
null
1
0
1
1
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
741265fabf45b0f92ef50a085627cdd4094c809e
658
py
Python
Notas_Python/Notas_RepasoPython/datos.py
anehik/MyST
bbc248efa2b71efdbe153b6b1f7efe21c69f2a09
[ "MIT" ]
null
null
null
Notas_Python/Notas_RepasoPython/datos.py
anehik/MyST
bbc248efa2b71efdbe153b6b1f7efe21c69f2a09
[ "MIT" ]
null
null
null
Notas_Python/Notas_RepasoPython/datos.py
anehik/MyST
bbc248efa2b71efdbe153b6b1f7efe21c69f2a09
[ "MIT" ]
null
null
null
# -- ------------------------------------------------------------------------------------ -- # # -- Proyecto: Repaso de python 3 y analisis de precios OHLC -- # # -- Codigo: datos.py - script con datos de uso en proyecto -- # # -- Rep: https://github.com/ITESOIF/MyST/tree/master/Notas_Python/Notas_RepasoPython -- # # -- Autor: Francisco ME -- # # -- ------------------------------------------------------------------------------------ -- # OA_Ak = '7' + '9ae0a52f8e483facdd81f5b316a8ef8-99fb5554f4739c76535b209044f7de2' + '6'
65.8
94
0.364742
41
658
5.780488
0.853659
0
0
0
0
0
0
0
0
0
0
0.089362
0.285714
658
9
95
73.111111
0.414894
0.835866
0
0
0
0
0.698925
0.677419
1
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7454a0af870ef23924d9b7c5a48600c6d906e1c4
55
py
Python
flowx/imbound/_interface/rigid/__init__.py
akashdhruv/flowX
65b752c58a9da29f8508b4056d4aa3ac6d336d41
[ "MIT" ]
null
null
null
flowx/imbound/_interface/rigid/__init__.py
akashdhruv/flowX
65b752c58a9da29f8508b4056d4aa3ac6d336d41
[ "MIT" ]
7
2020-03-05T20:39:32.000Z
2020-03-13T01:11:26.000Z
flowx/imbound/_interface/rigid/__init__.py
akashdhruv/flowX
65b752c58a9da29f8508b4056d4aa3ac6d336d41
[ "MIT" ]
1
2020-03-09T17:38:00.000Z
2020-03-09T17:38:00.000Z
from ._force_flow import * from ._map_to_grid import *
18.333333
27
0.781818
9
55
4.222222
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.145455
55
2
28
27.5
0.808511
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
745c95d9f75d1a73b40096597201fa28bdbf0065
1,739
py
Python
pytaxize/__init__.py
puppriss/pytaxize
32c03d52ee99da32007dfb1ab5ee1e6745e81dbe
[ "MIT" ]
null
null
null
pytaxize/__init__.py
puppriss/pytaxize
32c03d52ee99da32007dfb1ab5ee1e6745e81dbe
[ "MIT" ]
null
null
null
pytaxize/__init__.py
puppriss/pytaxize
32c03d52ee99da32007dfb1ab5ee1e6745e81dbe
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # pytaxize ''' pytaxize library ~~~~~~~~~~~~~~~~~~~~~ pytaxize is a taxonomic toolkit for Python. Example usage: Usage:: import pytaxize pytaxize.col_children(name=["Apis"]) ''' from .refactor import * from .gnr import gnr_datasources, gnr_resolve from .gni import gni_parse, gni_search, gni_details from .col import col_children, col_downstream, col_search from .tax import names_list, vascan_search, gbif_parse, scrapenames from .ids import Ids from .itis import itis_ping, getacceptednamesfromtsn, getanymatchcount, getcommentdetailfromtsn, getcommonnamesfromtsn, getcoremetadatafromtsn, getcoveragefromtsn, getcredibilityratingfromtsn, getcredibilityratings, getcurrencyfromtsn, getdatedatafromtsn, getexpertsfromtsn, gettaxonomicranknamefromtsn, getfullhierarchyfromtsn, getfullrecordfromlsid, getfullrecordfromtsn, getgeographicdivisionsfromtsn, getgeographicvalues, getglobalspeciescompletenessfromtsn, gethierarchydownfromtsn, gethierarchyupfromtsn, getitistermsfromcommonname, getitisterms, getitistermsfromscientificname, itis_hierarchy, getjurisdictionaloriginfromtsn, getjurisdictionoriginvalues, getjurisdictionvalues, getkingdomnamefromtsn, getkingdomnames, getlastchangedate, getlsidfromtsn, getothersourcesfromtsn, getparenttsnfromtsn, getpublicationsfromtsn, getranknames, getrecordfromlsid, getreviewyearfromtsn, getscientificnamefromtsn, gettaxonauthorshipfromtsn, gettaxonomicranknamefromtsn, gettaxonomicusagefromtsn, gettsnbyvernacularlanguage, gettsnfromlsid, getunacceptabilityreasonfromtsn, getvernacularlanguages, searchbycommonname, searchbycommonnamebeginswith, searchbycommonnameendswith, itis_searchcommon, searchbyscientificname, searchforanymatch, searchforanymatchpaged
72.458333
1,255
0.86084
124
1,739
11.951613
0.709677
0.021592
0
0
0
0
0
0
0
0
0
0.00062
0.07188
1,739
23
1,256
75.608696
0.917596
0.114434
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
746149d182ecb5e3fd426dc44f6dcb05be67eeb8
64
py
Python
agent/__init__.py
SunandBean/tensorflow_RL
a248cbfb99b2041f6f7cc008fcad53fb83ac486e
[ "MIT" ]
60
2019-01-29T14:13:00.000Z
2020-11-24T09:08:05.000Z
agent/__init__.py
SunandBean/tensorflow_RL
a248cbfb99b2041f6f7cc008fcad53fb83ac486e
[ "MIT" ]
2
2019-08-14T06:44:32.000Z
2020-11-12T12:57:55.000Z
agent/__init__.py
SunandBean/tensorflow_RL
a248cbfb99b2041f6f7cc008fcad53fb83ac486e
[ "MIT" ]
37
2019-01-22T05:19:34.000Z
2021-04-12T02:27:50.000Z
import agent.continuous import agent.discrete import agent.utils
21.333333
23
0.875
9
64
6.222222
0.555556
0.589286
0
0
0
0
0
0
0
0
0
0
0.078125
64
3
24
21.333333
0.949153
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
7488193cea0322a7c2da1560e98733d6091844a5
88
py
Python
test/pytch/py/project/go_live_empty_project.py
Liampobob/pytch-vm
bb2cf19c0736d467daf195635a9de9903aaa1237
[ "MIT" ]
2
2021-11-29T09:47:23.000Z
2022-02-11T15:48:20.000Z
test/pytch/py/project/go_live_empty_project.py
Liampobob/pytch-vm
bb2cf19c0736d467daf195635a9de9903aaa1237
[ "MIT" ]
1
2022-02-28T13:50:48.000Z
2022-02-28T13:50:48.000Z
test/pytch/py/project/go_live_empty_project.py
Liampobob/pytch-vm
bb2cf19c0736d467daf195635a9de9903aaa1237
[ "MIT" ]
4
2021-02-12T15:27:33.000Z
2022-03-16T10:26:55.000Z
import pytch from pytch import ( Project, ) project = Project() project.go_live()
9.777778
19
0.693182
11
88
5.454545
0.545455
0.7
0.7
0
0
0
0
0
0
0
0
0
0.204545
88
8
20
11
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
778120917ba5a4ca6d8003c065a3d393ef2c2dee
21
py
Python
iterators/word_count.py
kowalczykj90/First-time-with-Git
320a835d6b45f5b34ac1a453391ded02758d53dc
[ "Unlicense" ]
null
null
null
iterators/word_count.py
kowalczykj90/First-time-with-Git
320a835d6b45f5b34ac1a453391ded02758d53dc
[ "Unlicense" ]
null
null
null
iterators/word_count.py
kowalczykj90/First-time-with-Git
320a835d6b45f5b34ac1a453391ded02758d53dc
[ "Unlicense" ]
null
null
null
print("See the diff")
21
21
0.714286
4
21
3.75
1
0
0
0
0
0
0
0
0
0
0
0
0.095238
21
1
21
21
0.789474
0
0
0
0
0
0.545455
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
77835cb1ac8a52ee6b0867d75d9a8c470e60ff38
97
py
Python
medembed/__init__.py
isaacsultan/MedEmbed
2a9baf91df5839b9747393fbe6c9af6d5ee1f133
[ "MIT" ]
null
null
null
medembed/__init__.py
isaacsultan/MedEmbed
2a9baf91df5839b9747393fbe6c9af6d5ee1f133
[ "MIT" ]
4
2018-03-27T17:51:46.000Z
2018-04-27T15:46:56.000Z
medembed/__init__.py
isaacsultan/MedEmbed
2a9baf91df5839b9747393fbe6c9af6d5ee1f133
[ "MIT" ]
1
2018-04-27T15:38:35.000Z
2018-04-27T15:38:35.000Z
import os DIR_PROCESSED = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'processed')
24.25
85
0.762887
15
97
4.6
0.533333
0.26087
0.376812
0
0
0
0
0
0
0
0
0
0.072165
97
3
86
32.333333
0.766667
0
0
0
0
0
0.092784
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
77c5ad42ab8d28aab7441340384f986ccd8b31e7
57
py
Python
CoinCrypt/__init__.py
HenriqueDomiciano/CoinCrypt
52f61748cf825caf471c4224efd63128f51db6f2
[ "MIT" ]
1
2021-09-07T12:57:43.000Z
2021-09-07T12:57:43.000Z
CoinCrypt/__init__.py
HenriqueDomiciano/CoinCrypt
52f61748cf825caf471c4224efd63128f51db6f2
[ "MIT" ]
null
null
null
CoinCrypt/__init__.py
HenriqueDomiciano/CoinCrypt
52f61748cf825caf471c4224efd63128f51db6f2
[ "MIT" ]
null
null
null
from requests import Session import CoinCrypt.Coincrypt
14.25
28
0.859649
7
57
7
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.122807
57
3
29
19
0.98
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
77d762b7e2f200231edbc56d3aa6ee76bb18a15e
78
py
Python
run_server.py
wonkoderverstaendige/raspi_lepton
55822cf1c5f5043c1d3547f0ab41935ddd6a9ef0
[ "MIT" ]
null
null
null
run_server.py
wonkoderverstaendige/raspi_lepton
55822cf1c5f5043c1d3547f0ab41935ddd6a9ef0
[ "MIT" ]
null
null
null
run_server.py
wonkoderverstaendige/raspi_lepton
55822cf1c5f5043c1d3547f0ab41935ddd6a9ef0
[ "MIT" ]
null
null
null
#!/usr/bin/env python from server import lepton_server print "Server Done"
11.142857
32
0.75641
12
78
4.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.153846
78
6
33
13
0.878788
0.25641
0
0
0
0
0.196429
0
0
0
0
0
0
0
null
null
0
0.5
null
null
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
1
0
5
7ae355a46c3ae490593b62aab84a0cf54f3cda82
53
py
Python
hydrogels/generators/gels/__init__.py
debeshmandal/brownian
bc5b2e00a04d11319c85e749f9c056b75b450ff7
[ "MIT" ]
3
2020-05-13T01:07:30.000Z
2021-02-12T13:37:23.000Z
hydrogels/generators/gels/__init__.py
debeshmandal/brownian
bc5b2e00a04d11319c85e749f9c056b75b450ff7
[ "MIT" ]
24
2020-06-04T13:48:57.000Z
2021-12-31T18:46:52.000Z
hydrogels/generators/gels/__init__.py
debeshmandal/brownian
bc5b2e00a04d11319c85e749f9c056b75b450ff7
[ "MIT" ]
1
2020-07-23T17:15:23.000Z
2020-07-23T17:15:23.000Z
from .core import Gel from .generic import GenericGel
26.5
31
0.830189
8
53
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.132075
53
2
31
26.5
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
249982c6383d3dcf8ddc0e745f90f62b3bebdfc6
54
py
Python
division.py
cateto/python4NLP
1d2d5086f907bf75be01762bf0b384c76d8f704e
[ "MIT" ]
2
2021-12-16T22:38:27.000Z
2021-12-17T13:09:49.000Z
division.py
cateto/python4NLP
1d2d5086f907bf75be01762bf0b384c76d8f704e
[ "MIT" ]
null
null
null
division.py
cateto/python4NLP
1d2d5086f907bf75be01762bf0b384c76d8f704e
[ "MIT" ]
null
null
null
#몫과 나머지 a = 14 // 3 b = 14 % 3 print("몫 ",a, "나머지 ",b)
13.5
23
0.444444
13
54
1.846154
0.615385
0.25
0
0
0
0
0
0
0
0
0
0.153846
0.277778
54
4
23
13.5
0.461538
0.111111
0
0
0
0
0.125
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2499e1cb31a48845c26ed861ac8466c67443486b
78
py
Python
mcmo/__init__.py
tkhyn/django-mcmo
cef44217ef0dcb16ef9ffb0f6492a0be050d7668
[ "MIT" ]
null
null
null
mcmo/__init__.py
tkhyn/django-mcmo
cef44217ef0dcb16ef9ffb0f6492a0be050d7668
[ "MIT" ]
null
null
null
mcmo/__init__.py
tkhyn/django-mcmo
cef44217ef0dcb16ef9ffb0f6492a0be050d7668
[ "MIT" ]
null
null
null
from .version import __version__, __version_info__ from . import management
26
51
0.820513
9
78
6.111111
0.555556
0
0
0
0
0
0
0
0
0
0
0
0.141026
78
2
52
39
0.820896
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
24b58aad9284a90565b18cbeff3ed50d73d4c6d6
143
py
Python
pyworkforce/shifts/__init__.py
rodrigo-arenas/pyworkforce
f3986ebbc3c48a8ae08dc04dfb939ac6a9516233
[ "MIT" ]
10
2021-03-20T02:58:52.000Z
2022-03-28T05:58:56.000Z
pyworkforce/shifts/__init__.py
rodrigo-arenas/pyworkforce
f3986ebbc3c48a8ae08dc04dfb939ac6a9516233
[ "MIT" ]
3
2021-03-13T02:11:39.000Z
2021-04-08T01:27:36.000Z
pyworkforce/shifts/__init__.py
rodrigo-arenas/pyworkforce
f3986ebbc3c48a8ae08dc04dfb939ac6a9516233
[ "MIT" ]
1
2022-01-04T11:06:47.000Z
2022-01-04T11:06:47.000Z
from pyworkforce.shifts.shifts_selection import MinAbsDifference, MinRequiredResources __all__ = ["MinAbsDifference", "MinRequiredResources"]
35.75
86
0.853147
11
143
10.636364
0.727273
0.615385
0
0
0
0
0
0
0
0
0
0
0.06993
143
3
87
47.666667
0.879699
0
0
0
0
0
0.251748
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
24b7007acbac195d6140ae0970479752ed5fe6bb
180
py
Python
backend/src/profiling/valentine.py
OpertusMundi/discovery-service
82abd8e9e997075d840bdccbcc9f991009c6cec6
[ "Apache-2.0" ]
null
null
null
backend/src/profiling/valentine.py
OpertusMundi/discovery-service
82abd8e9e997075d840bdccbcc9f991009c6cec6
[ "Apache-2.0" ]
null
null
null
backend/src/profiling/valentine.py
OpertusMundi/discovery-service
82abd8e9e997075d840bdccbcc9f991009c6cec6
[ "Apache-2.0" ]
null
null
null
from valentine import valentine_match, valentine_metrics from valentine.algorithms import Coma def match(df1, df2): return valentine_match(df1, df2, Coma(strategy="COMA_OPT"))
36
63
0.805556
25
180
5.64
0.52
0.184397
0.156028
0
0
0
0
0
0
0
0
0.025
0.111111
180
5
63
36
0.85625
0
0
0
0
0
0.044199
0
0
0
0
0
0
1
0.25
false
0
0.5
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
0
0
0
5
24b93c1e06f9cfc88fa42f5bb0b8bb2c172587d0
108
py
Python
pyOfferUp/__init__.py
oscar0812/pyOfferUp
e6f58cbc7c0314ab50b9aa1af3ea58d777b3673f
[ "Apache-2.0" ]
null
null
null
pyOfferUp/__init__.py
oscar0812/pyOfferUp
e6f58cbc7c0314ab50b9aa1af3ea58d777b3673f
[ "Apache-2.0" ]
null
null
null
pyOfferUp/__init__.py
oscar0812/pyOfferUp
e6f58cbc7c0314ab50b9aa1af3ea58d777b3673f
[ "Apache-2.0" ]
null
null
null
from pyOfferUp.fetch import get_posts, get_posts_by_lat_lon, driver_executable_path import pyOfferUp.places
36
83
0.888889
17
108
5.235294
0.764706
0.179775
0
0
0
0
0
0
0
0
0
0
0.074074
108
2
84
54
0.89
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
700b13bf0cc39e4b1b880a5f2721a88c9337eff7
99
py
Python
veracode/API/__init__.py
ctcampbell/veracode-python
519706785c4ab18c3392cd64fd79d7894adde10e
[ "BSD-3-Clause" ]
13
2019-03-16T03:11:50.000Z
2021-03-16T13:02:45.000Z
veracode/API/__init__.py
ctcampbell/veracode-python
519706785c4ab18c3392cd64fd79d7894adde10e
[ "BSD-3-Clause" ]
6
2020-01-14T21:45:55.000Z
2022-03-03T17:56:43.000Z
veracode/API/__init__.py
ctcampbell/veracode-python
519706785c4ab18c3392cd64fd79d7894adde10e
[ "BSD-3-Clause" ]
10
2020-01-20T13:34:55.000Z
2021-09-28T21:21:22.000Z
from veracode.API import core, admin, flawreport, mitigation, results, sandbox, upload, exceptions
49.5
98
0.808081
12
99
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
99
1
99
99
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7030f0822b6f42fd509caef02d70db62b6afa7b5
443
py
Python
tests/test_django_cohort_analysis.py
jturner30/django_cohort_analysis
1fb25bf8bd64db8a4ef7a1f4b730a291a0634a07
[ "BSD-3-Clause" ]
null
null
null
tests/test_django_cohort_analysis.py
jturner30/django_cohort_analysis
1fb25bf8bd64db8a4ef7a1f4b730a291a0634a07
[ "BSD-3-Clause" ]
null
null
null
tests/test_django_cohort_analysis.py
jturner30/django_cohort_analysis
1fb25bf8bd64db8a4ef7a1f4b730a291a0634a07
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_django_cohort_analysis ---------------------------------- Tests for `django_cohort_analysis` module. """ import unittest from django_cohort_analysis import cohorts class TestDjango_cohort_analysis(unittest.TestCase): def setUp(self): pass def test_something(self): pass def tearDown(self): pass if __name__ == '__main__': unittest.main()
15.275862
52
0.62754
49
443
5.306122
0.612245
0.215385
0.230769
0
0
0
0
0
0
0
0
0.002809
0.196388
443
28
53
15.821429
0.727528
0.336343
0
0.272727
0
0
0.02807
0
0
0
0
0
0
1
0.272727
false
0.272727
0.181818
0
0.545455
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
704267f8e8e5321f139eec1f684c7edde344a424
88
py
Python
app/venues/admin.py
swelanauguste/friendly-palm-tree
9e9709b87b645b709b3ac8aa2f57cf29dd98e2cb
[ "MIT" ]
null
null
null
app/venues/admin.py
swelanauguste/friendly-palm-tree
9e9709b87b645b709b3ac8aa2f57cf29dd98e2cb
[ "MIT" ]
null
null
null
app/venues/admin.py
swelanauguste/friendly-palm-tree
9e9709b87b645b709b3ac8aa2f57cf29dd98e2cb
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Venue admin.site.register(Venue)
14.666667
32
0.806818
13
88
5.461538
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.125
88
5
33
17.6
0.922078
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7053bd15ae9b07db6287bab77ffecdee9ccd36b8
237
py
Python
tests_src/pyifx.graphics.detect_edges.py
Video-Lab/pyifx
9b9aaa690059f3148833041eebdc4de7cc8d5459
[ "MIT" ]
null
null
null
tests_src/pyifx.graphics.detect_edges.py
Video-Lab/pyifx
9b9aaa690059f3148833041eebdc4de7cc8d5459
[ "MIT" ]
null
null
null
tests_src/pyifx.graphics.detect_edges.py
Video-Lab/pyifx
9b9aaa690059f3148833041eebdc4de7cc8d5459
[ "MIT" ]
null
null
null
from test_vars import * set_paths("../tests/imgs/graphics/detect_edges") pyifx.graphics.detect_edges(img1) pyifx.graphics.detect_edges(img_list) pyifx.graphics.detect_edges(img_vol) call_error_test("pyifx.graphics.detect_edges", ['s'])
29.625
53
0.810127
36
237
5.027778
0.527778
0.38674
0.524862
0.530387
0.298343
0
0
0
0
0
0
0.004425
0.046414
237
8
53
29.625
0.79646
0
0
0
0
0
0.264706
0.260504
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
7082e0d948c69bac9637d08342c5896fcdb22f38
326
py
Python
orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cpu/torch_interop_utils/__init__.py
lchang20/onnxruntime
97b8f6f394ae02c73ed775f456fd85639c91ced1
[ "MIT" ]
1
2022-03-09T21:24:30.000Z
2022-03-09T21:24:30.000Z
orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cpu/torch_interop_utils/__init__.py
lchang20/onnxruntime
97b8f6f394ae02c73ed775f456fd85639c91ced1
[ "MIT" ]
30
2021-09-26T08:05:58.000Z
2022-03-31T10:45:30.000Z
orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cpu/torch_interop_utils/__init__.py
lchang20/onnxruntime
97b8f6f394ae02c73ed775f456fd85639c91ced1
[ "MIT" ]
null
null
null
def clear_all_grad_fns(): from onnxruntime.training.ortmodule.torch_cpp_extensions import torch_interop_utils torch_interop_utils.clear_all_grad_fns() import atexit # Clear all gradient functions, to avoid a deadlock issue. # Check the called function for more detailed comments. atexit.register(clear_all_grad_fns)
32.6
87
0.828221
48
326
5.3125
0.666667
0.12549
0.141176
0.176471
0
0
0
0
0
0
0
0
0.119632
326
9
88
36.222222
0.888502
0.337423
0
0
0
0
0
0
0
0
0
0
0
1
0.2
true
0
0.4
0
0.6
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
708f8cd6ca2418620a6d1550a89a6debcb6e20b7
129
py
Python
Code/YOLO/darkflow/darkflow/dark/layer.py
kalvin-osoro/ml_project
bf0bdc5719f2712682dd070045a5f1edf933a0c4
[ "Apache-2.0" ]
null
null
null
Code/YOLO/darkflow/darkflow/dark/layer.py
kalvin-osoro/ml_project
bf0bdc5719f2712682dd070045a5f1edf933a0c4
[ "Apache-2.0" ]
null
null
null
Code/YOLO/darkflow/darkflow/dark/layer.py
kalvin-osoro/ml_project
bf0bdc5719f2712682dd070045a5f1edf933a0c4
[ "Apache-2.0" ]
null
null
null
version https://git-lfs.github.com/spec/v1 oid sha256:e63615f4951aa361af944ad7d4412a3ade485350e7efda26e1ccc43ea7111487 size 2083
32.25
75
0.883721
13
129
8.769231
1
0
0
0
0
0
0
0
0
0
0
0.390244
0.046512
129
3
76
43
0.536585
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
709c8c655c53938e276944934b29a903e3649a6f
112
py
Python
finmodelprep/api/commodities.py
ignasrum/finmodelprep
5751cb0caac2a7c866111ee6231255e523133cc9
[ "MIT" ]
null
null
null
finmodelprep/api/commodities.py
ignasrum/finmodelprep
5751cb0caac2a7c866111ee6231255e523133cc9
[ "MIT" ]
null
null
null
finmodelprep/api/commodities.py
ignasrum/finmodelprep
5751cb0caac2a7c866111ee6231255e523133cc9
[ "MIT" ]
null
null
null
from finmodelprep.api.api import BASE_URL, download ### commodities prices ### historical commodities prices
16
51
0.785714
13
112
6.692308
0.769231
0.390805
0
0
0
0
0
0
0
0
0
0
0.133929
112
6
52
18.666667
0.896907
0.428571
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
56477271d71d97c8221c1c26133aef6549aff5ee
213
py
Python
python/unit_testing/calc.py
Uttam580/basic_ml
cf8b6daee70f95e922cffc88e11e39c59bc032f9
[ "MIT" ]
4
2019-11-11T10:18:26.000Z
2020-06-05T04:14:45.000Z
python/unit_testing/calc.py
Uttam580/Machine_learning
cf8b6daee70f95e922cffc88e11e39c59bc032f9
[ "MIT" ]
null
null
null
python/unit_testing/calc.py
Uttam580/Machine_learning
cf8b6daee70f95e922cffc88e11e39c59bc032f9
[ "MIT" ]
1
2020-08-11T14:04:14.000Z
2020-08-11T14:04:14.000Z
def add(x,y): return x+y def subtract(x,y): return x-y def multiply(x,y): return x*y def divide(x,y): if y==0: raise ValueError('can not divide by zero') return x/y
15.214286
51
0.535211
38
213
3
0.421053
0.140351
0.280702
0.236842
0.342105
0.342105
0
0
0
0
0
0.007092
0.338028
213
14
52
15.214286
0.801418
0
0
0
0
0
0.109453
0
0
0
0
0
0
1
0.4
false
0
0
0.3
0.8
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
565a1b5ef0f14c2a615a3a0568c39e095c8dfb33
32
py
Python
python/f_calls_f.py
fuzzynoise/ono
ca11f87b6afc3b29708355008f6f79d5d839607a
[ "MIT" ]
null
null
null
python/f_calls_f.py
fuzzynoise/ono
ca11f87b6afc3b29708355008f6f79d5d839607a
[ "MIT" ]
null
null
null
python/f_calls_f.py
fuzzynoise/ono
ca11f87b6afc3b29708355008f6f79d5d839607a
[ "MIT" ]
null
null
null
def f(): input('>') f()
8
14
0.3125
4
32
2.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.375
32
3
15
10.666667
0.5
0
0
0
0
0
0.03125
0
0
0
0
0
0
1
0.333333
true
0
0
0
0.333333
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
5
567aede3f6c6c154070d4b532f695e0b84f8fac4
18
py
Python
just_config/version.py
senpay/just_config
2b3d7f6d288b53068f9393c51974ba2cacdd6440
[ "MIT" ]
1
2020-05-29T13:29:25.000Z
2020-05-29T13:29:25.000Z
just_config/version.py
senpay/just_config
2b3d7f6d288b53068f9393c51974ba2cacdd6440
[ "MIT" ]
null
null
null
just_config/version.py
senpay/just_config
2b3d7f6d288b53068f9393c51974ba2cacdd6440
[ "MIT" ]
null
null
null
VERSION = '20.03'
9
17
0.611111
3
18
3.666667
1
0
0
0
0
0
0
0
0
0
0
0.266667
0.166667
18
1
18
18
0.466667
0
0
0
0
0
0.277778
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3b10603c93ebe621220173885c00eba9a854d1e9
43
py
Python
sparksampling/tests/__init__.py
Wh1isper/pyspark-sampling
5d5883491122608ff731bb6e7f7aa0887beb556c
[ "Apache-2.0" ]
2
2021-12-08T14:53:07.000Z
2021-12-08T14:53:08.000Z
sparksampling/tests/__init__.py
Wh1isper/pyspark-sampling
5d5883491122608ff731bb6e7f7aa0887beb556c
[ "Apache-2.0" ]
null
null
null
sparksampling/tests/__init__.py
Wh1isper/pyspark-sampling
5d5883491122608ff731bb6e7f7aa0887beb556c
[ "Apache-2.0" ]
2
2021-11-30T03:26:19.000Z
2021-12-08T16:28:49.000Z
"""Unit test package for sparksampling."""
21.5
42
0.72093
5
43
6.2
1
0
0
0
0
0
0
0
0
0
0
0
0.116279
43
1
43
43
0.815789
0.837209
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
3b38a5901b2d7d16e9d4bd8ae17f73fa26fbd234
159
py
Python
venv/Lib/site-packages/nbdime/tests/__init__.py
PeerHerholz/guideline_jupyter_book
ce445e4be0d53370b67708a22550565b90d71ac6
[ "BSD-3-Clause" ]
2
2021-02-16T16:17:07.000Z
2021-11-08T20:27:13.000Z
venv/Lib/site-packages/nbdime/tests/__init__.py
PeerHerholz/guideline_jupyter_book
ce445e4be0d53370b67708a22550565b90d71ac6
[ "BSD-3-Clause" ]
null
null
null
venv/Lib/site-packages/nbdime/tests/__init__.py
PeerHerholz/guideline_jupyter_book
ce445e4be0d53370b67708a22550565b90d71ac6
[ "BSD-3-Clause" ]
4
2020-11-14T17:05:36.000Z
2020-11-16T18:44:54.000Z
# coding: utf-8 # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import unicode_literals
22.714286
58
0.786164
22
159
5.454545
0.954545
0
0
0
0
0
0
0
0
0
0
0.007463
0.157233
159
6
59
26.5
0.88806
0.691824
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3b5e290e59b68d530e0450c94e9d9b9cdbd7c910
307
py
Python
test_conversion.py
cagis2019/conversion_tofix
d27f5df148bec658b872bf767b1aeed798c1720c
[ "Unlicense" ]
2
2019-08-05T21:06:58.000Z
2020-08-03T17:52:23.000Z
test_conversion.py
cagis2019/conversion_tofix
d27f5df148bec658b872bf767b1aeed798c1720c
[ "Unlicense" ]
7
2017-08-01T20:41:42.000Z
2020-08-03T19:01:34.000Z
test_conversion.py
cagis2019/conversion_tofix
d27f5df148bec658b872bf767b1aeed798c1720c
[ "Unlicense" ]
92
2017-08-01T18:17:35.000Z
2021-08-02T21:54:00.000Z
import conversion assert conversion.dollars2cents(1) == 100 assert conversion.dollars2cents(.1) == 10 assert conversion.dollars2cents(0) == 0 assert conversion.gallons2liters(1) == 3.78541 assert conversion.gallons2liters(2) == 7.57082 assert conversion.gallons2liters(0) == 0 print("Testing completed")
25.583333
46
0.778502
37
307
6.459459
0.459459
0.401674
0.364017
0.251046
0
0
0
0
0
0
0
0.112319
0.100977
307
11
47
27.909091
0.753623
0
0
0
0
0
0.055375
0
0
0
0
0
0.75
1
0
true
0
0.125
0
0.125
0.125
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
5
3b69e5a4fc6ef4d5dcc4efb1ddeab4bd8dfd20f0
65
py
Python
script.deluge/resources/lib/deluge_client/__init__.py
ogero/Deluge-Manager-XBMC
10c4f2a93ac1fffba01209444ba5e597036b968b
[ "MIT" ]
null
null
null
script.deluge/resources/lib/deluge_client/__init__.py
ogero/Deluge-Manager-XBMC
10c4f2a93ac1fffba01209444ba5e597036b968b
[ "MIT" ]
null
null
null
script.deluge/resources/lib/deluge_client/__init__.py
ogero/Deluge-Manager-XBMC
10c4f2a93ac1fffba01209444ba5e597036b968b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from deluge_client.client import Deluge
16.25
39
0.692308
9
65
4.888889
0.777778
0
0
0
0
0
0
0
0
0
0
0.018182
0.153846
65
3
40
21.666667
0.781818
0.323077
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3b8d44e48c392af67f18c62241eef8be94689cc5
521
py
Python
src/queries.py
conrad-evans/sports_betting_api
baa80df5608c1cc244f51be86ba29eaabd8f031e
[ "MIT" ]
null
null
null
src/queries.py
conrad-evans/sports_betting_api
baa80df5608c1cc244f51be86ba29eaabd8f031e
[ "MIT" ]
null
null
null
src/queries.py
conrad-evans/sports_betting_api
baa80df5608c1cc244f51be86ba29eaabd8f031e
[ "MIT" ]
null
null
null
CREATE_ODDS = """INSERT INTO odds (league, home_team, away_team, home_team_win_odds, away_team_win_odds, draw_odds, game_date) VALUES (?, ?, ?, ?, ?, ?, ?)""" READ_ALL_ODDS = """SELECT * FROM odds""" UPDATE_ODDS = """UPATE odds SET league = ?, home_team = ?, away_team = ?, home_team_win_odds = ?, away_team_win_odds = ?, draw_odds = ?, game_date = ? WHERE league = ?, home_team = ?, away_team = ? AND game_date = ?""" DELETE_ODDS = """DELETE FROM odds WHERE league = ?, home_team = ?, away_team = ? AND game_date = ?"""
104.2
218
0.660269
74
521
4.22973
0.310811
0.153355
0.178914
0.230032
0.677316
0.677316
0.677316
0.677316
0.677316
0.434505
0
0
0.163148
521
4
219
130.25
0.71789
0
0
0
0
0.5
0.834933
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3b8ece904ecfeee4043ef5f9a0077bc0fdd160e9
44
py
Python
snakeladders/__init__.py
GregoryMarchesan/snakeladders
fe855e239fc95e8e0084d517a506904b16db83c8
[ "MIT" ]
null
null
null
snakeladders/__init__.py
GregoryMarchesan/snakeladders
fe855e239fc95e8e0084d517a506904b16db83c8
[ "MIT" ]
null
null
null
snakeladders/__init__.py
GregoryMarchesan/snakeladders
fe855e239fc95e8e0084d517a506904b16db83c8
[ "MIT" ]
null
null
null
from .SnakesAndLadders import SnakeLadders
14.666667
42
0.863636
4
44
9.5
1
0
0
0
0
0
0
0
0
0
0
0
0.113636
44
2
43
22
0.974359
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
8e64523acc2ea93d012025d1b0d5f09145c84168
572
py
Python
SiteGadget-main/insides/Banner.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
2
2021-11-17T03:35:03.000Z
2021-12-08T06:00:31.000Z
SiteGadget-main/insides/Banner.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
null
null
null
SiteGadget-main/insides/Banner.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
2
2021-11-05T18:07:48.000Z
2022-02-24T21:25:07.000Z
from insides.Colors import Colors def Banner(): print(f'''{Colors.BOLD} ______ _ _______ _ / _____|_) _ (_______) | | _ ( (____ _ _| |_ _____ _ ___ _____ __| | ____ _____ _| |_ \____ \| (_ _) ___ | | | (_ (____ |/ _ |/ _ | ___ (_ _) _____) ) | | |_| ____| | |___) / ___ ( (_| ( (_| | ____| | |_ (______/|_| \__)_____) \_____/\_____|\____|\___ |_____) \__) (_____|v1.0 https://github.com/alpkeskin {Colors.ENDC}''')
44
64
0.421329
19
572
4.736842
0.842105
0
0
0
0
0
0
0
0
0
0
0.005917
0.409091
572
13
65
44
0.260355
0
0
0
0
0.333333
0.886562
0.041885
0
0
0
0
0
1
0.083333
true
0
0.083333
0
0.166667
0.083333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d932614652d664d24e2618ab6108cce1e944dd5c
105
py
Python
Codeforces/85 Beta Division 2/Problem A/A.py
VastoLorde95/Competitive-Programming
6c990656178fb0cd33354cbe5508164207012f24
[ "MIT" ]
170
2017-07-25T14:47:29.000Z
2022-01-26T19:16:31.000Z
Codeforces/85 Beta Division 2/Problem A/A.py
navodit15/Competitive-Programming
6c990656178fb0cd33354cbe5508164207012f24
[ "MIT" ]
null
null
null
Codeforces/85 Beta Division 2/Problem A/A.py
navodit15/Competitive-Programming
6c990656178fb0cd33354cbe5508164207012f24
[ "MIT" ]
55
2017-07-28T06:17:33.000Z
2021-10-31T03:06:22.000Z
s = raw_input().lower() t = raw_input().lower() if s == t: print 0 elif s < t: print -1 else: print 1
11.666667
23
0.590476
21
105
2.857143
0.52381
0.266667
0.433333
0
0
0
0
0
0
0
0
0.037037
0.228571
105
8
24
13.125
0.703704
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.375
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
d947a15912f218246eccc8cf132a2538a48d402d
53
py
Python
src/nashpy/__init__.py
Fil/Nashpy
405abe23cb655a084ea4a767b97e03fa24c3d5d2
[ "MIT" ]
null
null
null
src/nashpy/__init__.py
Fil/Nashpy
405abe23cb655a084ea4a767b97e03fa24c3d5d2
[ "MIT" ]
null
null
null
src/nashpy/__init__.py
Fil/Nashpy
405abe23cb655a084ea4a767b97e03fa24c3d5d2
[ "MIT" ]
1
2020-10-30T09:41:20.000Z
2020-10-30T09:41:20.000Z
from .game import * from .version import __version__
17.666667
32
0.792453
7
53
5.428571
0.571429
0
0
0
0
0
0
0
0
0
0
0
0.150943
53
2
33
26.5
0.844444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d96ed93650f0065704faf57f762dc5aadadd8aaf
118
py
Python
src/primepackage/__init__.py
lin-chen-Langley/prime
8981dd6cea77aaa1d05cd1c24f57bcb7e473186d
[ "MIT" ]
null
null
null
src/primepackage/__init__.py
lin-chen-Langley/prime
8981dd6cea77aaa1d05cd1c24f57bcb7e473186d
[ "MIT" ]
null
null
null
src/primepackage/__init__.py
lin-chen-Langley/prime
8981dd6cea77aaa1d05cd1c24f57bcb7e473186d
[ "MIT" ]
null
null
null
from primepackage.primeio import write_primes, read_primes from primepackage.primemodule import is_prime, get_n_prime
39.333333
58
0.881356
17
118
5.823529
0.705882
0.323232
0
0
0
0
0
0
0
0
0
0
0.084746
118
2
59
59
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d996e35ee5a8ee9f5e8888d6296616e6e726fe0f
195
py
Python
sciencebeam_judge/evaluation/scoring_types/scoring_type.py
elifesciences/sciencebeam-judge
357f1b4266674611b24371224468db268ed4574e
[ "MIT" ]
null
null
null
sciencebeam_judge/evaluation/scoring_types/scoring_type.py
elifesciences/sciencebeam-judge
357f1b4266674611b24371224468db268ed4574e
[ "MIT" ]
189
2018-01-11T17:14:18.000Z
2022-03-28T17:30:11.000Z
sciencebeam_judge/evaluation/scoring_types/scoring_type.py
elifesciences/sciencebeam-judge
357f1b4266674611b24371224468db268ed4574e
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod class ScoringType(ABC): @abstractmethod def score(self, expected, actual, include_values=False, measures=None, convert_to_lower=False): pass
24.375
99
0.74359
24
195
5.916667
0.833333
0.239437
0
0
0
0
0
0
0
0
0
0
0.174359
195
7
100
27.857143
0.881988
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0.2
0.2
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
79a5cd688aa67e89f5a49bf8950a97f5d3980978
72
py
Python
ci/site/sitecustomize.py
hboshnak/pyrex
8262423d48348cb3cd8cfb5d17c28bdc90628f47
[ "Apache-2.0" ]
22
2019-01-24T21:22:35.000Z
2022-03-11T10:23:05.000Z
ci/site/sitecustomize.py
hboshnak/pyrex
8262423d48348cb3cd8cfb5d17c28bdc90628f47
[ "Apache-2.0" ]
41
2019-02-11T15:16:28.000Z
2022-01-30T15:33:57.000Z
ci/site/sitecustomize.py
hboshnak/pyrex
8262423d48348cb3cd8cfb5d17c28bdc90628f47
[ "Apache-2.0" ]
12
2019-01-29T20:08:53.000Z
2022-01-04T12:52:47.000Z
import coverage coverage.current_coverage = coverage.process_startup()
18
54
0.847222
8
72
7.375
0.625
0.542373
0
0
0
0
0
0
0
0
0
0
0.083333
72
3
55
24
0.893939
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
79adc10fbbc94b8ebd86624789b516b436e231f6
212
py
Python
python-algorithm/leetcode/problem_1994.py
isudox/leetcode-solution
60085e64deaf396a171367affc94b18114565c43
[ "MIT" ]
5
2017-06-11T09:19:34.000Z
2019-01-16T16:58:31.000Z
python-algorithm/leetcode/problem_1994.py
isudox/leetcode-solution
60085e64deaf396a171367affc94b18114565c43
[ "MIT" ]
null
null
null
python-algorithm/leetcode/problem_1994.py
isudox/leetcode-solution
60085e64deaf396a171367affc94b18114565c43
[ "MIT" ]
1
2019-03-02T15:50:43.000Z
2019-03-02T15:50:43.000Z
"""1994. The Number of Good Subsets https://leetcode.com/problems/the-number-of-good-subsets/ """ from typing import List class Solution: def numberOfGoodSubsets(self, nums: List[int]) -> int: pass
21.2
58
0.70283
29
212
5.137931
0.758621
0.120805
0.147651
0.201342
0.295302
0
0
0
0
0
0
0.022599
0.165094
212
9
59
23.555556
0.819209
0.424528
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0.25
0.25
0
0.75
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
79bbf8b171fdcfa2ab6b503125cb3e05d1f64222
7,999
py
Python
resolwe/permissions/tests/test_shortcuts.py
jkokosar/resolwe
c23db01494ef863fb2f8e130a59198cfd10bc7aa
[ "Apache-2.0" ]
null
null
null
resolwe/permissions/tests/test_shortcuts.py
jkokosar/resolwe
c23db01494ef863fb2f8e130a59198cfd10bc7aa
[ "Apache-2.0" ]
null
null
null
resolwe/permissions/tests/test_shortcuts.py
jkokosar/resolwe
c23db01494ef863fb2f8e130a59198cfd10bc7aa
[ "Apache-2.0" ]
null
null
null
# pylint: disable=missing-docstring from __future__ import absolute_import, division, print_function, unicode_literals import unittest import six from django.contrib.auth import get_user_model from django.contrib.auth.models import AnonymousUser, Group from guardian.shortcuts import assign_perm from guardian.models import GroupObjectPermission, UserObjectPermission from resolwe.flow.models import Collection from resolwe.permissions.shortcuts import get_user_group_perms, get_object_perms class UserGroupTestCase(unittest.TestCase): def setUp(self): self.user = get_user_model().objects.create(username="test_user") self.group1 = Group.objects.create(name="Test group 1") self.group2 = Group.objects.create(name="Test group 2") self.collection = Collection.objects.create( contributor=self.user, name="Test collection", ) # This collection is here to make sure that other permissions # don't affect tested queries. collection2 = Collection.objects.create( contributor=self.user, name="Test collection 2", ) assign_perm("view_collection", self.user, collection2) assign_perm("view_collection", self.group1, collection2) def tearDown(self): GroupObjectPermission.objects.all().delete() UserObjectPermission.objects.all().delete() Collection.objects.all().delete() Group.objects.all().delete() # `public` user is created by guardian get_user_model().objects.exclude(username="public").delete() def test_user(self): assign_perm("view_collection", self.user, self.collection) assign_perm("edit_collection", self.user, self.collection) user_perms, group_perms = get_user_group_perms(self.user, self.collection) self.assertEqual(len(group_perms), 0) six.assertCountEqual(self, user_perms, ["view_collection", "edit_collection"]) def test_user_in_group(self): self.group1.user_set.add(self.user) assign_perm("view_collection", self.group1, self.collection) assign_perm("edit_collection", self.group1, self.collection) user_perms, group_perms = get_user_group_perms(self.user, self.collection) 
self.assertEqual(len(group_perms), 1) six.assertCountEqual(self, group_perms[0][2], ["view_collection", "edit_collection"]) self.assertEqual(len(user_perms), 0) assign_perm("view_collection", self.user, self.collection) user_perms, group_perms = get_user_group_perms(self.user, self.collection) self.assertEqual(len(group_perms), 1) six.assertCountEqual(self, group_perms[0][2], ["view_collection", "edit_collection"]) self.assertEqual(len(user_perms), 1) six.assertCountEqual(self, user_perms, ["view_collection"]) def test_user_in_multiple_groups(self): self.group1.user_set.add(self.user) self.group2.user_set.add(self.user) assign_perm("view_collection", self.group1, self.collection) assign_perm("edit_collection", self.group1, self.collection) assign_perm("view_collection", self.group2, self.collection) user_perms, group_perms = get_user_group_perms(self.user, self.collection) self.assertEqual(len(group_perms), 2) self.assertEqual(group_perms[0][0], self.group1.pk) six.assertCountEqual(self, group_perms[0][2], ["view_collection", "edit_collection"]) self.assertEqual(group_perms[1][0], self.group2.pk) six.assertCountEqual(self, group_perms[1][2], ["view_collection"]) self.assertEqual(len(user_perms), 0) def test_group(self): assign_perm("view_collection", self.group1, self.collection) assign_perm("edit_collection", self.group1, self.collection) user_perms, group_perms = get_user_group_perms(self.group1, self.collection) self.assertEqual(len(group_perms), 1) six.assertCountEqual(self, group_perms[0][2], ["view_collection", "edit_collection"]) self.assertEqual(len(user_perms), 0) class ObjectPermsTestCase(unittest.TestCase): def setUp(self): self.user1 = get_user_model().objects.create(username="test_user1") self.user2 = get_user_model().objects.create(username="test_user2") self.group1 = Group.objects.create(name="Test group 1") self.group2 = Group.objects.create(name="Test group 2") self.anonymous = AnonymousUser() self.collection = Collection.objects.create( 
contributor=self.user1, name="Test collection", ) def tearDown(self): GroupObjectPermission.objects.all().delete() UserObjectPermission.objects.all().delete() Collection.objects.all().delete() Group.objects.all().delete() # `public` user is created by guardian get_user_model().objects.exclude(username="public").delete() def test_all_permissions(self): self.group1.user_set.add(self.user1) perms = get_object_perms(self.collection) self.assertEqual(len(perms), 0) assign_perm("view_collection", self.user1, self.collection) assign_perm("edit_collection", self.user1, self.collection) assign_perm("view_collection", self.user2, self.collection) expected_perms = [ {'permissions': ['edit', 'view'], 'type': 'user', 'id': self.user1.pk, 'name': 'test_user1'}, {'permissions': ['view'], 'type': 'user', 'id': self.user2.pk, 'name': 'test_user2'}, ] perms = get_object_perms(self.collection) six.assertCountEqual(self, expected_perms, perms) assign_perm("view_collection", self.group1, self.collection) assign_perm("edit_collection", self.group1, self.collection) assign_perm("view_collection", self.group2, self.collection) expected_perms.extend([ {'permissions': ['edit', 'view'], 'type': 'group', 'id': self.group1.pk, 'name': 'Test group 1'}, {'permissions': ['view'], 'type': 'group', 'id': self.group2.pk, 'name': 'Test group 2'}, ]) perms = get_object_perms(self.collection) six.assertCountEqual(self, expected_perms, perms) assign_perm("view_collection", self.anonymous, self.collection) expected_perms.append( {'permissions': ['view'], 'type': 'public'}, ) perms = get_object_perms(self.collection) six.assertCountEqual(self, expected_perms, perms) def test_user_permissions(self): self.group1.user_set.add(self.user1) assign_perm("view_collection", self.user1, self.collection) assign_perm("edit_collection", self.user1, self.collection) assign_perm("view_collection", self.user2, self.collection) assign_perm("view_collection", self.group1, self.collection) assign_perm("edit_collection", 
self.group1, self.collection) assign_perm("view_collection", self.group2, self.collection) expected_perms = [ {'permissions': ['edit', 'view'], 'type': 'user', 'id': self.user1.pk, 'name': 'test_user1'}, {'permissions': ['edit', 'view'], 'type': 'group', 'id': self.group1.pk, 'name': 'Test group 1'}, ] perms = get_object_perms(self.collection, self.user1) six.assertCountEqual(self, expected_perms, perms) self.group2.user_set.add(self.user1) expected_perms.append( {'permissions': ['view'], 'type': 'group', 'id': self.group2.pk, 'name': 'Test group 2'}, ) perms = get_object_perms(self.collection, self.user1) six.assertCountEqual(self, expected_perms, perms) assign_perm("view_collection", self.anonymous, self.collection) expected_perms.append( {'permissions': ['view'], 'type': 'public'}, ) perms = get_object_perms(self.collection, self.user1) six.assertCountEqual(self, expected_perms, perms)
44.687151
109
0.673084
939
7,999
5.538871
0.100107
0.107672
0.065757
0.083061
0.84676
0.832532
0.788694
0.719477
0.699096
0.659104
0
0.014012
0.197025
7,999
178
110
44.938202
0.795734
0.024503
0
0.64539
0
0
0.135565
0
0
0
0
0
0.177305
1
0.070922
false
0
0.06383
0
0.148936
0.007092
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8dc805fd790413b3149e53867c31b3a4fe3ed158
103
py
Python
chapter03/my_test1.py
stavinski/grayhat_python_redux
882b66616426a5dc774331ad1894049d19702424
[ "MIT" ]
4
2019-07-03T08:41:03.000Z
2022-02-22T03:36:01.000Z
chapter03/my_test1.py
stavinski/grayhat_python_redux
882b66616426a5dc774331ad1894049d19702424
[ "MIT" ]
null
null
null
chapter03/my_test1.py
stavinski/grayhat_python_redux
882b66616426a5dc774331ad1894049d19702424
[ "MIT" ]
null
null
null
import my_debugger debugger = my_debugger.debugger() debugger.load("c:\\windows\\system32\\calc.exe")
20.6
48
0.76699
14
103
5.5
0.642857
0.623377
0.467532
0
0
0
0
0
0
0
0
0.020833
0.067961
103
4
49
25.75
0.78125
0
0
0
0
0
0.300971
0.300971
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
5c1322cb65dc2f1dc749129dcdf75dea7a145c76
487
py
Python
project/user/finders/user_finder.py
fv316/flask-template-project
026459b299c7aa4d82c2b59b98e3c929b4786a78
[ "MIT" ]
9
2017-02-08T21:42:15.000Z
2021-12-15T05:18:18.000Z
project/user/finders/user_finder.py
fv316/flask-template-project
026459b299c7aa4d82c2b59b98e3c929b4786a78
[ "MIT" ]
10
2016-07-25T11:00:08.000Z
2019-09-25T14:56:40.000Z
project/user/finders/user_finder.py
fv316/flask-template-project
026459b299c7aa4d82c2b59b98e3c929b4786a78
[ "MIT" ]
7
2016-11-01T20:11:03.000Z
2020-02-04T14:25:49.000Z
from project.user.models.user import User class UserFinder: @classmethod def all(cls): return User.query.filter().all() @classmethod def by_id(cls, user_id): return User.query.filter(User.id == user_id).first() @classmethod def by_username(cls, username): return User.query.filter(User.username == username).first() @classmethod def by_api_key(cls, api_key): return User.query.filter(User.api_key == api_key).first()
23.190476
67
0.661191
66
487
4.742424
0.30303
0.178914
0.191693
0.268371
0.239617
0
0
0
0
0
0
0
0.215606
487
20
68
24.35
0.819372
0
0
0.285714
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.071429
0.285714
0.714286
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
30bb9bdfe431b4bb1c31803da3b474bb7db25e8d
73
py
Python
optimizer/__init__.py
ishine/FastVocoder
ac716e6df8cd03dbfc4a969d8a5ed42c055c38aa
[ "MIT" ]
null
null
null
optimizer/__init__.py
ishine/FastVocoder
ac716e6df8cd03dbfc4a969d8a5ed42c055c38aa
[ "MIT" ]
null
null
null
optimizer/__init__.py
ishine/FastVocoder
ac716e6df8cd03dbfc4a969d8a5ed42c055c38aa
[ "MIT" ]
null
null
null
from .radam import * from .optimizers import * from torch.optim import *
18.25
25
0.753425
10
73
5.5
0.6
0.363636
0
0
0
0
0
0
0
0
0
0
0.164384
73
3
26
24.333333
0.901639
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a50d375758b9ca2294cf5f7b4723a8348262782e
8,485
py
Python
migrations/0001_initial.py
molgor/FIA-django
b18786ab5522007cd1f7b3bb83d5e44ebaa147db
[ "BSD-3-Clause" ]
null
null
null
migrations/0001_initial.py
molgor/FIA-django
b18786ab5522007cd1f7b3bb83d5e44ebaa147db
[ "BSD-3-Clause" ]
null
null
null
migrations/0001_initial.py
molgor/FIA-django
b18786ab5522007cd1f7b3bb83d5e44ebaa147db
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-24 15:24
# NOTE(review): auto-generated initial migration — do not hand-edit the field
# definitions; make schema changes in a follow-up migration instead.
from __future__ import unicode_literals

import django.contrib.gis.db.models.fields
from django.db import migrations, models


class Migration(migrations.Migration):
    # First migration of the app: creates all tables, each (except
    # BiomassGroups) carrying a GeoDjango geometry column with SRID 4326.

    initial = True

    dependencies = [
    ]

    operations = [
        # Lookup table: all columns are indexed 254-char text fields.
        migrations.CreateModel(
            name='BiomassGroups',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('spp_group', models.CharField(db_index=True, max_length=254)),
                ('spcd', models.CharField(db_index=True, max_length=254)),
                ('family', models.CharField(db_index=True, max_length=254)),
                ('newgenus', models.CharField(db_index=True, max_length=254)),
                ('newspecies', models.CharField(db_index=True, max_length=254)),
                ('usfs_wd', models.CharField(db_index=True, max_length=254)),
                ('chave_wd', models.CharField(db_index=True, max_length=254)),
                ('chavewd_level', models.CharField(db_index=True, max_length=254)),
                ('code', models.CharField(db_index=True, max_length=254)),
                ('group', models.CharField(db_index=True, max_length=254)),
                ('taxa', models.CharField(db_index=True, max_length=254)),
                ('b_0', models.CharField(db_index=True, max_length=254)),
                ('b_1', models.CharField(db_index=True, max_length=254)),
                ('mindbh', models.CharField(db_index=True, max_length=254)),
                ('maxdbh', models.CharField(db_index=True, max_length=254)),
            ],
        ),
        # Per-plot record with numeric measures and a point geometry.
        migrations.CreateModel(
            name='Richness',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stateab', models.CharField(db_index=True, max_length=254)),
                ('statenm', models.CharField(db_index=True, max_length=254)),
                ('countynm', models.CharField(db_index=True, max_length=254)),
                ('plot_idn', models.BigIntegerField(db_index=True)),
                ('lat', models.FloatField()),
                ('lon', models.FloatField()),
                ('elev', models.FloatField(db_index=True)),
                ('invyr', models.BigIntegerField()),
                ('area', models.BigIntegerField()),
                ('s', models.BigIntegerField()),
                ('tree_dens', models.BigIntegerField()),
                ('plot_agb', models.FloatField(db_index=True)),
                ('geom', django.contrib.gis.db.models.fields.PointField(db_index=True, srid=4326)),
            ],
        ),
        # Species occurrences per plot; taxonomy columns plus point geometry.
        # Note plot_idn is a FloatField here (BigIntegerField in Richness).
        migrations.CreateModel(
            name='Spplist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stateab', models.CharField(db_index=True, max_length=254)),
                ('statenm', models.CharField(db_index=True, max_length=254)),
                ('countynm', models.CharField(db_index=True, max_length=254)),
                ('plot_idn', models.FloatField(db_index=True)),
                ('lat', models.FloatField()),
                ('lon', models.FloatField()),
                ('elev', models.FloatField(db_index=True)),
                ('spcd', models.BigIntegerField()),
                ('genus', models.CharField(db_index=True, max_length=254)),
                ('species', models.CharField(db_index=True, max_length=254)),
                ('variety', models.CharField(db_index=True, max_length=254)),
                ('subspecies', models.CharField(db_index=True, max_length=254)),
                ('geom', django.contrib.gis.db.models.fields.PointField(db_index=True, srid=4326)),
            ],
        ),
        # Per-plot inventory summary (species count, means) with point geometry.
        migrations.CreateModel(
            name='SppNProduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('statecd', models.BigIntegerField(db_index=True)),
                ('stateab', models.CharField(db_index=True, max_length=254)),
                ('statenm', models.CharField(db_index=True, max_length=254)),
                ('countycd', models.BigIntegerField(db_index=True)),
                ('lat', models.FloatField(db_index=True)),
                ('lon', models.FloatField(db_index=True)),
                ('elev', models.FloatField(db_index=True)),
                ('plot', models.BigIntegerField(db_index=True)),
                ('plot_id', models.CharField(db_index=True, max_length=254)),
                ('plotidn', models.BigIntegerField(db_index=True)),
                ('period', models.BigIntegerField(db_index=True)),
                ('n_inventor', models.BigIntegerField(db_index=True)),
                ('sppn', models.BigIntegerField()),
                ('mean_treed', models.FloatField()),
                ('mai_basala', models.FloatField()),
                ('mai_biomas', models.FloatField()),
                ('geom', django.contrib.gis.db.models.fields.PointField(db_index=True, srid=4326)),
            ],
        ),
        # Individual-tree rows per plot/year with point geometry.
        migrations.CreateModel(
            name='TreeLevel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('study', models.CharField(db_index=True, max_length=254)),
                ('lat', models.FloatField()),
                ('long', models.FloatField()),
                ('plot_id', models.BigIntegerField(db_index=True)),
                ('plotarea_m', models.BigIntegerField()),
                ('year', models.BigIntegerField(db_index=True)),
                ('full_speci', models.CharField(db_index=True, max_length=254)),
                ('tree_id', models.FloatField(db_index=True)),
                ('dbhcm', models.FloatField()),
                ('abundance', models.BigIntegerField(db_index=True)),
                ('geom', django.contrib.gis.db.models.fields.PointField(db_index=True, srid=4326)),
            ],
        ),
        # Per-tree inventory rows across years; size/biomass metrics.
        # statenm/stateab/statecd/countycd/plot are unindexed here, unlike
        # the other models.
        migrations.CreateModel(
            name='TreesPerYear',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('statenm', models.CharField(max_length=254)),
                ('statecd', models.BigIntegerField()),
                ('stateab', models.CharField(max_length=254)),
                ('countycd', models.BigIntegerField()),
                ('plot', models.BigIntegerField()),
                ('plot_id', models.CharField(db_index=True, max_length=254)),
                ('plotidn', models.BigIntegerField(db_index=True)),
                ('subp', models.BigIntegerField(db_index=True)),
                ('n_inventor', models.BigIntegerField(db_index=True)),
                ('lat', models.FloatField(db_index=True)),
                ('lon', models.FloatField(db_index=True)),
                ('elev', models.FloatField(db_index=True)),
                ('invyr', models.BigIntegerField(db_index=True)),
                ('tree', models.FloatField(db_index=True)),
                ('spcd', models.BigIntegerField(db_index=True)),
                ('accepted_n', models.CharField(db_index=True, max_length=254)),
                ('family', models.CharField(db_index=True, max_length=254)),
                ('dia', models.FloatField(db_index=True)),
                ('ht_m', models.CharField(db_index=True, max_length=254)),
                ('ba_m2', models.FloatField(db_index=True)),
                ('biomass_kg', models.FloatField(db_index=True)),
                ('geom', django.contrib.gis.db.models.fields.PointField(db_index=True, srid=4326)),
            ],
        ),
        # Grid cells with bounding-box extents and a multipolygon geometry.
        migrations.CreateModel(
            name='USGrid100km',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('id_original', models.IntegerField(blank=True, null=True)),
                ('xmini', models.FloatField()),
                ('xmaxi', models.FloatField()),
                ('ymini', models.FloatField()),
                ('ymaxi', models.FloatField()),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(db_index=True, srid=4326)),
            ],
        ),
    ]
54.044586
114
0.56429
855
8,485
5.415205
0.155556
0.105832
0.166307
0.161555
0.79568
0.725918
0.708855
0.708855
0.574082
0.495896
0
0.025456
0.282381
8,485
156
115
54.391026
0.734932
0.008014
0
0.466216
1
0
0.081174
0
0
0
0
0
0
1
0
false
0
0.02027
0
0.047297
0
0
0
0
null
0
0
1
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
eb723493b5202f058ac4329a39fb495d481860f5
82
py
Python
nordlys/nordlys/logic/er/__init__.py
medtray/MultiEm-RGCN
11c7978273d57242090fa3715207ba18732d7f38
[ "MIT" ]
34
2017-03-22T10:49:51.000Z
2022-03-15T07:20:14.000Z
nordlys/nordlys/logic/er/__init__.py
medtray/MultiEm-RGCN
11c7978273d57242090fa3715207ba18732d7f38
[ "MIT" ]
33
2017-11-08T11:11:34.000Z
2021-11-15T15:39:51.000Z
nordlys/nordlys/logic/er/__init__.py
medtray/MultiEm-RGCN
11c7978273d57242090fa3715207ba18732d7f38
[ "MIT" ]
19
2017-03-22T17:48:42.000Z
2021-03-10T20:52:04.000Z
""" Entity retrieval ================ This is the entity retrieval package. """
10.25
37
0.560976
8
82
5.75
0.75
0.652174
0
0
0
0
0
0
0
0
0
0
0.158537
82
7
38
11.714286
0.666667
0.878049
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
eb7abb516fa2674d478657b8f309db77b718a588
78
py
Python
dlt/viz/__init__.py
dmarnerides/pydlt
b018f75b68af29645d0a5dae10b6d7255e53f867
[ "BSD-3-Clause-Clear" ]
236
2018-01-29T14:19:50.000Z
2022-03-20T08:27:23.000Z
dlt/viz/__init__.py
dmarnerides/pydlt
b018f75b68af29645d0a5dae10b6d7255e53f867
[ "BSD-3-Clause-Clear" ]
null
null
null
dlt/viz/__init__.py
dmarnerides/pydlt
b018f75b68af29645d0a5dae10b6d7255e53f867
[ "BSD-3-Clause-Clear" ]
17
2018-01-30T08:27:48.000Z
2018-10-07T15:30:56.000Z
from .imshow import imshow from . import modules from .csvplot import plot_csv
26
29
0.820513
12
78
5.25
0.583333
0
0
0
0
0
0
0
0
0
0
0
0.141026
78
3
29
26
0.940299
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
eb891f0ff709e2bba13aca23621609a04bf61ee1
238
py
Python
wiki_dump.py
crocopie/wiki-etl
f97894751999fd56b80faf326aa2510e298a7ea9
[ "Apache-2.0" ]
null
null
null
wiki_dump.py
crocopie/wiki-etl
f97894751999fd56b80faf326aa2510e298a7ea9
[ "Apache-2.0" ]
null
null
null
wiki_dump.py
crocopie/wiki-etl
f97894751999fd56b80faf326aa2510e298a7ea9
[ "Apache-2.0" ]
null
null
null
import luigi

from parse_wiki_task import ParseWikiTask


class WikiDump(luigi.WrapperTask):
    """Wrapper task that requires one wiki-parse task per language direction."""

    def requires(self):
        # Same two tasks, same order, as the original pair of literal yields:
        # (main='ru', trans='en') then (main='en', trans='ru').
        for main, trans in (('ru', 'en'), ('en', 'ru')):
            yield ParseWikiTask(main_lang=main, trans_lang=trans)
26.444444
60
0.735294
31
238
5.451613
0.612903
0.213018
0.260355
0.307692
0
0
0
0
0
0
0
0
0.159664
238
8
61
29.75
0.845
0
0
0
0
0
0.033613
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
ebacd7bcc5763542b0f9965f20b361476d65d61e
8,067
py
Python
get_wordVector_byGlove.py
daojiaxu/semeval_11
1877f6b0867986aff8b6c3ae4a52ba8c80c5a69c
[ "Apache-2.0" ]
3
2020-07-24T08:21:29.000Z
2021-02-13T04:10:30.000Z
get_wordVector_byGlove.py
daojiaxu/semeval_11
1877f6b0867986aff8b6c3ae4a52ba8c80c5a69c
[ "Apache-2.0" ]
null
null
null
get_wordVector_byGlove.py
daojiaxu/semeval_11
1877f6b0867986aff8b6c3ae4a52ba8c80c5a69c
[ "Apache-2.0" ]
null
null
null
import os

import numpy as np
# from semeval.datasets import pre_deal
import pre_deal_bert
import new_pre_deal
from keras.preprocessing import sequence
from mxnet.contrib import text
from transformers import BertTokenizer
import pandas as pd
from bert_serving.client import BertClient

max_len = 1000
# Shared BERT tokenizer; loading it at import time is preserved from the
# original (it is used by every GloVe embedding call below).
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")

# GloVe embedding dimensionality used throughout this module.
_EMBED_DIM = 300


def _read_article_texts(dirname):
    """Return the UTF-8 contents of every file in *dirname* as a list.

    Preserves os.listdir() order. Uses `with` so file handles are closed
    (the original leaked one open handle per article).
    """
    texts = []
    for filename in os.listdir(dirname):
        with open(dirname + "/" + filename, encoding='utf8') as f:
            texts.append(f.read())
    return texts


def _glove_matrix(texts_list, max_tokens):
    """Embed each text with GloVe-300d over its first *max_tokens* BERT tokens.

    Each text becomes a (max_tokens, 300) float matrix: rows are token
    vectors, zero-padded at the bottom when the text is shorter.
    Returns an ndarray of shape (len(texts_list), max_tokens, 300).
    """
    glove = text.embedding.create("glove", pretrained_file_name='glove.6B.300d.txt')
    vectors = []
    for item in texts_list:
        emb = glove.get_vecs_by_tokens(tokenizer.tokenize(item)[0:max_tokens]).asnumpy()
        if emb.shape[0] < max_tokens:
            pad = np.zeros((max_tokens - int(emb.shape[0]), _EMBED_DIM))
            vectors.append(np.vstack((emb, pad)).tolist())
        else:
            vectors.append(emb.tolist())
    return np.array(vectors)


def _save_padded_labels(labels_vector_dict, maxlen, out_file):
    """Post-pad each label sequence to *maxlen* and save the array to *out_file*."""
    labels_vector = []
    for key in labels_vector_dict.keys():
        labels_vector.append(labels_vector_dict[key])
    labels_vector = sequence.pad_sequences(np.array(labels_vector), maxlen=maxlen,
                                           padding='post')
    np.save(out_file, labels_vector)


def _bert_encode_dir(dirname, ip, out_file):
    """Encode every article in *dirname* via a remote bert-serving server.

    Saves the encoded matrix to *out_file* and returns it.
    """
    texts = _read_article_texts(dirname)
    bc = BertClient(ip=ip, port=5555, port_out=5556, check_version=False)
    texts_vector = bc.encode(texts)
    np.save(out_file, texts_vector)
    return texts_vector


def get_vector():
    """GloVe-embed the training articles; saves and returns (N, 1200, 300)."""
    vectors = _glove_matrix(_read_article_texts("train-articles"), 1200)
    np.save("glove_300d_1200.npy", vectors)
    return vectors


def get_vector_test():
    """GloVe-embed the dev articles; saves and returns (N, 1200, 300)."""
    vectors = _glove_matrix(_read_article_texts("dev-articles"), 1200)
    np.save("glove_test_300d_1200.npy", vectors)
    return vectors


def get_labels_vector():
    """Pad the pre_deal_bert label vectors to 1200 and save them (returns None)."""
    labels_vector_dict, texts = pre_deal_bert.get_labels_vector()
    _save_padded_labels(labels_vector_dict, 1200, "train_labels_vector_1200.npy")


def get_labels_vector_new():
    """Pad the new_pre_deal label vectors to 1200 and save them (returns None)."""
    labels_vector_dict = new_pre_deal.get_labels_vector_new()
    _save_padded_labels(labels_vector_dict, 1200, "new_train_labels_vector_1200.npy")


def get_bert_labels_vector_new():
    """Pad the new_pre_deal label vectors to 500 and save them (returns None)."""
    labels_vector_dict = new_pre_deal.get_labels_vector_new()
    _save_padded_labels(labels_vector_dict, 500, "new_train_dev_labels_vector_500.npy")


def new_get_train_dev_vector():
    """GloVe-embed the combined train+dev articles; saves and returns the array."""
    vectors = _glove_matrix(_read_article_texts("train_dev_articles"), 1200)
    np.save("new_train_dev_glove_300d_1200.npy", vectors)
    return vectors


def new_get_train_dev_vector_bert():
    """BERT-encode the combined train+dev articles via bert-serving."""
    return _bert_encode_dir("train_dev_articles", '222.19.197.229',
                            "train_dev_vector_bert_450.npy")


def new_get_dev_vector_bert():
    """BERT-encode the dev articles via bert-serving."""
    return _bert_encode_dir("dev_articles", '222.19.197.228',
                            "dev_vector_bert_500.npy")


def get_vector_train_tc():
    """GloVe-embed the 6369 TC training fragments; saves and returns (N, 100, 300)."""
    train_articles = pd.read_excel("mapping_TC.xlsx")
    text_list_train = []
    # Kept from the original even though it is unused afterwards, so the
    # same KeyError surfaces if the 'Classification' column is missing.
    labels_list_train = []
    for i in range(0, 6369):
        text_list_train.append(str(train_articles['Associated_Propaganda'][i]))
        labels_list_train.append(train_articles['Classification'][i])
    vectors = _glove_matrix(text_list_train, 100)
    np.save("tc_glove_train.npy", vectors)
    return vectors


def get_vector_test_tc():
    """GloVe-embed the 1063 TC dev fragments; saves and returns (N, 100, 300)."""
    dev_articles = pd.read_excel("TC_dev_predict.xlsx")
    text_list_dev = []
    for i in range(0, 1063):
        # NOTE(review): unlike get_vector_train_tc, no str() coercion here —
        # preserved as-is; confirm the column never holds NaN/non-strings.
        text_list_dev.append(dev_articles['Associated_Propaganda'][i])
    vectors = _glove_matrix(text_list_dev, 100)
    np.save("tc_glove_test.npy", vectors)
    return vectors


def get_vector_test_final():
    """GloVe-embed the final test articles; saves and returns (N, 1200, 300)."""
    vectors = _glove_matrix(_read_article_texts("test-articles"), 1200)
    np.save("glove_final_300d_1200.npy", vectors)
    return vectors


def get_test():
    """Load and return the pre-computed train_case_512 array."""
    texts_vector = np.load("train_case_512.npy")
    return texts_vector


if __name__ == '__main__':
    # get_labels_vector_new()
    # new_get_train_dev_vector()
    # new_get_train_dev_vector_bert()
    # get_bert_labels_vector_new()
    text_vector = get_test()
34.922078
97
0.619561
1,101
8,067
4.298819
0.116258
0.088739
0.017748
0.032538
0.820621
0.790408
0.773505
0.751321
0.695542
0.677794
0
0.048067
0.249535
8,067
231
98
34.922078
0.73373
0.021074
0
0.666667
0
0
0.103606
0.035406
0
0
0
0
0
1
0.063492
false
0
0.047619
0
0.15873
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ebafec2154ea5714113bc838f9c9da03945df8cf
297
py
Python
user/vistas/widgets/previewImg-boton.py
ZerpaTechnology/occoa
a8c0bd2657bc058801a883109c0ec0d608d04ccc
[ "Apache-2.0" ]
null
null
null
user/vistas/widgets/previewImg-boton.py
ZerpaTechnology/occoa
a8c0bd2657bc058801a883109c0ec0d608d04ccc
[ "Apache-2.0" ]
null
null
null
user/vistas/widgets/previewImg-boton.py
ZerpaTechnology/occoa
a8c0bd2657bc058801a883109c0ec0d608d04ccc
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- print '''<!--Parametros requeridos: input , output , detalle --><div><input type="file" id="'''+str(data['input'])+'''" name="'''+str(data['input'])+'''" multiple /><script type="text/javascript">''' importar(data,"previewImg") print '''</script></div>'''
59.4
199
0.602694
35
297
5.114286
0.714286
0.078212
0.134078
0
0
0
0
0
0
0
0
0.00365
0.077441
297
5
200
59.4
0.649635
0.127946
0
0
0
0
0.658915
0.089147
0
0
0
0
0
0
null
null
0
0.333333
null
null
0.666667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
1
0
0
0
1
0
0
1
0
5
cce65f7d3e00e26009be8c5879a023d7f303a2b0
62
py
Python
_ext/python/crawlab/test/crawlab_result/__init__.py
crawlab-team/crawlab-python-sdk
35f83f8d76046d3ee2700d63e96624ed534c1ca5
[ "BSD-3-Clause" ]
null
null
null
_ext/python/crawlab/test/crawlab_result/__init__.py
crawlab-team/crawlab-python-sdk
35f83f8d76046d3ee2700d63e96624ed534c1ca5
[ "BSD-3-Clause" ]
null
null
null
_ext/python/crawlab/test/crawlab_result/__init__.py
crawlab-team/crawlab-python-sdk
35f83f8d76046d3ee2700d63e96624ed534c1ca5
[ "BSD-3-Clause" ]
null
null
null
from .result_test import * from .result_service_test import *
20.666667
34
0.806452
9
62
5.222222
0.555556
0.425532
0
0
0
0
0
0
0
0
0
0
0.129032
62
2
35
31
0.87037
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
ccf0abd24c38120a7c2b95c5586292b30721dd8a
826
py
Python
wrapper/network.py
link-money/distribution_robot-master
4c35d80b8b74b6549529d147277981d593a24402
[ "MIT" ]
null
null
null
wrapper/network.py
link-money/distribution_robot-master
4c35d80b8b74b6549529d147277981d593a24402
[ "MIT" ]
1
2021-06-01T22:32:25.000Z
2021-06-01T22:32:25.000Z
wrapper/network.py
link-money/distribution_robot-master
4c35d80b8b74b6549529d147277981d593a24402
[ "MIT" ]
null
null
null
# coding: utf-8
from .utils import xdr_hash

# Known network passphrases, keyed by short name.
NETWORKS = {'PUBLIC': 'Fotono Main Net; 2018-8-10',
            'TESTNET': 'Fotono Test Network; 2017-1-1',
            'LOCAL': 'Fotono Network Main Net; 2018-3-15',
            'STELLAR': 'Public Global Stellar Network ; September 2015'
            }


class Network(object):
    """A network identified by its passphrase.

    Defaults to the TESTNET passphrase when none is given.
    """

    def __init__(self, passphrase=None):
        if passphrase is None:
            self.passphrase = NETWORKS['TESTNET']
        else:
            self.passphrase = passphrase

    def network_id(self):
        """Return the network id: xdr_hash of the UTF-8-encoded passphrase."""
        return xdr_hash(self.passphrase.encode())


def test_network():
    """Network bound to the TESTNET passphrase."""
    return Network(NETWORKS['TESTNET'])


def live_network():
    """Network bound to the PUBLIC (main net) passphrase."""
    return Network(NETWORKS['PUBLIC'])


def local_network():
    """Network bound to the LOCAL passphrase.

    BUG FIX: the original indexed NETWORKS['LOCALNET'], a key that does not
    exist in NETWORKS, so this function always raised KeyError.
    """
    return Network(NETWORKS['LOCAL'])


def stellar_network():
    """Network bound to the public Stellar passphrase."""
    return Network(NETWORKS['STELLAR'])
25.030303
71
0.634383
96
826
5.34375
0.4375
0.109162
0.155945
0.218324
0
0
0
0
0
0
0
0.039872
0.24092
826
33
72
25.030303
0.778309
0.015739
0
0
0
0
0.240148
0
0
0
0
0
0
1
0.272727
false
0.227273
0.045455
0.227273
0.590909
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
6900aa9695e2be9ad661cf525ca298624f82f1e8
86
py
Python
__init__.py
mmcheng55/Helpers
4ff2547116371b518e246d78e9e5790edba774c8
[ "MIT" ]
null
null
null
__init__.py
mmcheng55/Helpers
4ff2547116371b518e246d78e9e5790edba774c8
[ "MIT" ]
null
null
null
__init__.py
mmcheng55/Helpers
4ff2547116371b518e246d78e9e5790edba774c8
[ "MIT" ]
null
null
null
# Copyright (c) 2020. from .Commander import Commander from .tcp_echo import TCPEcho
21.5
32
0.77907
12
86
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0.054795
0.151163
86
4
33
21.5
0.849315
0.22093
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
696ef5b9ee9abeaa90ff4f84aa36601cce6bd957
6,331
py
Python
scripts/lenet.py
ashishraste/CarND-TrafficSignClassifier
3795c4fbdfd3eb39f6d70392d2042e8e7c49176f
[ "MIT" ]
null
null
null
scripts/lenet.py
ashishraste/CarND-TrafficSignClassifier
3795c4fbdfd3eb39f6d70392d2042e8e7c49176f
[ "MIT" ]
1
2019-03-06T02:19:16.000Z
2019-03-06T02:19:16.000Z
scripts/lenet.py
ashishraste/CarND-TrafficSignClassifier
3795c4fbdfd3eb39f6d70392d2042e8e7c49176f
[ "MIT" ]
null
null
null
import tensorflow as tf
from tensorflow.contrib.layers import flatten


def conv2d(x, W, b, strides=1):
    """VALID-padded 2-D convolution of x with kernel W, plus bias b."""
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='VALID') + b
    return x


def maxpool2d(x, k=2):
    """k x k max-pooling with stride k and VALID padding."""
    x = tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='VALID')
    return x


def build_lenet(x, keep_prob=0.5):
    """Classic LeNet graph: two conv/pool stages then three FC layers.

    Per the layer comments below, x is expected to be a 32x32x1 batch and
    the result is the 43-way logits tensor. keep_prob is the dropout keep
    probability applied after FC layers 3 and 4.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    weights = {
        'fw1': tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma)),
        'fw2': tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma)),
        'fcw1': tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma)),
        'fcw2': tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma)),
        'fcw3': tf.Variable(tf.truncated_normal(shape=(84, 43), mean=mu, stddev=sigma))
    }
    biases = {
        'b1': tf.Variable(tf.zeros(6)),
        'b2': tf.Variable(tf.zeros(16)),
        'fcb1': tf.Variable(tf.zeros(120)),
        'fcb2': tf.Variable(tf.zeros(84)),
        'fcb3': tf.Variable(tf.zeros(43))
    }

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1 = conv2d(x, weights['fw1'], biases['b1'])
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = maxpool2d(conv1)

    # Layer 2: Convolutional. Output = 10x10x16.
    conv2 = conv2d(conv1, weights['fw2'], biases['b2'])
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = maxpool2d(conv2)
    # Flatten. Input = 5x5x16. Output = 400.
    conv2 = flatten(conv2)

    # Layer 3: Fully Connected. Input = 400. Output = 120.
    conv3 = tf.add(tf.matmul(conv2, weights['fcw1']), biases['fcb1'])
    # Activation.
    conv3 = tf.nn.relu(conv3)
    # Dropout.
    conv3 = tf.nn.dropout(conv3, keep_prob)

    # Layer 4: Fully Connected. Input = 120. Output = 84.
    conv4 = tf.add(tf.matmul(conv3, weights['fcw2']), biases['fcb2'])
    # Activation.
    conv4 = tf.nn.relu(conv4)
    # Dropout.
    conv4 = tf.nn.dropout(conv4, keep_prob)

    # Layer 5: Fully Connected. Input = 84. Output = 43.
    logits = tf.add(tf.matmul(conv4, weights['fcw3']), biases['fcb3'])
    return logits


def build_lenet2(x, keep_prob=0.5):
    """LeNet variant with a multi-scale classifier.

    The stage-1 (5x5x16) and stage-2 (1x1x400) feature maps are both
    flattened, concatenated into an 800-unit vector, passed through dropout
    and a single FC layer producing the 43-way logits.
    """
    mu = 0
    sigma = 0.1
    weights = {
        'fw1': tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma)),
        'fw2': tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma)),
        'fw3': tf.Variable(tf.truncated_normal(shape=(5, 5, 16, 400), mean=mu, stddev=sigma)),
        'fcw1': tf.Variable(tf.truncated_normal(shape=(800, 43), mean=mu, stddev=sigma))
    }
    biases = {
        'b1': tf.Variable(tf.zeros(6)),
        'b2': tf.Variable(tf.zeros(16)),
        'b3': tf.Variable(tf.zeros(400)),
        'fcb1': tf.Variable(tf.zeros(43))
    }

    ### Stage 0.
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1 = conv2d(x, weights['fw1'], biases['b1'])
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = maxpool2d(conv1)

    ### Stage 1. Outputs from this stage are also passed to the (first) fully-connected layer.
    # Layer 2: Convolutional. Output = 10x10x16.
    conv2 = conv2d(conv1, weights['fw2'], biases['b2'])
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = maxpool2d(conv2)
    layer2 = conv2  # To be used in the classifier.

    # Layer 3: Convolutional. Input = 5x5x16. Output = 1x1x400
    conv3 = conv2d(conv2, weights['fw3'], biases['b3'])
    # Activation.
    conv3 = tf.nn.relu(conv3)
    layer3 = conv3  # To be used in the classifier.

    # Concat layer2 and layer3
    layer2_flat = flatten(layer2)
    layer3_flat = flatten(layer3)
    conv3 = tf.concat([layer2_flat, layer3_flat], 1)
    # Dropout.
    conv3 = tf.nn.dropout(conv3, keep_prob)

    # Layer 4: Fully Connected. Input = 800. Output = 43.
    logits = tf.add(tf.matmul(conv3, weights['fcw1']), biases['fcb1'])
    return logits


def build_lenet3(x, keep_prob=0.5):
    '''
    LeNet architecture with dropout applied for the activations of
    fully-connected layers. Deeper than build_lenet: four FC layers
    (120 -> 84 -> 60 -> 43), the first three each followed by dropout
    with keep probability keep_prob.
    '''
    mu = 0
    sigma = 0.1
    weights = {
        'fw1': tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma)),
        'fw2': tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma)),
        'fcw1': tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma)),
        'fcw2': tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma)),
        'fcw3': tf.Variable(tf.truncated_normal(shape=(84, 60), mean=mu, stddev=sigma)),
        'fcw4': tf.Variable(tf.truncated_normal(shape=(60, 43), mean=mu, stddev=sigma))
    }
    biases = {
        'b1': tf.Variable(tf.zeros(6)),
        'b2': tf.Variable(tf.zeros(16)),
        'fcb1': tf.Variable(tf.zeros(120)),
        'fcb2': tf.Variable(tf.zeros(84)),
        'fcb3': tf.Variable(tf.zeros(60)),
        'fcb4': tf.Variable(tf.zeros(43))
    }

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1 = conv2d(x, weights['fw1'], biases['b1'])
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = maxpool2d(conv1)

    # Layer 2: Convolutional. Output = 10x10x16.
    conv2 = conv2d(conv1, weights['fw2'], biases['b2'])
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = maxpool2d(conv2)
    # Flatten. Input = 5x5x16. Output = 400.
    conv2 = flatten(conv2)

    # Layer 3: Fully Connected. Input = 400. Output = 120.
    conv3 = tf.add(tf.matmul(conv2, weights['fcw1']), biases['fcb1'])
    # Activation.
    conv3 = tf.nn.relu(conv3)
    # Dropout.
    conv3 = tf.nn.dropout(conv3, keep_prob)

    # Layer 4: Fully Connected. Input = 120. Output = 84.
    conv4 = tf.add(tf.matmul(conv3, weights['fcw2']), biases['fcb2'])
    # Activation.
    conv4 = tf.nn.relu(conv4)
    # Dropout.
    conv4 = tf.nn.dropout(conv4, keep_prob)

    # Layer 5: Fully Connected. Input = 84. Output = 60.
    conv5 = tf.add(tf.matmul(conv4, weights['fcw3']), biases['fcb3'])
    # Activation.
    conv5 = tf.nn.relu(conv5)
    # Dropout.
    conv5 = tf.nn.dropout(conv5, keep_prob)

    # Layer 6: Fully Connected. Input = 60. Output = 43.
    logits = tf.add(tf.matmul(conv5, weights['fcw4']), biases['fcb4'])
    return logits
29.584112
112
0.648081
910
6,331
4.472527
0.132967
0.07371
0.088452
0.077396
0.772973
0.759214
0.730221
0.714988
0.706634
0.687469
0
0.092429
0.178013
6,331
213
113
29.723005
0.689662
0.26773
0
0.657407
0
0
0.045484
0
0
0
0
0
0
1
0.046296
false
0
0.018519
0
0.111111
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
15f332295dc5e061c88921ebafeb6a8df2141b1f
85
py
Python
tccli/services/bda/__init__.py
zqfan/tencentcloud-cli
b6ad9fced2a2b340087e4e5522121d405f68b615
[ "Apache-2.0" ]
47
2018-05-31T11:26:25.000Z
2022-03-08T02:12:45.000Z
tccli/services/bda/__init__.py
zqfan/tencentcloud-cli
b6ad9fced2a2b340087e4e5522121d405f68b615
[ "Apache-2.0" ]
23
2018-06-14T10:46:30.000Z
2022-02-28T02:53:09.000Z
tccli/services/bda/__init__.py
zqfan/tencentcloud-cli
b6ad9fced2a2b340087e4e5522121d405f68b615
[ "Apache-2.0" ]
22
2018-10-22T09:49:45.000Z
2022-03-30T08:06:04.000Z
# -*- coding: utf-8 -*- from tccli.services.bda.bda_client import action_caller
21.25
55
0.694118
12
85
4.75
0.916667
0
0
0
0
0
0
0
0
0
0
0.014085
0.164706
85
4
56
21.25
0.788732
0.247059
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c604fdc6ad091f4eff3330220ccd518ff8597916
92
py
Python
server/api/admin.py
monkukui/iNAZO
2ffbf91b7239049d5b4e5192c05e6a33bea8e77e
[ "MIT" ]
null
null
null
server/api/admin.py
monkukui/iNAZO
2ffbf91b7239049d5b4e5192c05e6a33bea8e77e
[ "MIT" ]
null
null
null
server/api/admin.py
monkukui/iNAZO
2ffbf91b7239049d5b4e5192c05e6a33bea8e77e
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import GradeInfo

# BUG FIX: the original called `admin.register(GradeInfo)` bare.
# admin.register() is a decorator *factory* (meant to be used as
# @admin.register(GradeInfo) above a ModelAdmin class); calling it and
# discarding the result registers nothing. Register with the default site:
admin.site.register(GradeInfo)
13.142857
32
0.815217
12
92
6.25
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.130435
92
6
33
15.333333
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c63544452846c0de8493d0f6974970d4a06bfe1a
20
py
Python
scraper.py
SUZITD/selenium-youtube-scrapper
cbefc3ddd599b72c00b4ba41a8d9a55690064ac2
[ "MIT" ]
null
null
null
scraper.py
SUZITD/selenium-youtube-scrapper
cbefc3ddd599b72c00b4ba41a8d9a55690064ac2
[ "MIT" ]
null
null
null
scraper.py
SUZITD/selenium-youtube-scrapper
cbefc3ddd599b72c00b4ba41a8d9a55690064ac2
[ "MIT" ]
null
null
null
# Placeholder entry point — just prints a greeting for now.
message = "hell world"  # (sic) string reproduced exactly from the original
print(message)
20
20
0.7
3
20
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.1
20
1
20
20
0.777778
0
0
0
0
0
0.47619
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
d6815bb63bd50663b4c53f264a0f080b562ebe6d
162
py
Python
ego_objects/__init__.py
ContinualAI/clvision-challenge-2022
e8523d1269646a1c3d5759b546c82d74693ed7fa
[ "MIT" ]
17
2022-02-25T08:38:43.000Z
2022-03-31T01:55:29.000Z
ego_objects/__init__.py
ContinualAI/clvision-challenge-2022
e8523d1269646a1c3d5759b546c82d74693ed7fa
[ "MIT" ]
3
2022-03-23T11:01:38.000Z
2022-03-31T13:45:57.000Z
ego_objects/__init__.py
ContinualAI/clvision-challenge-2022
e8523d1269646a1c3d5759b546c82d74693ed7fa
[ "MIT" ]
4
2022-03-08T05:59:01.000Z
2022-03-21T11:10:31.000Z
from .entries import * from .ego_objects import EgoObjects from .results import EgoObjectsResults from .eval import EgoObjectsEval from .vis import EgoObjectsVis
27
38
0.839506
20
162
6.75
0.6
0
0
0
0
0
0
0
0
0
0
0
0.123457
162
5
39
32.4
0.950704
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d6c2e7c64c3737eb1cb0f381d0a1ac60b97d64e2
91
py
Python
data.py
astronote/astronote-api
2baea5ae7a5fdaaf150fed3bf7014ea4242e4cd7
[ "MIT" ]
null
null
null
data.py
astronote/astronote-api
2baea5ae7a5fdaaf150fed3bf7014ea4242e4cd7
[ "MIT" ]
null
null
null
data.py
astronote/astronote-api
2baea5ae7a5fdaaf150fed3bf7014ea4242e4cd7
[ "MIT" ]
null
null
null
import astronote print(astronote.get_events(date='2017-11-30', lat='-27.7', lon='152.7'))
22.75
72
0.703297
16
91
3.9375
0.875
0
0
0
0
0
0
0
0
0
0
0.176471
0.065934
91
3
73
30.333333
0.564706
0
0
0
0
0
0.21978
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
d6d8b39fcef75444bd27e4f1c96b3e5ff25e4f52
49
py
Python
augmentations_tuner/fastautoaugment/__init__.py
erinfolami/ZazuML
8dbe934c06612dd7917f38090701e3ead0337fb8
[ "MIT" ]
null
null
null
augmentations_tuner/fastautoaugment/__init__.py
erinfolami/ZazuML
8dbe934c06612dd7917f38090701e3ead0337fb8
[ "MIT" ]
null
null
null
augmentations_tuner/fastautoaugment/__init__.py
erinfolami/ZazuML
8dbe934c06612dd7917f38090701e3ead0337fb8
[ "MIT" ]
null
null
null
from .search import search as augmentation_search
49
49
0.877551
7
49
6
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.102041
49
1
49
49
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
ba3ab01c2c73e51cc407b84ab72b53404a84956a
17,166
py
Python
daisychain/channel_clock/tests/test_triggerinputview.py
daisychainme/daisychain
245d0041f1efd2d6cc110f60aebf2e2dee98bcdb
[ "MIT" ]
5
2016-09-27T10:44:59.000Z
2022-03-29T08:16:44.000Z
daisychain/channel_clock/tests/test_triggerinputview.py
daisychainme/daisychain
245d0041f1efd2d6cc110f60aebf2e2dee98bcdb
[ "MIT" ]
null
null
null
daisychain/channel_clock/tests/test_triggerinputview.py
daisychainme/daisychain
245d0041f1efd2d6cc110f60aebf2e2dee98bcdb
[ "MIT" ]
null
null
null
from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.http import HttpRequest from mock import MagicMock, patch from unittest import skip from channel_clock.views import (TriggerInputView, RequiredInputMissing, InputInvalid, TriggerInputView) from core.models import Channel, Trigger, TriggerInput from recipes.tests.test_utils import RecipeTestCase class BaseViewTestCase(RecipeTestCase): fixtures = ['channel_clock/fixtures/initial_data.json'] def setUp(self): self.url = reverse("recipes:new_step3") self.channel = Channel.objects.get(name="Clock") self.max_muster = User.objects.create_user("max_muster") self.client.force_login(self.max_muster) def assertMessage(self, response, message): for msg in response.context['messages']: if message == str(msg): return raise AssertionError("Message not found: '{}'".format(message)) class TriggerInputViewTest(BaseViewTestCase): @patch("channel_clock.views.redirect") @patch("django.contrib.messages.error") def test_dispatch__trigger_does_not_exist(self, mock_error, mock_redirect): request = "test_request_not_used" draft = { 'trigger_id': -99 } TriggerInputView().dispatch(request, draft) mock_error.assert_called_once_with( request, "The selected trigger does not exist") mock_redirect.assert_called_once_with("recipes:new_step2") def test_dispatch__valid_trigger_types(self): for trigger_type in range(1,6): trigger = Trigger.objects.get(channel=self.channel, trigger_type=trigger_type) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) self.client.get(self.url) self.client.post(self.url) def test__save_and_redirect(self): draft = {} # every year trigger trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) inputs = { "Time": "15:30", "Date": "10-04" } draft_expected = { 'recipe_conditions': [] } trigger_input1 = TriggerInput.objects.get(trigger=trigger, name="Date") trigger_input2 = TriggerInput.objects.get(trigger=trigger, name="Time") 
draft_expected['recipe_conditions'] = [{ 'id': trigger_input1.id, 'value': "10-04" }, { 'id': trigger_input2.id, 'value': "15:30" }] TriggerInputView()._save_and_redirect(None, draft, trigger, inputs) condition_sorter = lambda x: x['id'] draft['recipe_conditions'].sort(key=condition_sorter) draft_expected['recipe_conditions'].sort(key=condition_sorter) self.assertEqual(draft, draft_expected) @patch("django.contrib.messages.error") def test__validate_input__missing_required(self, mock_error): request = MagicMock() request.POST = MagicMock() request.POST.getlist = MagicMock(return_value=[]) message_expected = "Please select a test_key value" with self.assertRaises(RequiredInputMissing): TriggerInputView()._validate_input(request, 'test_key', None) request.POST.getlist.assert_called_once_with('test_key') mock_error.assert_called_once_with(request, message_expected) @patch("django.contrib.messages.error") def test__validate_input__missing_required_custom_msg(self, mock_error): request = MagicMock() request.POST = MagicMock() request.POST.getlist = MagicMock(return_value=[]) message = "test_message" with self.assertRaises(RequiredInputMissing): TriggerInputView()._validate_input(request, 'test_key', None, message_required=message) request.POST.getlist.assert_called_once_with('test_key') mock_error.assert_called_once_with(request, message) @patch("django.contrib.messages.error") def test__validate_input__missing_not_required(self, mock_error): request = MagicMock() request.POST = MagicMock() request.POST.getlist = MagicMock(return_value=[]) result = TriggerInputView()._validate_input(request, 'test_key', None, required=False) self.assertIsNone(result) request.POST.getlist.assert_called_once_with('test_key') mock_error.assert_not_called() def test__validate_input__return_single_value(self): request = MagicMock() request.POST = MagicMock() request.POST.getlist = MagicMock(return_value=['post_val_1']) condition = MagicMock(return_value=True) result = 
TriggerInputView()._validate_input(request, 'test_key', condition) self.assertEqual(result, 'post_val_1') request.POST.getlist.assert_called_once_with('test_key') def test__validate_input__return_list(self): result_expected = ["value_1", "value_2", "value_3"] request = MagicMock() request.POST = MagicMock() request.POST.getlist = MagicMock(return_value=result_expected) condition = MagicMock(return_value=True) result = TriggerInputView()._validate_input(request, 'test_key', condition) self.assertEqual(result, result_expected) request.POST.getlist.assert_called_once_with('test_key') @patch("django.contrib.messages.error") def test__validate_input__condition_false(self, mock_error): result_expected = ["value_1"] request = MagicMock() request.POST = MagicMock() request.POST.getlist = MagicMock(return_value=result_expected) message_expected = ("Invalid selection. Please stay within the defined" " test_key values") condition = MagicMock(return_value=False) with self.assertRaises(InputInvalid): TriggerInputView()._validate_input(request, 'test_key', condition) request.POST.getlist.assert_called_once_with('test_key') mock_error.assert_called_once_with(request, message_expected) class EveryDayTriggerInputViewTest(BaseViewTestCase): def test_post__valid_values(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=1) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "13", "minute": "30" } res = self.client.post(self.url, data=data) self.assertRedirect('recipes:new_step4', res) def test_post__invalid_hour(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=1) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "24", "minute": "30" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. 
Please stay within the ' 'defined hour values')) def test_post__invalid_minute(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=1) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "13", "minute": "60" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined minute values')) class EveryHourTriggerInputViewTest(BaseViewTestCase): def test_post__valid_minute(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=2) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "minute": "30" } res = self.client.post(self.url, data=data) self.assertRedirect("recipes:new_step4", res) def test_post__invalid_minute(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=2) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "minute": "60" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined minute values')) class EveryWeekdayTriggerInputViewTest(BaseViewTestCase): def test_post__valid_data(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=3) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "weekday": ("1", "3", "5") } res = self.client.post(self.url, data=data) self.assertRedirect("recipes:new_step4", res) def test_post__invalid_hour(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=3) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "25", "minute": "30", "weekday": ("1", "3", "5") } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. 
Please stay within the ' 'defined hour values')) def test_post__invalid_minute(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=3) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "60", "weekday": ("1", "3", "5") } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined minute values')) def test_post__invalid_weekday(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=3) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "weekday": "9" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined weekday values')) class EveryMonthTriggerInputViewTest(BaseViewTestCase): def test_post__valid_data(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=4) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "day": "20" } res = self.client.post(self.url, data=data) self.assertRedirect("recipes:new_step4", res) def test_post__invalid_hour(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=4) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "25", "minute": "30", "day": "28" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined hour values')) def test_post__invalid_minute(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=4) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "60", "day": "28" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. 
Please stay within the ' 'defined minute values')) def test_post__invalid_day(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=4) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "day": "29" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined day values')) class EveryYearTriggerInputViewTest(BaseViewTestCase): def test_post__valid_data(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "day": "28", "month": "7" } res = self.client.post(self.url, data=data) self.assertRedirect("recipes:new_step4", res) def test_post__invalid_hour(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "25", "minute": "30", "day": "28", "month": "7" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined hour values')) def test_post__invalid_minute(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "60", "day": "28", "month": "7" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined minute values')) def test_post__invalid_day(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "day": "32", "month": "7" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. 
Please stay within the ' 'defined day values')) def test_post__invalid_month(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "day": "28", "month": "13" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('Invalid selection. Please stay within the ' 'defined month values')) def test_post__leapday(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "day": "29", "month": "2" } res = self.client.post(self.url, data=data) self.assertMessage(res, ('You selected the leap day. Please confirm ' 'your choice.')) def test_post__invalid_day_month_combination(self): trigger = Trigger.objects.get(channel=self.channel, trigger_type=5) self.set_recipe_draft({ 'trigger_channel_id': self.channel.id, 'trigger_id': trigger.id }) data = { "hour": "15", "minute": "30", "day": "31", "month": "4" } res = self.client.post(self.url, data=data) self.assertMessage(res, 'Invalid selection. This date does not exist')
31.097826
79
0.578644
1,813
17,166
5.255378
0.097628
0.050798
0.048489
0.055416
0.781696
0.763224
0.746851
0.724286
0.724286
0.703925
0
0.014616
0.306478
17,166
551
80
31.154265
0.78572
0.001049
0
0.6775
0
0
0.157996
0.013648
0
0
0
0
0.1025
1
0.0775
false
0
0.02
0
0.12
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ba797504be402325028e126a96ecfda0344fb4ac
231
py
Python
backend/reset_migrations.py
sud0su/django-ecommerce-api
cfd5d9d94965759c9c0130ade345f24c36fd96ee
[ "MIT" ]
null
null
null
backend/reset_migrations.py
sud0su/django-ecommerce-api
cfd5d9d94965759c9c0130ade345f24c36fd96ee
[ "MIT" ]
3
2020-02-12T00:16:45.000Z
2021-06-10T21:31:15.000Z
backend/reset_migrations.py
sud0su/django-ecommerce-api
cfd5d9d94965759c9c0130ade345f24c36fd96ee
[ "MIT" ]
null
null
null
from subprocess import call call('find . | grep -E "(__pycache__|\.pyc|\.pyo$)" | xargs rm -rf', shell=True) call('find . -path "*/migrations/*.py" -not -name "__init__.py" -delete', shell=True) # call('rm db.sqlite3', shell=True)
46.2
85
0.658009
34
231
4.235294
0.705882
0.1875
0.180556
0
0
0
0
0
0
0
0
0.004926
0.121212
231
5
86
46.2
0.704434
0.142857
0
0
0
0
0.634518
0.142132
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
ba977a4485b6603411b301ab758cc01269f91bd8
217
py
Python
dask_cloudprovider/__init__.py
samuel-co/dask-cloudprovider
d441f5b95a85ac731c35420489b6df14fa2883ab
[ "BSD-3-Clause" ]
1
2019-10-26T02:15:06.000Z
2019-10-26T02:15:06.000Z
dask_cloudprovider/__init__.py
samuel-co/dask-cloudprovider
d441f5b95a85ac731c35420489b6df14fa2883ab
[ "BSD-3-Clause" ]
null
null
null
dask_cloudprovider/__init__.py
samuel-co/dask-cloudprovider
d441f5b95a85ac731c35420489b6df14fa2883ab
[ "BSD-3-Clause" ]
1
2021-01-15T10:43:53.000Z
2021-01-15T10:43:53.000Z
from . import config from .providers.aws.ecs import ECSCluster, FargateCluster __all__ = ["ECSCluster", "FargateCluster"] from ._version import get_versions __version__ = get_versions()["version"] del get_versions
21.7
57
0.788018
25
217
6.36
0.52
0.207547
0.226415
0
0
0
0
0
0
0
0
0
0.115207
217
9
58
24.111111
0.828125
0
0
0
0
0
0.142857
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
baab82e0cdf076394d543431989b9e852ceddf2b
192
py
Python
backend/apps/organizations/admin.py
hovedstyret/indok-web
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
[ "MIT" ]
3
2021-11-18T09:29:14.000Z
2022-01-13T20:12:11.000Z
backend/apps/organizations/admin.py
rubberdok/indok-web
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
[ "MIT" ]
277
2022-01-17T18:16:44.000Z
2022-03-31T19:44:04.000Z
backend/apps/organizations/admin.py
hovedstyret/indok-web
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
[ "MIT" ]
null
null
null
from django.contrib import admin from apps.organizations.models import Organization, Membership # Register your models here. admin.site.register(Organization) admin.site.register(Membership)
27.428571
62
0.838542
24
192
6.708333
0.583333
0.111801
0.21118
0
0
0
0
0
0
0
0
0
0.088542
192
6
63
32
0.92
0.135417
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
baba184c9b3040a94a217e7eff6038c50bf3ab55
176
py
Python
src/noteburst/worker/functions/__init__.py
lsst-sqre/noteburst
ff08698ca8c35f69c8c840037b9e35d43e9737de
[ "MIT" ]
null
null
null
src/noteburst/worker/functions/__init__.py
lsst-sqre/noteburst
ff08698ca8c35f69c8c840037b9e35d43e9737de
[ "MIT" ]
5
2021-10-31T23:33:19.000Z
2022-03-21T19:43:56.000Z
src/noteburst/worker/functions/__init__.py
lsst-sqre/noteburst
ff08698ca8c35f69c8c840037b9e35d43e9737de
[ "MIT" ]
null
null
null
__all__ = ["ping", "nbexec", "run_python", "keep_alive"] from .keepalive import keep_alive from .nbexec import nbexec from .ping import ping from .runpython import run_python
25.142857
56
0.767045
25
176
5.08
0.44
0.141732
0.204724
0
0
0
0
0
0
0
0
0
0.130682
176
6
57
29.333333
0.830065
0
0
0
0
0
0.170455
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
bacc2c0a66002302237b81eaeb9c06f12cf4f232
43
py
Python
tutorial/run.py
Corvince/mesa-viz
ca6dc2e26b61ea152eff526015a8fc5659ed23ab
[ "Apache-2.0" ]
9
2020-07-16T07:35:51.000Z
2022-03-29T09:39:44.000Z
tutorial/run.py
Corvince/mesa-viz
ca6dc2e26b61ea152eff526015a8fc5659ed23ab
[ "Apache-2.0" ]
2
2022-01-22T17:51:27.000Z
2022-02-13T18:06:28.000Z
tutorial/run.py
Corvince/mesa-viz
ca6dc2e26b61ea152eff526015a8fc5659ed23ab
[ "Apache-2.0" ]
null
null
null
from turtle import server server.launch()
10.75
25
0.790698
6
43
5.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.139535
43
3
26
14.333333
0.918919
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
242b4ce8d67d4db78b0546d128c8f3fb437d9a5c
81
py
Python
src/boogie/models/expressions/__init__.py
pencil-labs/django-boogie
79b759617785ce33a24cb6013266a0810b24801c
[ "BSD-3-Clause" ]
null
null
null
src/boogie/models/expressions/__init__.py
pencil-labs/django-boogie
79b759617785ce33a24cb6013266a0810b24801c
[ "BSD-3-Clause" ]
null
null
null
src/boogie/models/expressions/__init__.py
pencil-labs/django-boogie
79b759617785ce33a24cb6013266a0810b24801c
[ "BSD-3-Clause" ]
2
2021-09-16T22:11:35.000Z
2021-09-25T12:28:27.000Z
from .f_object import F from .functions import concat, coalesce, greatest, least
27
56
0.802469
12
81
5.333333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.135802
81
2
57
40.5
0.914286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
79f451eda53250d1b88604adee69d8d7f4e3f006
59
py
Python
ubermagtable/util/__init__.py
ubermag/oommfodt
f0fadbcd990e742647269ee1c2b94302dc4e0def
[ "BSD-3-Clause" ]
null
null
null
ubermagtable/util/__init__.py
ubermag/oommfodt
f0fadbcd990e742647269ee1c2b94302dc4e0def
[ "BSD-3-Clause" ]
null
null
null
ubermagtable/util/__init__.py
ubermag/oommfodt
f0fadbcd990e742647269ee1c2b94302dc4e0def
[ "BSD-3-Clause" ]
null
null
null
"""Utility tools""" from .util import columns, data, units
19.666667
38
0.711864
8
59
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.135593
59
2
39
29.5
0.823529
0.220339
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
79f5e504af79f3837417f8c279e2fe2f669ca2a7
283
py
Python
up/utils/general/fake_linklink.py
ModelTC/EOD
164bff80486e9ae6a095a97667b365c46ceabd86
[ "Apache-2.0" ]
196
2021-10-30T05:15:36.000Z
2022-03-30T18:43:40.000Z
up/utils/general/fake_linklink.py
ModelTC/EOD
164bff80486e9ae6a095a97667b365c46ceabd86
[ "Apache-2.0" ]
12
2021-10-30T11:33:28.000Z
2022-03-31T14:22:58.000Z
up/utils/general/fake_linklink.py
ModelTC/EOD
164bff80486e9ae6a095a97667b365c46ceabd86
[ "Apache-2.0" ]
23
2021-11-01T07:26:17.000Z
2022-03-27T05:55:37.000Z
class link(object): class nn(object): class SyncBatchNorm2d(object): ... class syncbnVarMode_t(object): class L2(object): ... class linklink(object): class nn(object): class SyncBatchNorm2d(object): ...
14.15
38
0.533569
25
283
6
0.36
0.513333
0.173333
0.253333
0.6
0.6
0.6
0
0
0
0
0.016304
0.349823
283
19
39
14.894737
0.798913
0
0
0.636364
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0.727273
0
1
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
5
030b094f53ff1eb074c435e11879c12402e11b9b
4,465
py
Python
resources/test_cases/python/cryptography/TestRule2.py
stg-tud/licma
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
[ "MIT" ]
5
2021-09-13T11:24:13.000Z
2022-03-18T21:56:58.000Z
resources/test_cases/python/cryptography/TestRule2.py
stg-tud/licma
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
[ "MIT" ]
null
null
null
resources/test_cases/python/cryptography/TestRule2.py
stg-tud/licma
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
[ "MIT" ]
1
2021-09-13T06:02:20.000Z
2021-09-13T06:02:20.000Z
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.backends import default_backend from Crypto.Random import random g_backend = default_backend() g_iv1 = b"1234567812345678" g_iv2 = bytes("1234567812345678", "utf8") def p_example1_hard_coded1(key, data): cipher = Cipher(algorithms.AES(key), modes.CBC(b"1234567812345678"), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example2_hard_coded2(key, data): cipher = Cipher(algorithms.AES(key), modes.CBC(bytes("1234567812345678", "utf8")), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example3_local_variable1(key, data): iv = b"1234567812345678" cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example4_local_variable2(key, data): iv = bytes("1234567812345678", "utf8") cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example5_nested_local_variable1(key, data): iv1 = b"1234567812345678" iv2 = iv1 iv3 = iv2 cipher = Cipher(algorithms.AES(key), modes.CBC(iv3), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example6_nested_local_variable2(key, data): iv1 = bytes("1234567812345678", "utf8") iv2 = iv1 iv3 = iv2 cipher = Cipher(algorithms.AES(key), modes.CBC(iv3), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example_method_call(key, iv, data): cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend) encryptor = cipher.encryptor() cipher_text = 
encryptor.update(data) + encryptor.finalize() return cipher_text def p_example_nested_method_call(key, iv, data): return p_example_method_call(key, iv, data) def p_example7_direct_method_call1(key, data): iv = b"1234567812345678" return p_example_method_call(key, iv, data) def p_example8_direct_method_call2(key, data): iv = bytes("1234567812345678", "utf8") return p_example_method_call(key, iv, data) def p_example9_nested_method_call1(key, data): iv = b"1234567812345678" return p_example_nested_method_call(key, iv, data) def p_example10_nested_method_call2(key, data): iv = bytes("1234567812345678", "utf8") return p_example_nested_method_call(key, iv, data) def p_example11_direct_g_variable_access1(key, data): cipher = Cipher(algorithms.AES(key), modes.CBC(g_iv1), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example12_direct_g_variable_access2(key, data): cipher = Cipher(algorithms.AES(key), modes.CBC(g_iv2), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example13_indirect_g_variable_access1(key, data): iv = g_iv1 cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example14_indirect_g_variable_access2(key, data): iv = g_iv2 cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def p_example15_warning_parameter_not_resolvable(key, iv, data): cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text def n_example1_cbc(key, data): iv = random.getrandbits(16).to_bytes(16, 'big') cipher = Cipher(algorithms.AES(key), 
modes.CBC(iv), backend=g_backend) encryptor = cipher.encryptor() cipher_text = encryptor.update(data) + encryptor.finalize() return cipher_text
32.830882
105
0.737514
588
4,465
5.369048
0.127551
0.123535
0.090592
0.102946
0.782705
0.749762
0.738993
0.735825
0.726956
0.699715
0
0.067528
0.150952
4,465
135
106
33.074074
0.765233
0
0
0.65625
0
0
0.049048
0
0
0
0
0
0
1
0.1875
false
0
0.03125
0.010417
0.40625
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
034b1da501e4d0660e0d720a2efb93cf7ddf9d30
42
py
Python
battleship/ui/__init__.py
mikolasan/battleship
f6cd30f13595f8ce36bb97db194e546ad477021c
[ "MIT" ]
null
null
null
battleship/ui/__init__.py
mikolasan/battleship
f6cd30f13595f8ce36bb97db194e546ad477021c
[ "MIT" ]
1
2020-08-25T15:44:04.000Z
2020-08-25T15:44:04.000Z
battleship/ui/__init__.py
mikolasan/battleship
f6cd30f13595f8ce36bb97db194e546ad477021c
[ "MIT" ]
1
2020-01-29T04:59:19.000Z
2020-01-29T04:59:19.000Z
'''Custom UI elements for PyGame engine'''
42
42
0.738095
6
42
5.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.119048
42
1
42
42
0.837838
0.857143
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
cefd61735be85970a4ffc7c57d75caf1ca113ab7
128
py
Python
projects/Django Multiselect Form/Formapp/admin.py
Manasranjanpati/Intern_training
28a1ef3f55cbf85d6525b76e98ed3fdb1663d5e6
[ "MIT" ]
null
null
null
projects/Django Multiselect Form/Formapp/admin.py
Manasranjanpati/Intern_training
28a1ef3f55cbf85d6525b76e98ed3fdb1663d5e6
[ "MIT" ]
null
null
null
projects/Django Multiselect Form/Formapp/admin.py
Manasranjanpati/Intern_training
28a1ef3f55cbf85d6525b76e98ed3fdb1663d5e6
[ "MIT" ]
null
null
null
from django.contrib import admin # Register your models here. from .models import EnquiryData admin.site.register(EnquiryData)
21.333333
32
0.820313
17
128
6.176471
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.117188
128
6
33
21.333333
0.929204
0.203125
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3028cf562a304245009d19b6e5b6d2fc95af54a8
10,943
py
Python
lib/net/deeplabv3plus.py
FUTUREEEEEE/semantic-segmentation-codebase
39a91695813484af430778da3b7032a98d26835b
[ "MIT" ]
37
2021-01-12T06:37:23.000Z
2022-03-23T08:14:09.000Z
lib/net/deeplabv3plus.py
FUTUREEEEEE/semantic-segmentation-codebase
39a91695813484af430778da3b7032a98d26835b
[ "MIT" ]
8
2021-01-17T07:53:24.000Z
2021-11-16T08:55:48.000Z
lib/net/deeplabv3plus.py
FUTUREEEEEE/semantic-segmentation-codebase
39a91695813484af430778da3b7032a98d26835b
[ "MIT" ]
6
2021-03-14T11:09:30.000Z
2021-08-24T11:40:53.000Z
# ---------------------------------------- # Written by Yude Wang # ---------------------------------------- import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init from net.backbone import build_backbone from net.operators import ASPP from utils.registry import NETS @NETS.register_module class deeplabv3plus(nn.Module): def __init__(self, cfg, batchnorm=nn.BatchNorm2d, **kwargs): super(deeplabv3plus, self).__init__() self.cfg = cfg self.batchnorm = batchnorm self.backbone = build_backbone(cfg.MODEL_BACKBONE, pretrained=cfg.MODEL_BACKBONE_PRETRAIN, norm_layer=self.batchnorm, **kwargs) input_channel = self.backbone.OUTPUT_DIM self.aspp = ASPP(dim_in=input_channel, dim_out=cfg.MODEL_ASPP_OUTDIM, rate=[0, 6, 12, 18], bn_mom = cfg.TRAIN_BN_MOM, has_global = cfg.MODEL_ASPP_HASGLOBAL, batchnorm = self.batchnorm) #self.dropout1 = nn.Dropout(0.5) indim = self.backbone.MIDDLE_DIM self.shortcut_conv = nn.Sequential( nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, 3, 1, padding=1, bias=False), batchnorm(cfg.MODEL_SHORTCUT_DIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), ) self.cat_conv = nn.Sequential( nn.Conv2d(cfg.MODEL_ASPP_OUTDIM+cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1,bias=False), batchnorm(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), #nn.Dropout(0.5), nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1,bias=False), batchnorm(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), #nn.Dropout(0.1), ) self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0) for m in self.modules(): if m not in self.backbone.modules(): # if isinstance(m, nn.Conv2d): # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, batchnorm): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if cfg.MODEL_FREEZEBN: self.freeze_bn() def 
forward(self, x, getf=False, interpolate=True): N,C,H,W = x.size() l1, l2, l3, l4 = self.backbone(x) feature_aspp = self.aspp(l4) #feature_aspp = self.dropout1(feature_aspp) feature_shallow = self.shortcut_conv(l1) n,c,h,w = feature_shallow.size() feature_aspp = F.interpolate(feature_aspp,(h,w),mode='bilinear',align_corners=True) feature_cat = torch.cat([feature_aspp,feature_shallow],1) feature = self.cat_conv(feature_cat) result = self.cls_conv(feature) result = F.interpolate(result, (H,W), mode='bilinear',align_corners=True) if getf: if interpolate: feature = F.interpolate(feature, (H,W), mode='bilinear', align_corners=True) return result, feature else: return result def freeze_bn(self): for m in self.modules(): if isinstance(m, self.batchnorm): m.eval() def unfreeze_bn(self): for m in self.modules(): if isinstance(m, self.batchnorm): m.train() @NETS.register_module class deeplabv3plus2d(deeplabv3plus): def __init__(self, cfg, batchnorm=nn.BatchNorm2d, **kwargs): super(deeplabv3plus2d, self).__init__(cfg, batchnorm=batchnorm, **kwargs) self.compress_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, 2, 1, 1, padding=0, bias=False) self.cls_conv = nn.Conv2d(2, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0, bias=False) for m in self.modules(): if m not in self.backbone.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, batchnorm): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if cfg.MODEL_FREEZEBN: self.freeze_bn() def forward(self, x, getf=False, interpolate=True): N,C,H,W = x.size() l1, l2, l3, l4 = self.backbone(x) feature_aspp = self.aspp(l4) #feature_aspp = self.dropout1(feature_aspp) feature_shallow = self.shortcut_conv(l1) n,c,h,w = feature_shallow.size() feature_aspp = F.interpolate(feature_aspp,(h,w),mode='bilinear',align_corners=True) feature_cat = torch.cat([feature_aspp,feature_shallow],1) feature = self.cat_conv(feature_cat) feature = self.compress_conv(feature) result = 
self.cls_conv(feature) result = F.interpolate(result, (H,W), mode='bilinear',align_corners=True) if getf: if interpolate: feature = F.interpolate(feature, (H,W), mode='bilinear', align_corners=True) return result, feature else: return result @NETS.register_module class deeplabv3plusInsNorm(deeplabv3plus): def __init__(self, cfg, batchnorm=nn.BatchNorm2d, **kwargs): super(deeplabv3plusInsNorm, self).__init__(cfg, batchnorm, **kwargs) self.cat_conv = nn.Sequential( nn.Conv2d(cfg.MODEL_ASPP_OUTDIM+cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1,bias=False), nn.InstanceNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1,bias=False), nn.InstanceNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), ) for m in self.modules(): if m not in self.backbone.modules(): if isinstance(m, (batchnorm, nn.InstanceNorm2d)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if cfg.MODEL_FREEZEBN: self.freeze_bn() @NETS.register_module class deeplabv3plusAux(deeplabv3plus): def __init__(self, cfg, batchnorm=nn.BatchNorm2d, **kwargs): super(deeplabv3plusAux, self).__init__(cfg, batchnorm, **kwargs) input_channel = self.backbone.OUTPUT_DIM self.seghead2 = nn.Sequential( nn.Conv2d(input_channel//4, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=False), batchnorm(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0) ) self.seghead3 = nn.Sequential( nn.Conv2d(input_channel//2, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=False), batchnorm(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0) ) self.seghead4 = nn.Sequential( nn.Conv2d(input_channel, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=False), 
batchnorm(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM, affine=True), nn.ReLU(inplace=True), nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0) ) #self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0, bias=False) for m in self.modules(): if m not in self.backbone.modules(): # if isinstance(m, nn.Conv2d): # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, batchnorm): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if cfg.MODEL_FREEZEBN: self.freeze_bn() def forward(self, x, getf=False, interpolate=True): N,C,H,W = x.size() l1, l2, l3, l4 = self.backbone(x) feature_aspp = self.aspp(l4) feature_shallow = self.shortcut_conv(l1) n,c,h,w = feature_shallow.size() feature_aspp = F.interpolate(feature_aspp,(h,w),mode='bilinear',align_corners=True) feature_cat = torch.cat([feature_aspp,feature_shallow],1) feature = self.cat_conv(feature_cat) result = self.cls_conv(feature) result = F.interpolate(result, (H,W), mode='bilinear',align_corners=True) seg2 = F.interpolate(self.seghead2(l2), (H,W), mode='bilinear', align_corners=True) seg3 = F.interpolate(self.seghead3(l3), (H,W), mode='bilinear', align_corners=True) seg4 = F.interpolate(self.seghead4(l4), (H,W), mode='bilinear', align_corners=True) if getf: if interpolate: feature = F.interpolate(feature, (H,W), mode='bilinear', align_corners=True) return [result, seg2, seg3, seg4], feature else: return [result, seg2, seg3, seg4] def orth_init(self): self.cls_conv.weight = torch.nn.Parameter(torch.eye(n=self.cfg.MODEL_NUM_CLASSES, m=self.cfg.MODEL_ASPP_OUTDIM).unsqueeze(-1).unsqueeze(-1)) self.seghead2[-1].weight = torch.nn.Parameter(torch.eye(n=self.cfg.MODEL_NUM_CLASSES, m=self.cfg.MODEL_ASPP_OUTDIM).unsqueeze(-1).unsqueeze(-1)) self.seghead3[-1].weight = torch.nn.Parameter(torch.eye(n=self.cfg.MODEL_NUM_CLASSES, m=self.cfg.MODEL_ASPP_OUTDIM).unsqueeze(-1).unsqueeze(-1)) self.seghead4[-1].weight = 
torch.nn.Parameter(torch.eye(n=self.cfg.MODEL_NUM_CLASSES, m=self.cfg.MODEL_ASPP_OUTDIM).unsqueeze(-1).unsqueeze(-1)) print('deeplabv3plusAux orth_init() finished') def orth_reg(self): module_list = [self.cls_conv, self.seghead2[-1], self.seghead3[-1], self.seghead4[-1]] loss_reg = 0 for m in module_list: w = m.weight.squeeze(-1).squeeze(-1) w_norm = torch.norm(w, dim=1, keepdim=True) w = w/w_norm matrix = torch.matmul(w, w.transpose(0,1)) loss_reg += torch.mean(matrix*(1-torch.eye(self.cfg.MODEL_NUM_CLASSES).to(0))) return loss_reg @NETS.register_module class deeplabv3plusAuxSigmoid(deeplabv3plusAux): def __init__(self, cfg, batchnorm=nn.BatchNorm2d, **kwargs): super(deeplabv3plusAuxSigmoid, self).__init__(cfg, batchnorm, **kwargs) for m in self.modules(): if m not in self.backbone.modules() and isinstance(m, nn.ReLU): m = nn.Sigmoid() @NETS.register_module class deeplabv3plusAuxReLUSigmoid(deeplabv3plusAux): def __init__(self, cfg, batchnorm=nn.BatchNorm2d, **kwargs): super(deeplabv3plusAuxReLUSigmoid, self).__init__(cfg, batchnorm, **kwargs) for m in self.modules(): if isinstance(m, nn.ReLU): m = nn.Sequential( nn.ReLU(inplace=True), nn.Sigmoid() ) @NETS.register_module class deeplabv3plusNorm(deeplabv3plus): def __init__(self, cfg, batchnorm=nn.BatchNorm2d, **kwargs): super(deeplabv3plusNorm, self).__init__(cfg, batchnorm, **kwargs) self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_NUM_CLASSES, 1, 1, padding=0, bias=False) def forward(self, x, getf=False, interpolate=True): N,C,H,W = x.size() l1, l2, l3, l4 = self.backbone(x) feature_aspp = self.aspp(l4) #feature_aspp = self.dropout1(feature_aspp) feature_shallow = self.shortcut_conv(l1) n,c,h,w = feature_shallow.size() feature_aspp = F.interpolate(feature_aspp,(h,w),mode='bilinear',align_corners=True) feature_cat = torch.cat([feature_aspp,feature_shallow],1) feature = self.cat_conv(feature_cat) feature_norm = torch.norm(feature, dim=1, keepdim=True).detach() feature = feature/feature_norm conv_norm = 
torch.norm(self.cls_conv.weight, dim=1, keepdim=True).detach() conv_norm = conv_norm.permute(1,0,2,3) result = self.cls_conv(feature)/conv_norm result = F.interpolate(result, (H,W), mode='bilinear',align_corners=True) if getf: if interpolate: feature = F.interpolate(feature, (H,W), mode='bilinear', align_corners=True) return result, feature else: return result
39.648551
146
0.720369
1,654
10,943
4.573761
0.092503
0.056048
0.049174
0.071381
0.785063
0.757832
0.723728
0.711831
0.710377
0.685658
0
0.021865
0.130677
10,943
275
147
39.792727
0.773363
0.054555
0
0.604348
0
0
0.016262
0
0
0
0
0
0
1
0.065217
false
0
0.034783
0
0.169565
0.004348
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
30358809035a2b0abd579cb9ed6221d760a5c0ca
78
py
Python
CodeWars/7 Kyu/Big Factorial.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/7 Kyu/Big Factorial.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
CodeWars/7 Kyu/Big Factorial.py
anubhab-code/Competitive-Programming
de28cb7d44044b9e7d8bdb475da61e37c018ac35
[ "MIT" ]
null
null
null
import math def factorial(n): if n >= 0: return math.factorial(n)
15.6
32
0.602564
12
78
3.916667
0.666667
0.425532
0
0
0
0
0
0
0
0
0
0.017857
0.282051
78
5
32
15.6
0.821429
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
30660185725f989ac2981b589b8cbb0f96bbf852
143
py
Python
net_promoter_score/apps.py
yunojuno/django-nps
a7f904f85f17f1f7735193e0b9aeb1010ecf9feb
[ "MIT" ]
3
2016-06-21T21:56:19.000Z
2019-10-02T13:04:37.000Z
net_promoter_score/apps.py
yunojuno/django-nps
a7f904f85f17f1f7735193e0b9aeb1010ecf9feb
[ "MIT" ]
5
2016-02-22T14:05:44.000Z
2020-06-03T18:32:09.000Z
net_promoter_score/apps.py
yunojuno/django-nps
a7f904f85f17f1f7735193e0b9aeb1010ecf9feb
[ "MIT" ]
4
2016-03-27T02:51:28.000Z
2017-07-05T16:20:07.000Z
from django.apps import AppConfig class NpsConfig(AppConfig): name = "net_promoter_score" verbose_name = "NPS (Net Promoter Score)"
17.875
45
0.734266
18
143
5.666667
0.722222
0.215686
0.313725
0
0
0
0
0
0
0
0
0
0.181818
143
7
46
20.428571
0.871795
0
0
0
0
0
0.293706
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
0628e280feaf964bc295d8c944e5a3204e22e7de
93
py
Python
nyamuk/__init__.py
MasterScott/nyamuk
ac4c6028de288a4c8e0b332ae16eae889deb643d
[ "BSD-2-Clause" ]
49
2015-01-27T15:06:31.000Z
2022-02-18T13:51:48.000Z
nyamuk/__init__.py
MasterScott/nyamuk
ac4c6028de288a4c8e0b332ae16eae889deb643d
[ "BSD-2-Clause" ]
10
2015-03-19T13:24:33.000Z
2019-03-01T10:06:23.000Z
nyamuk/__init__.py
MasterScott/nyamuk
ac4c6028de288a4c8e0b332ae16eae889deb643d
[ "BSD-2-Clause" ]
19
2015-01-27T15:13:29.000Z
2021-05-23T13:43:52.000Z
from nyamuk import Nyamuk from event import * import nyamuk_const as NC
11.625
33
0.634409
12
93
4.833333
0.583333
0.413793
0
0
0
0
0
0
0
0
0
0
0.354839
93
7
34
13.285714
0.966667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
0657fd07cead5d60fbd9d4817041a0e5f6ab9766
4,656
py
Python
tests/shortcuts/test_dialogs.py
ffaraone/interrogatio
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
[ "BSD-3-Clause" ]
5
2019-02-19T13:10:39.000Z
2022-03-04T19:11:04.000Z
tests/shortcuts/test_dialogs.py
ffaraone/interrogatio
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
[ "BSD-3-Clause" ]
11
2020-03-24T16:58:41.000Z
2021-12-14T10:19:17.000Z
tests/shortcuts/test_dialogs.py
ffaraone/interrogatio
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
[ "BSD-3-Clause" ]
2
2019-05-31T08:36:26.000Z
2020-12-18T17:58:50.000Z
import pytest from interrogatio.shortcuts import dialogs @pytest.mark.parametrize( ('func', 'kwargs', 'expected_kwargs'), ( ( dialogs.yes_no_dialog, { 'title': 'title', 'text': 'text', 'yes_text': 'yes_text', 'no_text': 'no_text', }, { 'title': 'title', 'text': 'text', 'yes_text': 'yes_text', 'no_text': 'no_text', }, ), ( dialogs.button_dialog, { 'title': 'title', 'text': 'text', }, { 'title': 'title', 'text': 'text', 'buttons': [], }, ), ( dialogs.button_dialog, { 'title': 'title', 'text': 'text', 'buttons': ['btn1', 'btn2'], }, { 'title': 'title', 'text': 'text', 'buttons': ['btn1', 'btn2'], }, ), ( dialogs.input_dialog, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', }, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', 'completer': None, 'password': False, }, ), ( dialogs.input_dialog, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', 'completer': 'completer', 'password': True, }, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', 'completer': 'completer', 'password': True, }, ), ( dialogs.message_dialog, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', }, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', }, ), ( dialogs.radiolist_dialog, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', }, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', 'values': None, }, ), ( dialogs.radiolist_dialog, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', 'values': ['a', 'b'], }, { 'title': 'title', 'text': 'text', 'ok_text': 'ok_text', 'cancel_text': 'cancel_text', 'values': ['a', 'b'], }, ), ( dialogs.progress_dialog, { 'title': 'title', 'text': 'text', }, { 'title': 'title', 'text': 'text', 'run_callback': None, }, ), ( dialogs.progress_dialog, { 'title': 'title', 'text': 'text', 'run_callback': 'a 
function', }, { 'title': 'title', 'text': 'text', 'run_callback': 'a function', }, ), ), ) def test_dialogs(mocker, func, kwargs, expected_kwargs): mocked = mocker.patch( f'interrogatio.shortcuts.dialogs.pt_{func.__name__}', ) mocker.patch( 'interrogatio.shortcuts.dialogs.for_dialog', return_value='a style', ) func(**kwargs) assert mocked.mock_calls[0].kwargs == { **expected_kwargs, 'style': 'a style', } kwargs['style'] = 'another style' func(**kwargs) assert mocked.mock_calls[1].kwargs == { **expected_kwargs, 'style': 'another style', }
25.582418
61
0.339562
310
4,656
4.867742
0.177419
0.132538
0.185553
0.238569
0.7389
0.730285
0.722995
0.586481
0.536117
0.436713
0
0.002653
0.514175
4,656
181
62
25.723757
0.664456
0
0
0.549133
0
0
0.234107
0.01933
0
0
0
0
0.011561
1
0.00578
false
0.017341
0.011561
0
0.017341
0
0
0
0
null
0
1
1
0
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
06634912ed64f2731e0d96887d1c4e5fb7882c7b
85
py
Python
layerserver_databaselayer/__init__.py
aroiginfraplan/giscube-admin
b7f3131b0186f847f3902df97f982cb288b16a49
[ "BSD-3-Clause" ]
5
2018-06-07T12:54:35.000Z
2022-01-14T10:38:38.000Z
layerserver_databaselayer/__init__.py
aroiginfraplan/giscube-admin
b7f3131b0186f847f3902df97f982cb288b16a49
[ "BSD-3-Clause" ]
140
2018-06-18T10:27:28.000Z
2022-03-23T09:53:15.000Z
layerserver_databaselayer/__init__.py
aroiginfraplan/giscube-admin
b7f3131b0186f847f3902df97f982cb288b16a49
[ "BSD-3-Clause" ]
1
2021-04-13T11:20:54.000Z
2021-04-13T11:20:54.000Z
default_app_config = 'layerserver_databaselayer.apps.LayerserverDatabaselayerConfig'
42.5
84
0.905882
7
85
10.571429
1
0
0
0
0
0
0
0
0
0
0
0
0.035294
85
1
85
85
0.902439
0
0
0
0
0
0.717647
0.717647
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
068e2a238f71d0359ee7ad3d1178fc8a5230fd31
2,181
py
Python
scopetools/_count.py
SingleronBio/SCOPE-tools
b2552c9e04b1a86d9d8610f670cc33622545d6f9
[ "Apache-2.0" ]
14
2020-05-26T03:48:32.000Z
2021-07-14T03:55:54.000Z
scopetools/_count.py
SingleronBio/CeleScope
b2552c9e04b1a86d9d8610f670cc33622545d6f9
[ "Apache-2.0" ]
2
2020-06-22T12:44:13.000Z
2020-06-28T04:41:52.000Z
scopetools/_count.py
SingleronBio/CeleScope
b2552c9e04b1a86d9d8610f670cc33622545d6f9
[ "Apache-2.0" ]
2
2020-06-23T07:15:14.000Z
2020-08-01T07:23:34.000Z
# -*- coding: utf-8 -*- import pandas as pd def umi_reads_downsample(seq_df): """ :param seq_df: :return: saturations """ saturations = pd.DataFrame(columns=['percent', 'median_gene_num', 'saturation']).set_index('percent') all_seq_df = seq_df.reset_index().set_index(['Barcode', 'geneID', 'UMI', 'mark']).index.repeat(seq_df['count']).to_frame().set_index(['Barcode']) saturations.loc[0, :] = [0, 0] for i in range(1, 11): sample_df = all_seq_df.sample(frac=i / 10) sample_df = sample_df.loc[sample_df['mark'] > 0] total = sample_df['UMI'].count() gene_num_median = sample_df.pivot_table(index='Barcode', aggfunc={'geneID': 'nunique'})['geneID'].median() sample_df = sample_df.pivot_table(index=['Barcode', 'geneID', 'UMI'], aggfunc={'UMI': 'count'}) repeat = sample_df.loc[sample_df['UMI'] > 1, 'UMI'].sum() saturation = repeat / total saturations.loc[i / 10, :] = [gene_num_median, saturation] return saturations def umi_count_downsample(seq_df): """ :param seq_df: :return: saturations """ saturations = pd.DataFrame(columns=['percent', 'median_gene_num', 'saturation']).set_index('percent') all_seq_df = seq_df.reset_index().set_index(['Barcode', 'geneID', 'UMI', 'mark']).index.repeat(seq_df['count']).to_frame().set_index(['Barcode']) saturations.loc[0, :] = [0, 0] for i in range(1, 11): sample_df = all_seq_df.sample(frac=i / 10) sample_df = sample_df.loc[sample_df['mark'] > 0] tmp = sample_df.pivot_table(index=['Barcode', 'geneID', 'UMI'], aggfunc={'mark': 'count'}).reset_index().set_index(['Barcode']) total = tmp.pivot_table(index=['Barcode'], aggfunc={'mark': 'count'})['mark'].sum() repeat = tmp[tmp['mark'] > 1].pivot_table(index=['Barcode'], aggfunc={'mark': 'count'})['mark'].sum() sample_df_pivot = sample_df.pivot_table(index=['Barcode'], aggfunc={'UMI': 'count', 'geneID': 'nunique'}) gene_num_median = sample_df_pivot['geneID'].median() saturation = repeat / total saturations.loc[i / 10, :] = [gene_num_median, saturation] return saturations
47.413043
149
0.63182
289
2,181
4.525952
0.179931
0.110092
0.059633
0.100917
0.840214
0.806575
0.776758
0.720183
0.720183
0.58104
0
0.013873
0.173774
2,181
45
150
48.466667
0.711987
0.0431
0
0.6
0
0
0.155914
0
0
0
0
0
0
1
0.066667
false
0
0.033333
0
0.166667
0
0
0
0
null
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
230995753fce9a552444d274719b8eedbe37be81
48
py
Python
atmos_space_flight/Python/rotevolve.py
als0052/AtmosSpaceDynamics
acf20f4ba320f55bf7e33d959539e7938a4b24d2
[ "CNRI-Python" ]
null
null
null
atmos_space_flight/Python/rotevolve.py
als0052/AtmosSpaceDynamics
acf20f4ba320f55bf7e33d959539e7938a4b24d2
[ "CNRI-Python" ]
null
null
null
atmos_space_flight/Python/rotevolve.py
als0052/AtmosSpaceDynamics
acf20f4ba320f55bf7e33d959539e7938a4b24d2
[ "CNRI-Python" ]
null
null
null
#!/usr/bin/env python # Filename: rotevolve.py
12
24
0.708333
7
48
4.857143
1
0
0
0
0
0
0
0
0
0
0
0
0.125
48
3
25
16
0.809524
0.895833
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
231b5e5caaf15952a391926d6258cd0abea00a9d
42
py
Python
user/common/config.py
spradeepv/2019-04-friendbook-user
745604a3cda8b15bd99b714178fcf45d969f102c
[ "MIT" ]
1
2019-04-23T05:40:45.000Z
2019-04-23T05:40:45.000Z
user/common/config.py
spradeepv/2019-04-friendbook-user
745604a3cda8b15bd99b714178fcf45d969f102c
[ "MIT" ]
null
null
null
user/common/config.py
spradeepv/2019-04-friendbook-user
745604a3cda8b15bd99b714178fcf45d969f102c
[ "MIT" ]
null
null
null
import os DB_HOST= os.getenv('DB_HOST')
8.4
29
0.714286
8
42
3.5
0.625
0.428571
0
0
0
0
0
0
0
0
0
0
0.142857
42
4
30
10.5
0.777778
0
0
0
0
0
0.170732
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
88b8372979f7ed1292c8cab3e47e7f413f264a2d
2,803
py
Python
insights/parsers/rhsm_conf.py
skateman/insights-core
e7cd3001ffc2558757b9e7759dbe27b8b29f4bac
[ "Apache-2.0" ]
1
2021-11-08T16:25:01.000Z
2021-11-08T16:25:01.000Z
insights/parsers/rhsm_conf.py
ahitacat/insights-core
0ba58dbe5edceef0bd4a74c1caf6b826381ccda5
[ "Apache-2.0" ]
null
null
null
insights/parsers/rhsm_conf.py
ahitacat/insights-core
0ba58dbe5edceef0bd4a74c1caf6b826381ccda5
[ "Apache-2.0" ]
null
null
null
""" rhsm.conf - File /etc/rhsm/rhsm.conf ==================================== """ from insights.core import IniConfigFile from insights.core.plugins import parser from insights.specs import Specs @parser(Specs.rhsm_conf) class RHSMConf(IniConfigFile): """ Parses content of "/etc/rhsm/rhsm.conf". Typical content of "/etc/rhsm/rhsm.conf" is:: # Unified Entitlement Platform Configuration [server] # Server hostname: hostname = subscription.rhn.redhat.com # Server prefix: prefix = /subscription # Server port: port = 443 # Set to 1 to disable certificate validation: insecure = 0 # Set the depth of certs which should be checked # when validating a certificate ssl_verify_depth = 3 # an http proxy server to use proxy_hostname = # port for http proxy server proxy_port = # user name for authenticating to an http proxy, if needed proxy_user = # password for basic http proxy auth, if needed proxy_password = [rhsm] # Content base URL: baseurl= https://cdn.redhat.com # Server CA certificate location: ca_cert_dir = /etc/rhsm/ca/ # Default CA cert to use when generating yum repo configs: repo_ca_cert = %(ca_cert_dir)sredhat-uep.pem # Where the certificates should be stored productCertDir = /etc/pki/product entitlementCertDir = /etc/pki/entitlement consumerCertDir = /etc/pki/consumer # Manage generation of yum repositories for subscribed content: manage_repos = 1 # Refresh repo files with server overrides on every yum command full_refresh_on_yum = 0 # If set to zero, the client will not report the package profile to # the subscription management service. 
report_package_profile = 1 # The directory to search for subscription manager plugins pluginDir = /usr/share/rhsm-plugins # The directory to search for plugin configuration files pluginConfDir = /etc/rhsm/pluginconf.d [rhsmcertd] # Interval to run cert check (in minutes): certCheckInterval = 240 # Interval to run auto-attach (in minutes): autoAttachInterval = 1440 Examples: >>> type(conf) <class 'insights.parsers.rhsm_conf.RHSMConf'> >>> conf.sections() ['server', 'rhsm', 'rhsmcertd'] >>> conf.has_option('rhsm', 'ca_cert_dir') True >>> conf.get("rhsm", "baseurl") 'https://cdn.redhat.com' >>> conf.get("rhsm", "pluginDir") '/usr/share/rhsm-plugins' >>> conf.getboolean("rhsm", "manage_repos") True """ pass
28.313131
75
0.610774
321
2,803
5.258567
0.454829
0.028436
0.01955
0.026659
0.117299
0.028436
0
0
0
0
0
0.008069
0.292544
2,803
98
76
28.602041
0.843167
0.836604
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.166667
0.5
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
88c58743d839711af7cf6baf5e866d1a88904c9f
135
py
Python
Core/core.py
danvitelli15/FantasyDraftHost
d60ab1846675a66cef670649442af7fc22c84a2c
[ "MIT" ]
null
null
null
Core/core.py
danvitelli15/FantasyDraftHost
d60ab1846675a66cef670649442af7fc22c84a2c
[ "MIT" ]
null
null
null
Core/core.py
danvitelli15/FantasyDraftHost
d60ab1846675a66cef670649442af7fc22c84a2c
[ "MIT" ]
null
null
null
import Core.context as db import Core.repository as repo players = None #List() def getPlayers(): return players repo.getById(1)
15
30
0.740741
20
135
5
0.75
0.2
0
0
0
0
0
0
0
0
0
0.008929
0.17037
135
9
31
15
0.883929
0.044444
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0.166667
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
88dd2fa282c683cc14f61efc494e917078783365
105
py
Python
onmt/metrics/__init__.py
KaijuML/PARENTing-rl
98d20e1899e0ff3a9a7a6bb3e50ec28ff0b3b700
[ "Apache-2.0" ]
8
2020-10-29T16:39:36.000Z
2021-04-28T19:04:40.000Z
onmt/metrics/__init__.py
KaijuML/PARENTing-rl
98d20e1899e0ff3a9a7a6bb3e50ec28ff0b3b700
[ "Apache-2.0" ]
2
2021-01-12T09:44:38.000Z
2021-03-30T19:42:46.000Z
onmt/metrics/__init__.py
KaijuML/PARENTing-rl
98d20e1899e0ff3a9a7a6bb3e50ec28ff0b3b700
[ "Apache-2.0" ]
1
2021-11-16T09:15:46.000Z
2021-11-16T09:15:46.000Z
"""All metrics than can be used for training with RL""" from onmt.metrics.parent import PARENTLossCompute
52.5
55
0.8
16
105
5.25
0.9375
0
0
0
0
0
0
0
0
0
0
0
0.12381
105
2
56
52.5
0.913043
0.466667
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
00120a5965dc42b4a4afed18e2f98e61e74f1934
967
py
Python
sdks/python/test/test_PasswordUpdateRequest.py
Brantone/appcenter-sdks
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
[ "MIT" ]
null
null
null
sdks/python/test/test_PasswordUpdateRequest.py
Brantone/appcenter-sdks
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
[ "MIT" ]
6
2019-10-23T06:38:53.000Z
2022-01-22T07:57:58.000Z
sdks/python/test/test_PasswordUpdateRequest.py
Brantone/appcenter-sdks
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
[ "MIT" ]
2
2019-10-23T06:31:05.000Z
2021-08-21T17:32:47.000Z
# coding: utf-8 """ App Center Client Microsoft Visual Studio App Center API # noqa: E501 OpenAPI spec version: preview Contact: benedetto.abbenanti@gmail.com Project Repository: https://github.com/b3nab/appcenter-sdks """ from __future__ import absolute_import import unittest import appcenter_sdk from PasswordUpdateRequest.clsPasswordUpdateRequest import PasswordUpdateRequest # noqa: E501 from appcenter_sdk.rest import ApiException class TestPasswordUpdateRequest(unittest.TestCase): """PasswordUpdateRequest unit test stubs""" def setUp(self): pass def tearDown(self): pass def testPasswordUpdateRequest(self): """Test PasswordUpdateRequest""" # FIXME: construct object with mandatory attributes with example values # model = appcenter_sdk.models.clsPasswordUpdateRequest.PasswordUpdateRequest() # noqa: E501 pass if __name__ == '__main__': unittest.main()
24.175
101
0.731127
98
967
7.05102
0.632653
0.034732
0.083936
0
0
0
0
0
0
0
0
0.014121
0.194416
967
39
102
24.794872
0.872914
0.468459
0
0.214286
0
0
0.016949
0
0
0
0
0.025641
0
1
0.214286
false
0.428571
0.357143
0
0.642857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
1
0
1
0
0
5
002a8b97b43b3a4c766592ee698b84f14d4e38ba
113
py
Python
libs/blocks/blocks/__init__.py
dendisuhubdy/attention-lvcsr
598d487c118e66875fdd625baa84ed29d283b800
[ "MIT" ]
1,067
2015-05-16T23:39:15.000Z
2019-02-10T13:33:00.000Z
libs/blocks/blocks/__init__.py
shenshenzhanzhan/attention-lvcsr
598d487c118e66875fdd625baa84ed29d283b800
[ "MIT" ]
577
2015-05-16T18:52:53.000Z
2018-11-27T15:31:09.000Z
libs/blocks/blocks/__init__.py
shenshenzhanzhan/attention-lvcsr
598d487c118e66875fdd625baa84ed29d283b800
[ "MIT" ]
379
2015-05-21T03:24:04.000Z
2019-01-29T02:55:00.000Z
"""The blocks library for parametrized Theano ops.""" import blocks.version __version__ = blocks.version.version
28.25
53
0.79646
14
113
6.142857
0.642857
0.302326
0.465116
0
0
0
0
0
0
0
0
0
0.106195
113
3
54
37.666667
0.851485
0.415929
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
00542bda6bc52e0952ffe878ab7f6a8d9ed664ed
43
py
Python
data/micro-benchmark/imports/import_all/main.py
vitsalis/pycg-evaluation
ce37eb5668465b0c17371914e863d699826447ee
[ "Apache-2.0" ]
121
2020-12-16T20:31:37.000Z
2022-03-21T20:32:43.000Z
data/micro-benchmark/imports/import_all/main.py
vitsalis/pycg-evaluation
ce37eb5668465b0c17371914e863d699826447ee
[ "Apache-2.0" ]
24
2021-03-13T00:04:00.000Z
2022-03-21T17:28:11.000Z
data/micro-benchmark/imports/import_all/main.py
vitsalis/pycg-evaluation
ce37eb5668465b0c17371914e863d699826447ee
[ "Apache-2.0" ]
19
2021-03-23T10:58:47.000Z
2022-03-24T19:46:50.000Z
from from_module import * func1() func2()
8.6
25
0.72093
6
43
5
0.833333
0
0
0
0
0
0
0
0
0
0
0.055556
0.162791
43
4
26
10.75
0.777778
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
cc84c1b3c001099a8e7f45535073bca4927ff92d
12
py
Python
main_dqn.py
Leonardo767/gridworld-learning
4c48e276378828cc308c83a858d719c29e8dfd80
[ "MIT" ]
null
null
null
main_dqn.py
Leonardo767/gridworld-learning
4c48e276378828cc308c83a858d719c29e8dfd80
[ "MIT" ]
null
null
null
main_dqn.py
Leonardo767/gridworld-learning
4c48e276378828cc308c83a858d719c29e8dfd80
[ "MIT" ]
null
null
null
print('hd')
6
11
0.583333
2
12
3.5
1
0
0
0
0
0
0
0
0
0
0
0
0.083333
12
1
12
12
0.636364
0
0
0
0
0
0.166667
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
ccbc284314eaf650b0c0e1f2f6a1d036addc56c9
62
py
Python
exporters/__init__.py
jt6562/GrownMemory
b07652210454abb98d7a896be9adbbf2452df621
[ "MIT" ]
null
null
null
exporters/__init__.py
jt6562/GrownMemory
b07652210454abb98d7a896be9adbbf2452df621
[ "MIT" ]
null
null
null
exporters/__init__.py
jt6562/GrownMemory
b07652210454abb98d7a896be9adbbf2452df621
[ "MIT" ]
null
null
null
# encoding: utf-8 from dir_exporter import DirectoryExporter
15.5
42
0.822581
8
62
6.25
1
0
0
0
0
0
0
0
0
0
0
0.018519
0.129032
62
3
43
20.666667
0.907407
0.241935
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ccc78c769fdf688ef99ce1f34d3d94343b044937
94
py
Python
__init__.py
pgbreen/numsph
aea5bbf979464aa2bbf52768a44ee9ca711b2810
[ "MIT" ]
null
null
null
__init__.py
pgbreen/numsph
aea5bbf979464aa2bbf52768a44ee9ca711b2810
[ "MIT" ]
null
null
null
__init__.py
pgbreen/numsph
aea5bbf979464aa2bbf52768a44ee9ca711b2810
[ "MIT" ]
null
null
null
from .numsph import sph, alp, gegenbauer, car2sph from .tester import testgeg,testsph,testall
31.333333
49
0.808511
13
94
5.846154
0.846154
0
0
0
0
0
0
0
0
0
0
0.012048
0.117021
94
2
50
47
0.903614
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
aec8617dc5843e83a61962c55e6a001a750de5b6
197
py
Python
metnet/layers/__init__.py
ValterFallenius/metnet
7cde48a7b5fc0b69a8ce9083f934949362620fd5
[ "MIT" ]
null
null
null
metnet/layers/__init__.py
ValterFallenius/metnet
7cde48a7b5fc0b69a8ce9083f934949362620fd5
[ "MIT" ]
null
null
null
metnet/layers/__init__.py
ValterFallenius/metnet
7cde48a7b5fc0b69a8ce9083f934949362620fd5
[ "MIT" ]
null
null
null
from .ConditionTime import ConditionTime from .ConvGRU import ConvGRU from .DownSampler import DownSampler from .Preprocessor import MetNetPreprocessor from .TimeDistributed import TimeDistributed
32.833333
44
0.873096
20
197
8.6
0.4
0
0
0
0
0
0
0
0
0
0
0
0.101523
197
5
45
39.4
0.971751
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9dd691159a881201923a6e7b999a71849804f40e
28
py
Python
kpe/BertKPE/MyCode/functions/add_folder/__init__.py
thunlp/COVID19IRQA
fe359ce12ce38fd74ccc004cc524ec6011580023
[ "MIT" ]
32
2020-03-26T17:03:54.000Z
2021-09-10T08:30:48.000Z
kpe/BertKPE/MyCode/functions/add_folder/__init__.py
thunlp/COVID19IRQA
fe359ce12ce38fd74ccc004cc524ec6011580023
[ "MIT" ]
1
2020-04-06T16:35:12.000Z
2020-04-13T07:08:14.000Z
kpe/BertKPE/MyCode/functions/add_folder/__init__.py
thunlp/COVID19IRQA
fe359ce12ce38fd74ccc004cc524ec6011580023
[ "MIT" ]
6
2020-03-28T05:07:22.000Z
2021-03-04T01:46:00.000Z
# from .fileloader import *
14
27
0.714286
3
28
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.178571
28
1
28
28
0.869565
0.892857
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d19470208aa19cfb966783e0568d5224375b9d24
36
py
Python
tests/__init__.py
calcite/onacol
4e4a9af6c61318d2e449840d98b1cd24251123bd
[ "MIT" ]
5
2021-07-26T08:20:23.000Z
2021-12-16T20:46:53.000Z
tests/__init__.py
calcite/onacol
4e4a9af6c61318d2e449840d98b1cd24251123bd
[ "MIT" ]
1
2021-08-30T14:23:23.000Z
2021-08-30T14:23:23.000Z
tests/__init__.py
calcite/onacol
4e4a9af6c61318d2e449840d98b1cd24251123bd
[ "MIT" ]
null
null
null
"""Unit test package for onacol."""
18
35
0.666667
5
36
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.138889
36
1
36
36
0.774194
0.805556
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5