hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
13401b3d12862c9698e3809b10e9efdb7752b74a
67
py
Python
test/test_hello_example.py
lcskrishna/python-c-extension
c8c0ec684cdb579a8b54629fd82e7635f90ac781
[ "MIT" ]
4
2017-12-14T04:00:10.000Z
2018-03-31T07:12:13.000Z
test/test_hello_example.py
lcskrishna/python-c-extension
c8c0ec684cdb579a8b54629fd82e7635f90ac781
[ "MIT" ]
null
null
null
test/test_hello_example.py
lcskrishna/python-c-extension
c8c0ec684cdb579a8b54629fd82e7635f90ac781
[ "MIT" ]
null
null
null
#!/usr/bin/python import helloworld print helloworld.helloworld()
13.4
29
0.791045
8
67
6.625
0.75
0
0
0
0
0
0
0
0
0
0
0
0.089552
67
4
30
16.75
0.868852
0.238806
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.5
null
null
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
1
0
5
1341ecdb52b5a1bece0e13c5f147b121833c2172
36
py
Python
src/hello.py
BrunoS3D/Python-Study
50fb095480dd5aeef7e0a458b5af4efca8a98929
[ "MIT" ]
null
null
null
src/hello.py
BrunoS3D/Python-Study
50fb095480dd5aeef7e0a458b5af4efca8a98929
[ "MIT" ]
null
null
null
src/hello.py
BrunoS3D/Python-Study
50fb095480dd5aeef7e0a458b5af4efca8a98929
[ "MIT" ]
null
null
null
# Ola mundo =] print('Hello World')
12
20
0.638889
5
36
4.6
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
36
2
21
18
0.766667
0.333333
0
0
0
0
0.5
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
13a47757aae5c57edfd81d7e8394360c57351efd
27
py
Python
sagii/apps/base/tests/__init__.py
jorgevilaca82/sagii
cd688df888b55a42a406ad82054d147cec13aa61
[ "MIT" ]
null
null
null
sagii/apps/base/tests/__init__.py
jorgevilaca82/sagii
cd688df888b55a42a406ad82054d147cec13aa61
[ "MIT" ]
2
2019-10-28T16:21:14.000Z
2019-10-28T20:33:17.000Z
sagii/apps/base/tests/__init__.py
jorgevilaca82/sagii
cd688df888b55a42a406ad82054d147cec13aa61
[ "MIT" ]
1
2019-08-24T13:45:53.000Z
2019-08-24T13:45:53.000Z
from .pessoa_tests import *
27
27
0.814815
4
27
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
27
1
27
27
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
13b89de113d0ba246619a1bbb11016297fda3fdd
349
py
Python
usure/wordvectors/core/word2vec_rep.py
coraxcr/usure
22ea892ac9814ead5e5d22266a8d53f36d077b5c
[ "MIT" ]
2
2019-12-11T17:14:03.000Z
2019-12-12T21:01:05.000Z
usure/wordvectors/core/word2vec_rep.py
coraxcr/usure
22ea892ac9814ead5e5d22266a8d53f36d077b5c
[ "MIT" ]
1
2019-07-16T04:43:39.000Z
2019-07-16T04:43:39.000Z
usure/wordvectors/core/word2vec_rep.py
coraxcr/usure
22ea892ac9814ead5e5d22266a8d53f36d077b5c
[ "MIT" ]
2
2019-07-11T19:18:49.000Z
2019-12-11T17:14:24.000Z
from abc import ABC, abstractmethod from gensim.models import Word2Vec from typing import Iterable class Word2VecRep(ABC): @abstractmethod def get(self, name: str) -> Word2Vec: pass @abstractmethod def get_all(self) -> Iterable[Word2Vec]: pass @abstractmethod def save(self, w2v: Word2Vec): pass
18.368421
44
0.670487
40
349
5.825
0.5
0.218884
0.171674
0.248927
0
0
0
0
0
0
0
0.022989
0.252149
349
18
45
19.388889
0.869732
0
0
0.461538
0
0
0
0
0
0
0
0
0
1
0.230769
false
0.230769
0.230769
0
0.538462
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
b914a64ade9914c67cf933d8414dd3a0abfe6d57
17,060
py
Python
tests/model_execution/test_bash.py
lucaspanayiotou/OasisLMF_SQL
619244f6c5b2e1b6483d50ada045fc24e081de42
[ "BSD-3-Clause" ]
null
null
null
tests/model_execution/test_bash.py
lucaspanayiotou/OasisLMF_SQL
619244f6c5b2e1b6483d50ada045fc24e081de42
[ "BSD-3-Clause" ]
1
2021-03-31T19:01:15.000Z
2021-03-31T19:01:15.000Z
tests/model_execution/test_bash.py
OasisLMF/OasisLMF_SQL
4c0edef7b346cf2a0b3cd0813320d063fa3e8b40
[ "BSD-3-Clause" ]
2
2019-03-21T09:22:34.000Z
2020-01-16T15:09:58.000Z
from __future__ import unicode_literals import hashlib import json import os import io import shutil from unittest import TestCase from oasislmf.model_execution.bash import genbash from oasislmf.utils import diff TEST_DIRECTORY = os.path.dirname(__file__) KPARSE_INPUT_FOLDER = os.path.join(TEST_DIRECTORY, "kparse_input") KPARSE_OUTPUT_FOLDER = os.path.join(TEST_DIRECTORY, "kparse_output") KPARSE_REFERENCE_FOLDER = os.path.join(TEST_DIRECTORY, "kparse_reference") class Genbash(TestCase): @classmethod def setUpClass(cls): if os.path.exists(KPARSE_OUTPUT_FOLDER): shutil.rmtree(KPARSE_OUTPUT_FOLDER) os.makedirs(KPARSE_OUTPUT_FOLDER) def md5(self, fname): hash_md5 = hashlib.md5() with io.open(fname, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) return hash_md5.hexdigest() def genbash(self, name, num_partitions, num_reinsurance_iterations=0, fifo_tmp_dir=False, mem_limit=False): input_filename = os.path.join(KPARSE_INPUT_FOLDER, "{}.json".format(name)) if num_reinsurance_iterations <= 0: output_filename = os.path.join(KPARSE_OUTPUT_FOLDER, "{}_{}_partition.sh".format(name, num_partitions)) else: output_filename = os.path.join( KPARSE_OUTPUT_FOLDER, "{}_{}_reins_layer_{}_partition.sh".format(name, num_reinsurance_iterations, num_partitions)) with io.open(input_filename, encoding='utf-8') as file: analysis_settings = json.load(file)['analysis_settings'] genbash( num_partitions, analysis_settings, output_filename, num_reinsurance_iterations, fifo_tmp_dir, mem_limit ) def check(self, name): output_filename = os.path.join(KPARSE_OUTPUT_FOLDER, "{}.sh".format(name)) reference_filename = os.path.join(KPARSE_REFERENCE_FOLDER, "{}.sh".format(name)) d = diff.unified_diff(reference_filename, output_filename, as_string=True) if d: self.fail(d) def update_fifo_tmpfile(self, name): ## Read random fifo dir name from generated file and replace in reference output_filename = os.path.join(KPARSE_OUTPUT_FOLDER, "{}.sh".format(name)) ref_template = 
os.path.join(KPARSE_REFERENCE_FOLDER, "{}.template".format(name)) ref_script = os.path.join(KPARSE_REFERENCE_FOLDER, "{}.sh".format(name)) with io.open(output_filename, 'r') as f: for line in f: if '/tmp/' in line: tmp_fifo_dir = line.split('/')[-2] print(tmp_fifo_dir) break # Replace placeholder '%FIFO_DIR%' with '<RandomDirName>' with io.open(ref_template, 'r') as f: ktools_script = f.read() ktools_script = ktools_script.replace('%FIFO_DIR%', tmp_fifo_dir) with io.open(ref_script, 'w') as f: f.write(ktools_script) def test_gul_summarycalc_1_partition(self): self.genbash("gul_summarycalc_1_output", 1) self.check("gul_summarycalc_1_output_1_partition") def test_gul_summarycalc_20_partition(self): self.genbash("gul_summarycalc_1_output", 20) self.check("gul_summarycalc_1_output_20_partition") def test_gul_eltcalc_1_partition(self): self.genbash("gul_eltcalc_1_output", 1) self.check("gul_eltcalc_1_output_1_partition") def test_gul_eltcalc_20_partition(self): self.genbash("gul_eltcalc_1_output", 20) self.check("gul_eltcalc_1_output_20_partition") def test_gul_aalcalc_1_partition(self): self.genbash("gul_aalcalc_1_output", 1) self.check("gul_aalcalc_1_output_1_partition") def test_gul_aalcalc_20_partition(self): self.genbash("gul_aalcalc_1_output", 20) self.check("gul_aalcalc_1_output_20_partition") def test_gul_pltcalc_1_partition(self): self.genbash("gul_pltcalc_1_output", 1) self.check("gul_pltcalc_1_output_1_partition") def test_gul_pltcalc_20_partition(self): self.genbash("gul_pltcalc_1_output", 20) self.check("gul_pltcalc_1_output_20_partition") def test_gul_agg_fu_lec_1_partition(self): self.genbash("gul_agg_fu_lec_1_output", 1) self.check("gul_agg_fu_lec_1_output_1_partition") def test_gul_agg_fu_lec_20_partition(self): self.genbash("gul_agg_fu_lec_1_output", 20) self.check("gul_agg_fu_lec_1_output_20_partition") def test_gul_occ_fu_lec_1_output_1_partition(self): self.genbash("gul_occ_fu_lec_1_output", 1) self.check("gul_occ_fu_lec_1_output_1_partition") def 
test_gul_occ_fu_lec_1_output_20_partition(self): self.genbash("gul_occ_fu_lec_1_output", 20) self.check("gul_occ_fu_lec_1_output_20_partition") def test_gul_agg_ws_lec_1_partition(self): self.genbash("gul_agg_ws_lec_1_output", 1) self.check("gul_agg_ws_lec_1_output_1_partition") def test_gul_agg_ws_lec_20_partition(self): self.genbash("gul_agg_ws_lec_1_output", 20) self.check("gul_agg_ws_lec_1_output_20_partition") def test_gul_occ_ws_lec_1_partition(self): self.genbash("gul_occ_ws_lec_1_output", 1) self.check("gul_occ_ws_lec_1_output_1_partition") def test_gul_occ_ws_lec_20_partition(self): self.genbash("gul_occ_ws_lec_1_output", 20) self.check("gul_occ_ws_lec_1_output_20_partition") def test_gul_agg_ws_mean_lec_1_partition(self): self.genbash("gul_agg_ws_mean_lec_1_output", 1) self.check("gul_agg_ws_mean_lec_1_output_1_partition") def test_gul_agg_ws_mean_lec_20_partition(self): self.genbash("gul_agg_ws_mean_lec_1_output", 20) self.check("gul_agg_ws_mean_lec_1_output_20_partition") def test_gul_occ_ws_mean_lec_1_partition(self): self.genbash("gul_occ_ws_mean_lec_1_output", 1) self.check("gul_occ_ws_mean_lec_1_output_1_partition") def test_gul_occ_ws_mean_lec_20_partition(self): self.genbash("gul_occ_ws_mean_lec_1_output", 20) self.check("gul_occ_ws_mean_lec_1_output_20_partition") def test_il_agg_sample_mean_lec_1_partition(self): self.genbash("il_agg_sample_mean_lec_1_output", 1) self.check("il_agg_sample_mean_lec_1_output_1_partition") def test_il_agg_sample_mean_lec_20_partition(self): self.genbash("il_agg_sample_mean_lec_1_output", 20) self.check("il_agg_sample_mean_lec_1_output_20_partition") def test_il_occ_sample_mean_lec_1_partition(self): self.genbash("il_occ_sample_mean_lec_1_output", 1) self.check("il_occ_sample_mean_lec_1_output_1_partition") def test_il_occ_sample_mean_lec_20_partition(self): self.genbash("il_occ_sample_mean_lec_1_output", 20) self.check("il_occ_sample_mean_lec_1_output_20_partition") def test_il_summarycalc_1_partition(self): 
self.genbash("il_summarycalc_1_output", 1) self.check("il_summarycalc_1_output_1_partition") def test_il_summarycalc_20_partition(self): self.genbash("il_summarycalc_1_output", 20) self.check("il_summarycalc_1_output_20_partition") def test_il_eltcalc_1_partition(self): self.genbash("il_eltcalc_1_output", 1) self.check("il_eltcalc_1_output_1_partition") def test_il_eltcalc_20_partition(self): self.genbash("il_eltcalc_1_output", 20) self.check("il_eltcalc_1_output_20_partition") def test_il_aalcalc_1_partition(self): self.genbash("il_aalcalc_1_output", 1) self.check("il_aalcalc_1_output_1_partition") def test_il_aalcalc_20_partition(self): self.genbash("il_aalcalc_1_output", 20) self.check("il_aalcalc_1_output_20_partition") def test_il_pltcalc_1_partition(self): self.genbash("il_pltcalc_1_output", 1) self.check("il_pltcalc_1_output_1_partition") def test_il_pltcalc_20_partition(self): self.genbash("il_pltcalc_1_output", 20) self.check("il_pltcalc_1_output_20_partition") def test_il_agg_fu_lec_1_partition(self): self.genbash("il_agg_fu_lec_1_output", 1) self.check("il_agg_fu_lec_1_output_1_partition") def test_il_agg_fu_lec_20_partition(self): self.genbash("il_agg_fu_lec_1_output", 20) self.check("il_agg_fu_lec_1_output_20_partition") def test_il_occ_fu_lec_1_output_1_partition(self): self.genbash("il_occ_fu_lec_1_output", 1) self.check("il_occ_fu_lec_1_output_1_partition") def test_il_occ_fu_lec_1_output_20_partition(self): self.genbash("il_occ_fu_lec_1_output", 20) self.check("il_occ_fu_lec_1_output_20_partition") def test_il_agg_ws_lec_1_partition(self): self.genbash("il_agg_ws_lec_1_output", 1) self.check("il_agg_ws_lec_1_output_1_partition") def test_il_agg_ws_lec_20_partition(self): self.genbash("il_agg_ws_lec_1_output", 20) self.check("il_agg_ws_lec_1_output_20_partition") def test_il_occ_ws_lec_1_partition(self): self.genbash("il_occ_ws_lec_1_output", 1) self.check("il_occ_ws_lec_1_output_1_partition") def test_il_occ_ws_lec_20_partition(self): 
self.genbash("il_occ_ws_lec_1_output", 20) self.check("il_occ_ws_lec_1_output_20_partition") def test_il_agg_ws_mean_lec_1_partition(self): self.genbash("il_agg_ws_mean_lec_1_output", 1) self.check("il_agg_ws_mean_lec_1_output_1_partition") def test_il_agg_ws_mean_lec_20_partition(self): self.genbash("il_agg_ws_mean_lec_1_output", 20) self.check("il_agg_ws_mean_lec_1_output_20_partition") def test_il_occ_ws_mean_lec_1_partition(self): self.genbash("il_occ_ws_mean_lec_1_output", 1) self.check("il_occ_ws_mean_lec_1_output_1_partition") def test_il_occ_ws_mean_lec_20_partition(self): self.genbash("il_occ_ws_mean_lec_1_output", 20) self.check("il_occ_ws_mean_lec_1_output_20_partition") def test_il_agg_sample_mean_lec_1_output_1_partition(self): self.genbash("il_agg_sample_mean_lec_1_output", 1) self.check("il_agg_sample_mean_lec_1_output_1_partition") def test_il_agg_sample_mean_lec_1_output_20_partition(self): self.genbash("il_agg_sample_mean_lec_1_output", 20) self.check("il_agg_sample_mean_lec_1_output_20_partition") def test_il_occ_sample_mean_lec_1_output_1_partition(self): self.genbash("il_occ_sample_mean_lec_1_output", 1) self.check("il_occ_sample_mean_lec_1_output_1_partition") def test_il_occ_sample_mean_lec_1_output_20_partition(self): self.genbash("il_occ_sample_mean_lec_1_output", 20) self.check("il_occ_sample_mean_lec_1_output_20_partition") def test_all_calcs_1_partition(self): self.genbash("all_calcs_1_output", 1) self.check("all_calcs_1_output_1_partition") def test_all_calcs_20_partition(self): self.genbash("all_calcs_1_output", 20) self.check("all_calcs_1_output_20_partition") def test_all_calcs_40_partition(self): self.genbash("all_calcs_1_output", 40) self.check("all_calcs_1_output_40_partition") def test_gul_no_lec_1_output_1_partition(self): self.genbash("gul_no_lec_1_output", 1) self.check("gul_no_lec_1_output_1_partition") def test_gul_no_lec_1_output_2_partition(self): self.genbash("gul_no_lec_1_output", 2) 
self.check("gul_no_lec_1_output_2_partition") def test_gul_no_lec_2_output_1_partition(self): self.genbash("gul_no_lec_2_output", 1) self.check("gul_no_lec_2_output_1_partition") def test_gul_no_lec_2_output_2_partitions(self): self.genbash("gul_no_lec_2_output", 2) self.check("gul_no_lec_2_output_2_partition") def test_gul_lec_1_output_1_partition(self): self.genbash("gul_lec_1_output", 1) self.check("gul_lec_1_output_1_partition") def test_gul_lec_1_output_2_partitions(self): self.genbash("gul_lec_1_output", 2) self.check("gul_lec_1_output_2_partition") def test_gul_lec_2_output_1_partition(self): self.genbash("gul_lec_2_output", 1) self.check("gul_lec_2_output_1_partition") def test_gul_lec_2_output_2_partitions(self): self.genbash("gul_lec_2_output", 2) self.check("gul_lec_2_output_2_partition") def test_il_no_lec_1_output_1_partition(self): self.genbash("il_no_lec_1_output", 1) self.check("il_no_lec_1_output_1_partition") def test_il_no_lec_1_output_2_partition(self): self.genbash("il_no_lec_1_output", 2) self.check("il_no_lec_1_output_2_partition") def test_il_no_lec_2_output_1_partition(self): self.genbash("il_no_lec_2_output", 1) self.check("il_no_lec_2_output_1_partition") def test_il_no_lec_2_output_2_partitions(self): self.genbash("il_no_lec_2_output", 2) self.check("il_no_lec_2_output_2_partition") def test_il_lec_1_output_1_partition(self): self.genbash("il_lec_1_output", 1) self.check("il_lec_1_output_1_partition") def test_il_lec_1_output_2_partitions(self): self.genbash("il_lec_1_output", 2) self.check("il_lec_1_output_2_partition") def test_il_lec_2_output_1_partition(self): self.genbash("il_lec_2_output", 1) self.check("il_lec_2_output_1_partition") def test_il_lec_2_output_2_partitions(self): self.genbash("il_lec_2_output", 2) self.check("il_lec_2_output_2_partition") def test_gul_il_no_lec_1_output_1_partition(self): self.genbash("gul_il_no_lec_1_output", 1) self.check("gul_il_no_lec_1_output_1_partition") def 
test_gul_il_no_lec_1_output_2_partition(self): self.genbash("gul_il_no_lec_1_output", 2) self.check("gul_il_no_lec_1_output_2_partition") def test_gul_il_no_lec_2_output_1_partition(self): self.genbash("gul_il_no_lec_2_output", 1) self.check("gul_il_no_lec_2_output_1_partition") def test_gul_il_no_lec_2_output_2_partitions(self): self.genbash("gul_il_no_lec_2_output", 2) self.check("gul_il_no_lec_2_output_2_partition") def test_gul_il_lec_1_output_1_partition(self): self.genbash("gul_il_lec_1_output", 1) self.check("gul_il_lec_1_output_1_partition") def test_gul_il_lec_1_output_2_partitions(self): self.genbash("gul_il_lec_1_output", 2) self.check("gul_il_lec_1_output_2_partition") def test_gul_il_lec_2_output_1_partition(self): self.genbash("gul_il_lec_2_output", 1) self.check("gul_il_lec_2_output_1_partition") def test_gul_il_lec_2_output_2_partitions(self): self.genbash("gul_il_lec_2_output", 2) self.check("gul_il_lec_2_output_2_partition") def test_gul_il_lec_2_output_10_partitions(self): self.genbash("gul_il_lec_2_output", 10) self.check("gul_il_lec_2_output_10_partition") def test_analysis_settings_1(self): self.genbash("analysis_settings_1", 1) self.check("analysis_settings_1_1_partition") def test_analysis_settings_2(self): self.genbash("analysis_settings_2", 1) self.check("analysis_settings_2_1_partition") def test_analysis_settings_3_0_reins_iters(self): self.genbash("analysis_settings_3", 1, 1) self.check("analysis_settings_3_1_reins_layer_1_partition") def test_analysis_settings_4_0_reins_iters(self): self.genbash("analysis_settings_4", 1, 1) self.check("analysis_settings_4_1_reins_layer_1_partition") # -------------------------------------------------------------- # def test_gul_il_lec_2_output_10_partitions_tmpfifo(self): self.genbash("gul_il_lec_2_tmpfifo_output", 10, 0, True) self.update_fifo_tmpfile("gul_il_lec_2_tmpfifo_output_10_partition") self.check("gul_il_lec_2_tmpfifo_output_10_partition") def 
test_gul_agg_ws_mean_lec_20_partition_tmpfifo_memlim(self): self.genbash("gul_agg_ws_mean_lec_1_tmpfifo_memlim_output", 20, 0, True, True) self.update_fifo_tmpfile("gul_agg_ws_mean_lec_1_tmpfifo_memlim_output_20_partition") self.check("gul_agg_ws_mean_lec_1_tmpfifo_memlim_output_20_partition") def test_analysis_settings_3_0_reins_iters_tmpfifo(self): self.genbash("analysis_settings_tmpfifo_3", 1, 1, True) self.update_fifo_tmpfile("analysis_settings_tmpfifo_3_1_reins_layer_1_partition") self.check("analysis_settings_tmpfifo_3_1_reins_layer_1_partition") def test_analysis_settings_4_0_reins_iters_tmpfifo_memlim(self): self.genbash("analysis_settings_tmpfifo_memlim_4", 1, 1, True, True) self.update_fifo_tmpfile("analysis_settings_tmpfifo_memlim_4_1_reins_layer_1_partition") self.check("analysis_settings_tmpfifo_memlim_4_1_reins_layer_1_partition")
39.953162
115
0.730481
2,703
17,060
4.031817
0.049575
0.093779
0.099101
0.145348
0.869609
0.847036
0.778767
0.702422
0.538631
0.384107
0
0.041252
0.17585
17,060
426
116
40.046948
0.733855
0.011137
0
0.05625
0
0
0.309494
0.257843
0
0
0
0
0
1
0.278125
false
0
0.028125
0
0.3125
0.003125
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
b97024c654a3c16d99238d056d31a76de61d3451
33
py
Python
__init__.py
scaperex/UltraDict
e8e89284fceaaeb5ee8bcbf2b21e0612ac5851f3
[ "Apache-2.0" ]
131
2022-03-12T11:05:42.000Z
2022-03-31T17:17:23.000Z
__init__.py
scaperex/UltraDict
e8e89284fceaaeb5ee8bcbf2b21e0612ac5851f3
[ "Apache-2.0" ]
5
2022-03-15T04:04:26.000Z
2022-03-25T11:00:23.000Z
__init__.py
scaperex/UltraDict
e8e89284fceaaeb5ee8bcbf2b21e0612ac5851f3
[ "Apache-2.0" ]
7
2022-03-18T01:39:57.000Z
2022-03-30T12:28:35.000Z
from .UltraDict import UltraDict
16.5
32
0.848485
4
33
7
0.75
0
0
0
0
0
0
0
0
0
0
0
0.121212
33
1
33
33
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b97ecb0d55591e51c5210a23c173042ebd2a65ba
88
py
Python
lda_classification/evaluation/__init__.py
FeryET/lda_classification
530f972b8955c9f51668475ef640cb644f9b3ab7
[ "MIT" ]
8
2020-10-12T07:35:13.000Z
2022-02-24T21:30:31.000Z
lda_classification/evaluation/__init__.py
FeryET/LDAThis
530f972b8955c9f51668475ef640cb644f9b3ab7
[ "MIT" ]
null
null
null
lda_classification/evaluation/__init__.py
FeryET/LDAThis
530f972b8955c9f51668475ef640cb644f9b3ab7
[ "MIT" ]
3
2021-01-12T22:45:15.000Z
2022-01-15T02:25:04.000Z
from lda_classification.evaluation.lda_coherence_evaluation import LDACoherenceEvaluator
88
88
0.943182
9
88
8.888889
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.034091
88
1
88
88
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b996dc11146dbf1126c941cc1e37d4c75b01c17b
36
py
Python
rsa/compute/compute.py
irahulgulati/Cryptoic
bd4a6e3d8dd9c383b350ca3496c4144c9a5f7200
[ "MIT" ]
1
2020-02-29T23:38:46.000Z
2020-02-29T23:38:46.000Z
rsa/compute/compute.py
irahulgulati/Cryptoic
bd4a6e3d8dd9c383b350ca3496c4144c9a5f7200
[ "MIT" ]
1
2020-03-23T00:00:41.000Z
2020-03-23T00:00:41.000Z
rsa/compute/compute.py
irahulgulati/Cryptoic
bd4a6e3d8dd9c383b350ca3496c4144c9a5f7200
[ "MIT" ]
null
null
null
def compute(p,q): n = p*q return n
12
17
0.611111
9
36
2.444444
0.666667
0.181818
0
0
0
0
0
0
0
0
0
0
0.222222
36
3
18
12
0.785714
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
b9a579ce04425c44b0e529a69eeab261e484fb14
96
py
Python
boa3_test/test_sc/bytes_test/BytesToStr.py
hal0x2328/neo3-boa
6825a3533384cb01660773050719402a9703065b
[ "Apache-2.0" ]
25
2020-07-22T19:37:43.000Z
2022-03-08T03:23:55.000Z
boa3_test/test_sc/bytes_test/BytesToStr.py
hal0x2328/neo3-boa
6825a3533384cb01660773050719402a9703065b
[ "Apache-2.0" ]
419
2020-04-23T17:48:14.000Z
2022-03-31T13:17:45.000Z
boa3_test/test_sc/bytes_test/BytesToStr.py
hal0x2328/neo3-boa
6825a3533384cb01660773050719402a9703065b
[ "Apache-2.0" ]
15
2020-05-21T21:54:24.000Z
2021-11-18T06:17:24.000Z
from boa3.builtin import public @public def bytes_to_str() -> str: return b'abc'.to_str()
13.714286
31
0.697917
16
96
4
0.75
0.15625
0
0
0
0
0
0
0
0
0
0.012658
0.177083
96
6
32
16
0.797468
0
0
0
0
0
0.03125
0
0
0
0
0
0
1
0.25
true
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
0
0
0
5
b9b918c94a5af2e743c2420b139fd41b0af519a9
17
py
Python
tests/test4.py
apascoa/Python_Code_Runner
3ccafa73917af595adace354e02e324a6b744d7e
[ "MIT" ]
null
null
null
tests/test4.py
apascoa/Python_Code_Runner
3ccafa73917af595adace354e02e324a6b744d7e
[ "MIT" ]
null
null
null
tests/test4.py
apascoa/Python_Code_Runner
3ccafa73917af595adace354e02e324a6b744d7e
[ "MIT" ]
null
null
null
print("Working4")
17
17
0.764706
2
17
6.5
1
0
0
0
0
0
0
0
0
0
0
0.058824
0
17
1
17
17
0.705882
0
0
0
0
0
0.444444
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
b9c35c338c0dd58195a3a8f574f18d371b3f5640
75
py
Python
kerasy/models.py
iwasakishuto/Keras-Imitation
8ac0cd7c8912d49d13b19a0182ad534c0781fbfe
[ "MIT" ]
4
2020-04-25T08:50:36.000Z
2020-04-26T04:49:16.000Z
kerasy/models.py
iwasakishuto/Keras-Imitation
8ac0cd7c8912d49d13b19a0182ad534c0781fbfe
[ "MIT" ]
null
null
null
kerasy/models.py
iwasakishuto/Keras-Imitation
8ac0cd7c8912d49d13b19a0182ad534c0781fbfe
[ "MIT" ]
null
null
null
"""Model-related utilities. """ from .engine.sequential import Sequential
15
41
0.76
8
75
7.125
0.875
0
0
0
0
0
0
0
0
0
0
0
0.106667
75
4
42
18.75
0.850746
0.32
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b9d3d0e56c39e6b38d5e2c45f72553bd88d63c60
251
py
Python
python/skripte_datensammlung/config.py
TimSchmittmann/R-vs-Python
4bacfaf236839a912f5a6a323d9756a47ab2d880
[ "MIT" ]
null
null
null
python/skripte_datensammlung/config.py
TimSchmittmann/R-vs-Python
4bacfaf236839a912f5a6a323d9756a47ab2d880
[ "MIT" ]
null
null
null
python/skripte_datensammlung/config.py
TimSchmittmann/R-vs-Python
4bacfaf236839a912f5a6a323d9756a47ab2d880
[ "MIT" ]
null
null
null
CONSUMER_KEY = 'Rcvp7SPl08t4t3ZNM8e7qBWy1' CONSUMER_SECRET = 'NqulWUlD8KpwYiaVdRUXg1dXuraTATNRk7vEYW26o9SAmYc4b7' ACCESS_TOKEN = '1403526518-5YNK1GGKJKWSh5BNSojGYmtfQ0ZWLyVGASpsWus' ACCESS_TOKEN_SECRET = 'z5vgcSyeWw4BnEVHu8yZzFTNL966dNDfoNFvVUrBTrdNu'
62.75
70
0.904382
14
251
15.857143
0.714286
0.099099
0
0
0
0
0
0
0
0
0
0.15
0.043825
251
4
71
62.75
0.775
0
0
0
0
0
0.674603
0.674603
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6a4463fd9e7d85f4ab44abbb5ea600f92855dade
71
py
Python
lectures/code/functions_docstring.py
naskoch/python_course
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
[ "MIT" ]
4
2015-08-10T17:46:55.000Z
2020-04-18T21:09:03.000Z
lectures/code/functions_docstring.py
naskoch/python_course
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
[ "MIT" ]
null
null
null
lectures/code/functions_docstring.py
naskoch/python_course
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
[ "MIT" ]
2
2019-04-24T03:31:02.000Z
2019-05-13T07:36:06.000Z
def nothing(): """ This function doesn't do anything. """ pass
17.75
46
0.591549
9
71
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.253521
71
3
47
23.666667
0.792453
0.478873
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
5
dbe6616e8faa59727140bb63e644ef1db026f9da
23,718
py
Python
sdk/containerregistry/azure-containerregistry/tests/test_container_registry_client_async.py
moovy2/azure-sdk-for-python
6b0495dc9917d47a7264f26cbd3221d43461a537
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
sdk/containerregistry/azure-containerregistry/tests/test_container_registry_client_async.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
sdk/containerregistry/azure-containerregistry/tests/test_container_registry_client_async.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ from datetime import datetime import pytest import six from azure.containerregistry import ( RepositoryProperties, ArtifactManifestProperties, ManifestOrder, ArtifactTagProperties, TagOrder, ) from azure.containerregistry.aio import ContainerRegistryClient from azure.core.exceptions import ResourceNotFoundError, ClientAuthenticationError from azure.core.async_paging import AsyncItemPaged from asynctestcase import AsyncContainerRegistryTestClass from constants import TO_BE_DELETED, HELLO_WORLD, ALPINE, BUSYBOX, DOES_NOT_EXIST from preparer import acr_preparer from testcase import get_authority class TestContainerRegistryClient(AsyncContainerRegistryTestClass): @acr_preparer() async def test_list_repository_names(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) repositories = client.list_repository_names() assert isinstance(repositories, AsyncItemPaged) count = 0 prev = None async for repo in repositories: count += 1 assert isinstance(repo, six.string_types) assert prev != repo prev = repo assert count > 0 @acr_preparer() async def test_list_repository_names_by_page(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) results_per_page = 2 total_pages = 0 repository_pages = client.list_repository_names(results_per_page=results_per_page) prev = None async for page in repository_pages.by_page(): page_count = 0 async for repo in page: assert isinstance(repo, six.string_types) assert prev != repo prev = repo page_count += 1 assert page_count <= results_per_page total_pages += 1 assert total_pages >= 1 @acr_preparer() async def test_delete_repository(self, containerregistry_endpoint, containerregistry_resource_group): self.import_image(containerregistry_endpoint, HELLO_WORLD, [TO_BE_DELETED]) client = 
self.create_registry_client(containerregistry_endpoint) await client.delete_repository(TO_BE_DELETED) async for repo in client.list_repository_names(): if repo == TO_BE_DELETED: raise ValueError("Repository not deleted") @acr_preparer() async def test_delete_repository_does_not_exist(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) await client.delete_repository("not_real_repo") @acr_preparer() async def test_get_repository_properties(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_repository_properties(ALPINE) assert isinstance(properties, RepositoryProperties) assert properties.name == ALPINE @acr_preparer() async def test_update_properties(self, containerregistry_endpoint): repository = self.get_resource_name("repo") tag_identifier = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repository, tag_identifier)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_repository_properties(repository) properties.can_delete = False properties.can_read = False properties.can_list = False properties.can_write = False new_properties = await client.update_repository_properties(repository, properties) assert properties.can_delete == new_properties.can_delete assert properties.can_read == new_properties.can_read assert properties.can_list == new_properties.can_list assert properties.can_write == new_properties.can_write new_properties.can_delete = True new_properties.can_read = True new_properties.can_list = True new_properties.can_write = True new_properties = await client.update_repository_properties(repository, new_properties) assert new_properties.can_delete == True assert new_properties.can_read == True assert new_properties.can_list == True assert new_properties.can_write == True @acr_preparer() async def 
test_update_repository_properties_kwargs(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_repository_properties(repo) properties = self.set_all_properties(properties, True) received = await client.update_repository_properties(repo, properties) self.assert_all_properties(properties, True) received = await client.update_repository_properties(repo, can_delete=False) assert received.can_delete == False assert received.can_list == True assert received.can_read == True assert received.can_write == True received = await client.update_repository_properties(repo, can_read=False) assert received.can_delete == False assert received.can_list == True assert received.can_read == False assert received.can_write == True received = await client.update_repository_properties(repo, can_write=False) assert received.can_delete == False assert received.can_list == True assert received.can_read == False assert received.can_write == False received = await client.update_repository_properties(repo, can_list=False) assert received.can_delete == False assert received.can_list == False assert received.can_read == False assert received.can_write == False received = await client.update_repository_properties( repo, can_delete=True, can_read=True, can_write=True, can_list=True, ) self.assert_all_properties(received, True) @acr_preparer() async def test_list_registry_artifacts(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) count = 0 async for artifact in client.list_manifest_properties(BUSYBOX): assert isinstance(artifact, ArtifactManifestProperties) assert isinstance(artifact.created_on, datetime) assert isinstance(artifact.last_updated_on, datetime) assert artifact.repository_name == BUSYBOX assert 
artifact.fully_qualified_reference in self.create_fully_qualified_reference(containerregistry_endpoint, BUSYBOX, artifact.digest) count += 1 assert count > 0 @acr_preparer() async def test_list_registry_artifacts_by_page(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) results_per_page = 2 pages = client.list_manifest_properties(BUSYBOX, results_per_page=results_per_page) page_count = 0 async for page in pages.by_page(): reg_count = 0 async for tag in page: reg_count += 1 assert reg_count <= results_per_page page_count += 1 assert page_count >= 1 @acr_preparer() async def test_list_registry_artifacts_descending(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) prev_last_updated_on = None count = 0 async for artifact in client.list_manifest_properties( BUSYBOX, order_by=ManifestOrder.LAST_UPDATE_TIME_DESCENDING ): if prev_last_updated_on: assert artifact.last_updated_on < prev_last_updated_on prev_last_updated_on = artifact.last_updated_on count += 1 assert count > 0 prev_last_updated_on = None count = 0 async for artifact in client.list_manifest_properties(BUSYBOX, order_by="timedesc"): if prev_last_updated_on: assert artifact.last_updated_on < prev_last_updated_on prev_last_updated_on = artifact.last_updated_on count += 1 assert count > 0 @acr_preparer() async def test_list_registry_artifacts_ascending(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) prev_last_updated_on = None count = 0 async for artifact in client.list_manifest_properties( BUSYBOX, order_by=ManifestOrder.LAST_UPDATE_TIME_ASCENDING ): if prev_last_updated_on: assert artifact.last_updated_on > prev_last_updated_on prev_last_updated_on = artifact.last_updated_on count += 1 assert count > 0 prev_last_updated_on = None count = 0 async for artifact in client.list_manifest_properties(BUSYBOX, order_by="timeasc"): if prev_last_updated_on: assert 
artifact.last_updated_on > prev_last_updated_on prev_last_updated_on = artifact.last_updated_on count += 1 assert count > 0 @acr_preparer() async def test_get_manifest_properties(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_manifest_properties(repo, tag) assert isinstance(properties, ArtifactManifestProperties) assert properties.repository_name == repo assert properties.fully_qualified_reference in self.create_fully_qualified_reference(containerregistry_endpoint, repo, properties.digest) @acr_preparer() async def test_get_manifest_properties_does_not_exist(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) with pytest.raises(ResourceNotFoundError): properties = await client.get_manifest_properties("DOESNOTEXIST", "DOESNOTEXIST") @acr_preparer() async def test_update_manifest_properties(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_manifest_properties(repo, tag) properties.can_delete = False properties.can_read = False properties.can_write = False properties.can_list = False received = await client.update_manifest_properties(repo, tag, properties) assert received.can_delete == properties.can_delete assert received.can_read == properties.can_read assert received.can_write == properties.can_write assert received.can_list == properties.can_list properties.can_delete = True properties.can_read = True properties.can_write = True properties.can_list = True received = await client.update_manifest_properties(repo, tag, properties) assert 
received.can_delete == True assert received.can_read == True assert received.can_write == True assert received.can_list == True @acr_preparer() async def test_update_manifest_properties_kwargs(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_manifest_properties(repo, tag) received = await client.update_manifest_properties(repo, tag, can_delete=False) assert received.can_delete == False received = await client.update_manifest_properties(repo, tag, can_read=False) assert received.can_read == False received = await client.update_manifest_properties(repo, tag, can_write=False) assert received.can_write == False received = await client.update_manifest_properties(repo, tag, can_list=False) assert received.can_list == False received = await client.update_manifest_properties( repo, tag, can_delete=True, can_read=True, can_write=True, can_list=True ) assert received.can_delete == True assert received.can_read == True assert received.can_write == True assert received.can_list == True @acr_preparer() async def test_get_tag_properties(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_tag_properties(repo, tag) assert isinstance(properties, ArtifactTagProperties) assert properties.name == tag @acr_preparer() async def test_get_tag_properties_does_not_exist(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) with pytest.raises(ResourceNotFoundError): await client.get_tag_properties("Nonexistent", "Nonexistent") @acr_preparer() async def 
test_update_tag_properties(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_tag_properties(repo, tag) properties.can_delete = False properties.can_read = False properties.can_write = False properties.can_list = False received = await client.update_tag_properties(repo, tag, properties) assert received.can_delete == properties.can_delete assert received.can_read == properties.can_read assert received.can_write == properties.can_write assert received.can_list == properties.can_list properties.can_delete = True properties.can_read = True properties.can_write = True properties.can_list = True received = await client.update_tag_properties(repo, tag, properties) assert received.can_delete == True assert received.can_read == True assert received.can_write == True assert received.can_list == True @acr_preparer() async def test_update_tag_properties_kwargs(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) properties = await client.get_tag_properties(repo, tag) received = await client.update_tag_properties(repo, tag, can_delete=False) assert received.can_delete == False received = await client.update_tag_properties(repo, tag, can_read=False) assert received.can_read == False received = await client.update_tag_properties(repo, tag, can_write=False) assert received.can_write == False received = await client.update_tag_properties(repo, tag, can_list=False) assert received.can_list == False received = await client.update_tag_properties( repo, tag, can_delete=True, can_read=True, can_write=True, can_list=True ) assert 
received.can_delete == True assert received.can_read == True assert received.can_write == True assert received.can_list == True @acr_preparer() async def test_list_tag_properties(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") tags = ["{}:{}".format(repo, tag + str(i)) for i in range(4)] self.import_image(containerregistry_endpoint, HELLO_WORLD, tags) client = self.create_registry_client(containerregistry_endpoint) count = 0 async for tag in client.list_tag_properties(repo): assert "{}:{}".format(repo, tag.name) in tags count += 1 assert count == 4 @acr_preparer() async def test_list_tag_properties_order_descending(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") tags = ["{}:{}".format(repo, tag + str(i)) for i in range(4)] self.import_image(containerregistry_endpoint, HELLO_WORLD, tags) client = self.create_registry_client(containerregistry_endpoint) prev_last_updated_on = None count = 0 async for tag in client.list_tag_properties(repo, order_by=TagOrder.LAST_UPDATE_TIME_DESCENDING): assert "{}:{}".format(repo, tag.name) in tags if prev_last_updated_on: assert tag.last_updated_on < prev_last_updated_on prev_last_updated_on = tag.last_updated_on count += 1 assert count == 4 prev_last_updated_on = None count = 0 async for tag in client.list_tag_properties(repo, order_by="timedesc"): assert "{}:{}".format(repo, tag.name) in tags if prev_last_updated_on: assert tag.last_updated_on < prev_last_updated_on prev_last_updated_on = tag.last_updated_on count += 1 assert count == 4 @acr_preparer() async def test_list_tag_properties_order_ascending(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") tags = ["{}:{}".format(repo, tag + str(i)) for i in range(4)] self.import_image(containerregistry_endpoint, HELLO_WORLD, tags) client = self.create_registry_client(containerregistry_endpoint) 
prev_last_updated_on = None count = 0 async for tag in client.list_tag_properties(repo, order_by=TagOrder.LAST_UPDATE_TIME_ASCENDING): assert "{}:{}".format(repo, tag.name) in tags if prev_last_updated_on: assert tag.last_updated_on > prev_last_updated_on prev_last_updated_on = tag.last_updated_on count += 1 assert count == 4 prev_last_updated_on = None count = 0 async for tag in client.list_tag_properties(repo, order_by="timeasc"): assert "{}:{}".format(repo, tag.name) in tags if prev_last_updated_on: assert tag.last_updated_on > prev_last_updated_on prev_last_updated_on = tag.last_updated_on count += 1 assert count == 4 @acr_preparer() async def test_delete_tag(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") tags = ["{}:{}".format(repo, tag + str(i)) for i in range(4)] self.import_image(containerregistry_endpoint, HELLO_WORLD, tags) client = self.create_registry_client(containerregistry_endpoint) await client.delete_tag(repo, tag + str(0)) count = 0 async for tag in client.list_tag_properties(repo): assert "{}:{}".format(repo, tag.name) in tags[1:] count += 1 assert count == 3 @acr_preparer() async def test_delete_tag_does_not_exist(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) await client.delete_tag(DOES_NOT_EXIST, DOES_NOT_EXIST) @acr_preparer() async def test_delete_manifest(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) await client.delete_manifest(repo, tag) self.sleep(10) with pytest.raises(ResourceNotFoundError): await client.get_manifest_properties(repo, tag) @acr_preparer() async def test_delete_manifest_does_not_exist(self, containerregistry_endpoint): repo = self.get_resource_name("repo") tag = self.get_resource_name("tag") 
self.import_image(containerregistry_endpoint, HELLO_WORLD, ["{}:{}".format(repo, tag)]) client = self.create_registry_client(containerregistry_endpoint) manifest = await client.get_manifest_properties(repo, tag) digest = manifest.digest digest = digest[:-10] + u"a" * 10 await client.delete_manifest(repo, digest) @acr_preparer() async def test_expiration_time_parsing(self, containerregistry_endpoint): from azure.containerregistry.aio._async_authentication_policy import ContainerRegistryChallengePolicy client = self.create_registry_client(containerregistry_endpoint) async for repo in client.list_repository_names(): pass for policy in client._client._client._pipeline._impl_policies: if isinstance(policy, ContainerRegistryChallengePolicy): policy._exchange_client._expiration_time = 0 break count = 0 async for repo in client.list_repository_names(): count += 1 assert count >= 1 # Live only, the fake credential doesn't check auth scope the same way @pytest.mark.live_test_only @acr_preparer() async def test_construct_container_registry_client(self, containerregistry_endpoint): authority = get_authority(containerregistry_endpoint) credential = self.get_credential(authority) client = ContainerRegistryClient(endpoint=containerregistry_endpoint, credential=credential, audience="https://microsoft.com") with pytest.raises(ClientAuthenticationError): properties = await client.get_repository_properties(HELLO_WORLD) with pytest.raises(TypeError): client = ContainerRegistryClient(endpoint=containerregistry_endpoint, credential=credential) @acr_preparer() def test_set_api_version(self, containerregistry_endpoint): client = self.create_registry_client(containerregistry_endpoint) assert client._client._config.api_version == "2021-07-01" client = self.create_registry_client(containerregistry_endpoint, api_version = "2019-08-15-preview") assert client._client._config.api_version == "2019-08-15-preview" with pytest.raises(ValueError): client = 
self.create_registry_client(containerregistry_endpoint, api_version = "2019-08-15")
40.405451
148
0.692976
2,705
23,718
5.758965
0.066174
0.126781
0.040891
0.034921
0.82058
0.795031
0.755809
0.715496
0.681859
0.678778
0
0.005295
0.227675
23,718
586
149
40.474403
0.845125
0.009444
0
0.622222
0
0
0.016477
0
0
0
0
0
0.24
1
0.002222
false
0.002222
0.06
0
0.064444
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e00fb5d214e79f43c65e62753a520bd3d890d46c
15,937
py
Python
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_from_url.py
romahamu/azure-sdk-for-python
a57c9f73b9121f79d317e1679b81fd460d6a25b8
[ "MIT" ]
2
2021-03-24T06:26:11.000Z
2021-04-18T15:55:59.000Z
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_from_url.py
RSidea/azure-sdk-for-python
8f691b2c95ee0fc53b12d08bd83e3f134d9cf0ef
[ "MIT" ]
null
null
null
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_from_url.py
RSidea/azure-sdk-for-python
8f691b2c95ee0fc53b12d08bd83e3f134d9cf0ef
[ "MIT" ]
1
2021-12-18T20:01:22.000Z
2021-12-18T20:01:22.000Z
# coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import pytest import functools from azure.core.exceptions import HttpResponseError, ServiceRequestError, ClientAuthenticationError from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer._generated.models import AnalyzeOperationResult from azure.ai.formrecognizer._response_handlers import prepare_content_result from azure.ai.formrecognizer import FormRecognizerClient, FormRecognizerApiVersion from testcase import FormRecognizerTest from preparers import GlobalClientPreparer as _GlobalClientPreparer from preparers import FormRecognizerPreparer GlobalClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient) class TestContentFromUrl(FormRecognizerTest): @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_encoded_url(self, client): with pytest.raises(HttpResponseError) as e: poller = client.begin_recognize_content_from_url("https://fakeuri.com/blank%20space") client.close() self.assertIn("https://fakeuri.com/blank%20space", e.value.response.request.body) @FormRecognizerPreparer() def test_content_url_bad_endpoint(self, formrecognizer_test_endpoint, formrecognizer_test_api_key): with self.assertRaises(ServiceRequestError): client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key)) poller = client.begin_recognize_content_from_url(self.invoice_url_pdf) @FormRecognizerPreparer() def test_content_url_auth_bad_key(self, formrecognizer_test_endpoint, formrecognizer_test_api_key): client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential("xxxx")) with self.assertRaises(ClientAuthenticationError): poller = client.begin_recognize_content_from_url(self.invoice_url_pdf) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_bad_url(self, client): with 
self.assertRaises(HttpResponseError): poller = client.begin_recognize_content_from_url("https://badurl.jpg") @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_url_pass_stream(self, client): with open(self.receipt_jpg, "rb") as receipt: with self.assertRaises(HttpResponseError): poller = client.begin_recognize_content_from_url(receipt) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_url_transform_pdf(self, client): responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeOperationResult, raw_response) extracted_layout = prepare_content_result(analyze_result) responses.append(analyze_result) responses.append(extracted_layout) poller = client.begin_recognize_content_from_url(self.invoice_url_pdf, cls=callback) result = poller.result() raw_response = responses[0] layout = responses[1] page_results = raw_response.analyze_result.page_results read_results = raw_response.analyze_result.read_results # Check form pages self.assertFormPagesTransformCorrect(layout, read_results, page_results) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_url_pdf(self, client): poller = client.begin_recognize_content_from_url(self.invoice_url_pdf) result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) self.assertEqual(layout.tables[0].row_count, 3) self.assertEqual(layout.tables[0].column_count, 6) self.assertEqual(layout.tables[0].page_number, 1) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_url_transform_jpg(self, client): responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeOperationResult, raw_response) extracted_layout = prepare_content_result(analyze_result) responses.append(analyze_result) responses.append(extracted_layout) poller = client.begin_recognize_content_from_url(self.form_url_jpg, cls=callback) result = 
poller.result() raw_response = responses[0] layout = responses[1] page_results = raw_response.analyze_result.page_results read_results = raw_response.analyze_result.read_results # Check form pages self.assertFormPagesTransformCorrect(layout, read_results, page_results) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_url_jpg(self, client): poller = client.begin_recognize_content_from_url(self.form_url_jpg) result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) self.assertEqual(layout.tables[0].row_count, 5) self.assertEqual(layout.tables[0].column_count, 5) self.assertEqual(layout.tables[1].row_count, 4) self.assertEqual(layout.tables[1].column_count, 2) self.assertEqual(layout.tables[0].page_number, 1) self.assertEqual(layout.tables[1].page_number, 1) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_multipage_url(self, client): poller = client.begin_recognize_content_from_url(self.multipage_url_pdf) result = poller.result() self.assertEqual(len(result), 3) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_multipage_transform_url(self, client): responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeOperationResult, raw_response) extracted_layout = prepare_content_result(analyze_result) responses.append(analyze_result) responses.append(extracted_layout) poller = client.begin_recognize_content_from_url(self.multipage_url_pdf, cls=callback) result = poller.result() raw_response = responses[0] layout = responses[1] page_results = raw_response.analyze_result.page_results read_results = raw_response.analyze_result.read_results # Check form pages self.assertFormPagesTransformCorrect(layout, read_results, page_results) @FormRecognizerPreparer() @GlobalClientPreparer() @pytest.mark.live_test_only def test_content_continuation_token(self, 
client): initial_poller = client.begin_recognize_content_from_url(self.form_url_jpg) cont_token = initial_poller.continuation_token() poller = client.begin_recognize_content_from_url(None, continuation_token=cont_token) result = poller.result() self.assertIsNotNone(result) initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_multipage_table_span_pdf(self, client): poller = client.begin_recognize_content_from_url(self.multipage_table_url_pdf) result = poller.result() self.assertEqual(len(result), 2) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertEqual(len(layout.tables), 2) self.assertEqual(layout.tables[0].row_count, 29) self.assertEqual(layout.tables[0].column_count, 4) self.assertEqual(layout.tables[0].page_number, 1) self.assertEqual(layout.tables[1].row_count, 6) self.assertEqual(layout.tables[1].column_count, 5) self.assertEqual(layout.tables[1].page_number, 1) layout = result[1] self.assertEqual(len(layout.tables), 1) self.assertEqual(layout.page_number, 2) self.assertEqual(layout.tables[0].row_count, 23) self.assertEqual(layout.tables[0].column_count, 5) self.assertEqual(layout.tables[0].page_number, 2) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_multipage_table_span_transform(self, client): responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeOperationResult, raw_response) extracted_layout = prepare_content_result(analyze_result) responses.append(analyze_result) responses.append(extracted_layout) poller = client.begin_recognize_content_from_url(self.multipage_table_url_pdf, cls=callback) result = poller.result() raw_response = responses[0] layout = responses[1] page_results = raw_response.analyze_result.page_results read_results = raw_response.analyze_result.read_results # Check form pages self.assertFormPagesTransformCorrect(layout, 
read_results, page_results) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_selection_marks(self, client): poller = client.begin_recognize_content_from_url(form_url=self.selection_mark_url_pdf) result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0}) def test_content_selection_marks_v2(self, client): poller = client.begin_recognize_content_from_url(form_url=self.selection_mark_url_pdf) result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_specify_pages(self, client): poller = client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1"]) result = poller.result() assert len(result) == 1 poller = client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1", "3"]) result = poller.result() assert len(result) == 2 poller = client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1-2"]) result = poller.result() assert len(result) == 2 poller = client.begin_recognize_content_from_url(self.multipage_url_pdf, pages=["1-2", "3"]) result = poller.result() assert len(result) == 3 @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_specified(self, client): poller = client.begin_recognize_content_from_url(self.form_url_jpg, language="de") assert 'de' == poller._polling_method._initial_response.http_response.request.query['language'] result = poller.result() assert result @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_error(self, client): with pytest.raises(HttpResponseError) as e: client.begin_recognize_content_from_url(self.form_url_jpg, language="not a language") assert "NotSupportedLanguage" == 
e.value.error.code @FormRecognizerPreparer() @GlobalClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0}) def test_content_language_v2(self, client): with pytest.raises(ValueError) as e: client.begin_recognize_content_from_url(self.form_url_jpg, language="en") assert "'language' is only available for API version V2_1_PREVIEW and up" in str(e.value) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_german(self, client, formrecognizer_testing_data_container_sas_url): blob_sas_url = self.get_blob_url(formrecognizer_testing_data_container_sas_url, "testingdata", "content_german.pdf") poller = client.begin_recognize_content_from_url(blob_sas_url, language="de") result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_chinese_simplified(self, client, formrecognizer_testing_data_container_sas_url): blob_sas_url = self.get_blob_url(formrecognizer_testing_data_container_sas_url, "testingdata", "content_chinese_simplified.pdf") poller = client.begin_recognize_content_from_url(blob_sas_url, language="zh-Hans") result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_dutch(self, client, formrecognizer_testing_data_container_sas_url): blob_sas_url = self.get_blob_url(formrecognizer_testing_data_container_sas_url, "testingdata", "content_dutch.pdf") poller = client.begin_recognize_content_from_url(blob_sas_url, language="nl") result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_french(self, client, 
formrecognizer_testing_data_container_sas_url): blob_sas_url = self.get_blob_url(formrecognizer_testing_data_container_sas_url, "testingdata", "content_french.pdf") poller = client.begin_recognize_content_from_url(blob_sas_url, language="fr") result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_italian(self, client, formrecognizer_testing_data_container_sas_url): blob_sas_url = self.get_blob_url(formrecognizer_testing_data_container_sas_url, "testingdata", "content_italian.pdf") poller = client.begin_recognize_content_from_url(blob_sas_url, language="it") result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_portuguese(self, client, formrecognizer_testing_data_container_sas_url): blob_sas_url = self.get_blob_url(formrecognizer_testing_data_container_sas_url, "testingdata", "content_portuguese.pdf") poller = client.begin_recognize_content_from_url(blob_sas_url, language="pt") result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result) @FormRecognizerPreparer() @GlobalClientPreparer() def test_content_language_spanish(self, client, formrecognizer_testing_data_container_sas_url): blob_sas_url = self.get_blob_url(formrecognizer_testing_data_container_sas_url, "testingdata", "content_spanish.pdf") poller = client.begin_recognize_content_from_url(blob_sas_url, language="es") result = poller.result() self.assertEqual(len(result), 1) layout = result[0] self.assertEqual(layout.page_number, 1) self.assertFormPagesHasValues(result)
45.404558
136
0.725168
1,744
15,937
6.3125
0.108945
0.062676
0.056318
0.076029
0.835226
0.809792
0.783359
0.747207
0.691071
0.665456
0
0.008593
0.182155
15,937
350
137
45.534286
0.836044
0.017632
0
0.633562
0
0
0.031767
0.003324
0
0
0
0
0.263699
1
0.106164
false
0.003425
0.034247
0
0.143836
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e01c917b2e928fde2ea612fe5831db65a451c977
71
py
Python
djago-classic-user-accounts/ClassicUserAccounts/__init__.py
kailashchandra92/django-classic-user-account
f3a4e5c0b424d8ed63d4f393bfa141a2de526c9a
[ "BSD-2-Clause" ]
1
2018-12-24T13:55:59.000Z
2018-12-24T13:55:59.000Z
ClassicUserAccounts/__init__.py
shyampathak/django-classic-user-account
49e086de6feb2ee19fce4b8463dd8760694d03c6
[ "BSD-2-Clause" ]
null
null
null
ClassicUserAccounts/__init__.py
shyampathak/django-classic-user-account
49e086de6feb2ee19fce4b8463dd8760694d03c6
[ "BSD-2-Clause" ]
null
null
null
default_app_config='ClassicUserAccounts.apps.ClassicUserAccountsConfig'
71
71
0.929577
6
71
10.666667
1
0
0
0
0
0
0
0
0
0
0
0
0
71
1
71
71
0.901408
0
0
0
0
0
0.694444
0.694444
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e03140e6c95f3e617a16eac60a702feac1c89426
20
py
Python
chapter-1/example.py
NavyaSutari/spring-CIS5755
badd1a792367bdcc29ecb24ae509bbab3b717600
[ "MIT" ]
null
null
null
chapter-1/example.py
NavyaSutari/spring-CIS5755
badd1a792367bdcc29ecb24ae509bbab3b717600
[ "MIT" ]
null
null
null
chapter-1/example.py
NavyaSutari/spring-CIS5755
badd1a792367bdcc29ecb24ae509bbab3b717600
[ "MIT" ]
null
null
null
print("hello navya")
20
20
0.75
3
20
5
1
0
0
0
0
0
0
0
0
0
0
0
0.05
20
1
20
20
0.789474
0
0
0
0
0
0.52381
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
e04e29284b93aeba96ac669f3e0a478a70adeeef
284
py
Python
src/compapp/tests/test_base.py
tkf/compapp
d34d75553f4c5ecd8fc3112072ff58a28cdc8c87
[ "BSD-2-Clause" ]
null
null
null
src/compapp/tests/test_base.py
tkf/compapp
d34d75553f4c5ecd8fc3112072ff58a28cdc8c87
[ "BSD-2-Clause" ]
null
null
null
src/compapp/tests/test_base.py
tkf/compapp
d34d75553f4c5ecd8fc3112072ff58a28cdc8c87
[ "BSD-2-Clause" ]
null
null
null
import pickle from ..base import Unspecified def test_unspecified_pickleable(): assert pickle.loads(pickle.dumps(Unspecified)) is Unspecified def test_unspecified_repr(): assert repr(Unspecified) == 'Unspecified' def test_unspecified_bool(): assert not Unspecified
17.75
65
0.771127
33
284
6.454545
0.454545
0.197183
0.253521
0.408451
0
0
0
0
0
0
0
0
0.147887
284
15
66
18.933333
0.880165
0
0
0
0
0
0.038732
0
0
0
0
0
0.375
1
0.375
true
0
0.25
0
0.625
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
1
0
0
5
e06304c51a9fb12c81fc9aecbe0ef28f5af1e597
101
py
Python
enthought/chaco/tools/image_inspector_tool.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/chaco/tools/image_inspector_tool.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/chaco/tools/image_inspector_tool.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from __future__ import absolute_import from chaco.tools.image_inspector_tool import *
25.25
46
0.851485
14
101
5.642857
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.108911
101
3
47
33.666667
0.877778
0.118812
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e065614cab916b1ea1fc545244c1e677c626b0d6
95
py
Python
careers-api/app/models/__init__.py
kbelsvik/career-skills-capstone
23fce2c579edf0b169c7137b12fafc7a92c1fa7a
[ "MIT" ]
null
null
null
careers-api/app/models/__init__.py
kbelsvik/career-skills-capstone
23fce2c579edf0b169c7137b12fafc7a92c1fa7a
[ "MIT" ]
null
null
null
careers-api/app/models/__init__.py
kbelsvik/career-skills-capstone
23fce2c579edf0b169c7137b12fafc7a92c1fa7a
[ "MIT" ]
2
2019-01-23T19:18:41.000Z
2020-05-10T23:17:08.000Z
from flask import Blueprint bp = Blueprint('models', __name__) from app.models import skills
15.833333
34
0.778947
13
95
5.384615
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.147368
95
5
35
19
0.864198
0
0
0
0
0
0.063158
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
5
0ec5b6dcbde7ffb910898541b8b68fb3e1f3799f
76
py
Python
python/testData/deprecation/hashlibMd5/hashlib.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2018-12-29T09:53:39.000Z
2018-12-29T09:53:42.000Z
python/testData/deprecation/hashlibMd5/hashlib.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/deprecation/hashlibMd5/hashlib.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
__all__ = ['md5'] def my_md5(s): return s globals()["md5"] = my_md5
8.444444
25
0.565789
12
76
3.083333
0.583333
0.27027
0
0
0
0
0
0
0
0
0
0.068966
0.236842
76
8
26
9.5
0.568966
0
0
0
0
0
0.078947
0
0
0
0
0
0
1
0.25
false
0
0
0.25
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
1602825b13463919f043b15387872aa21c7e0d30
97
py
Python
#!/usr/bin/python/rmdir.py
xccvv/pret
a9e411149f39bf3325a2c7696bc4c4dddb379eec
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python/rmdir.py
xccvv/pret
a9e411149f39bf3325a2c7696bc4c4dddb379eec
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python/rmdir.py
xccvv/pret
a9e411149f39bf3325a2c7696bc4c4dddb379eec
[ "Apache-2.0" ]
1
2018-10-28T09:29:10.000Z
2018-10-28T09:29:10.000Z
#!/usr/bin/python import os # This would remove "/tmp/test" directory. os.rmdir( "/tmp/test" )
16.166667
42
0.670103
15
97
4.333333
0.8
0.215385
0
0
0
0
0
0
0
0
0
0
0.14433
97
5
43
19.4
0.783133
0.587629
0
0
0
0
0.236842
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
1611bcdea5f52f969cdf98a29e4f213df4c084d5
1,928
py
Python
tests/test_errors.py
jayeshgupta91/python-webdriverwrapper
9b2c32bbf06ada669feb62ef17fd23365a14ad14
[ "MIT" ]
null
null
null
tests/test_errors.py
jayeshgupta91/python-webdriverwrapper
9b2c32bbf06ada669feb62ef17fd23365a14ad14
[ "MIT" ]
null
null
null
tests/test_errors.py
jayeshgupta91/python-webdriverwrapper
9b2c32bbf06ada669feb62ef17fd23365a14ad14
[ "MIT" ]
null
null
null
import pytest from webdriverwrapper.decorators import allowed_error_pages, allowed_any_error_message from webdriverwrapper.exceptions import ErrorPageException, ErrorMessagesException @allowed_error_pages('403') def test_check_error_page(driver_error_page): with pytest.raises(ErrorPageException) as excinfo: driver_error_page.check_errors() @allowed_error_pages('403') def test_check_errors_expected_error_page(driver_error_page): driver_error_page.check_errors(expected_error_page='403') @allowed_error_pages('403') def test_check_errors_allowed_error_pages(driver_error_page): driver_error_page.check_errors(allowed_error_pages=('403',)) @allowed_any_error_message def test_check_error_messages(driver_error_msgs): with pytest.raises(ErrorMessagesException) as excinfo: driver_error_msgs.check_errors() @allowed_any_error_message def test_check_errors_expected_error_messages(driver_error_msgs): driver_error_msgs.check_errors(expected_error_messages=('some-error', 'another-error')) @allowed_any_error_message def test_check_errors_not_all_expected_error_messages(driver_error_msgs): with pytest.raises(ErrorMessagesException) as excinfo: driver_error_msgs.check_errors(expected_error_messages=('some-error',)) @allowed_any_error_message def test_check_errors_allowed_error_messages(driver_error_msgs): driver_error_msgs.check_errors(allowed_error_messages=('some-error', 'another-error')) @allowed_any_error_message def test_check_errors_not_all_allowed_error_messages(driver_error_msgs): with pytest.raises(ErrorMessagesException) as excinfo: driver_error_msgs.check_errors(expected_error_messages=('some-error',)) @allowed_any_error_message def test_check_errors_expected_and_allowed_error_messages(driver_error_msgs): driver_error_msgs.check_errors(expected_error_messages=('some-error',), allowed_error_messages=('another-error',))
35.703704
118
0.840768
258
1,928
5.751938
0.124031
0.133423
0.121294
0.103774
0.840297
0.806604
0.774933
0.730458
0.654987
0.555256
0
0.008455
0.079876
1,928
53
119
36.377358
0.828072
0
0
0.411765
0
0
0.053942
0
0
0
0
0
0
1
0.264706
false
0
0.088235
0
0.352941
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
162dca1e2c715c422ac60c0841f2403ef964a967
150
py
Python
new/src/06.12.2019/task 3.py
VladBaryliuk/my_start_tasks
bf387543e6fa3ee303cbef04d2af48d558011ed9
[ "Apache-2.0" ]
null
null
null
new/src/06.12.2019/task 3.py
VladBaryliuk/my_start_tasks
bf387543e6fa3ee303cbef04d2af48d558011ed9
[ "Apache-2.0" ]
null
null
null
new/src/06.12.2019/task 3.py
VladBaryliuk/my_start_tasks
bf387543e6fa3ee303cbef04d2af48d558011ed9
[ "Apache-2.0" ]
null
null
null
a = int(input()) if a %2 ==0 and a%10 == 0 : print("2 и 10") elif a %2 == 0: print("2") elif a %10 == 0: print("10") else: print("0")
15
27
0.453333
30
150
2.266667
0.4
0.264706
0.088235
0.264706
0
0
0
0
0
0
0
0.161905
0.3
150
9
28
16.666667
0.485714
0
0
0
0
0
0.066667
0
0
0
0
0
0
1
0
false
0
0
0
0
0.444444
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
162e1577828aaf30df49c3c06cd096ab9919c020
5,179
py
Python
PicNumero/PicNumero.py
kmiddleton/Pic-Numero
69a295d208106c486854473521e8d1fef13a0a24
[ "MIT" ]
null
null
null
PicNumero/PicNumero.py
kmiddleton/Pic-Numero
69a295d208106c486854473521e8d1fef13a0a24
[ "MIT" ]
null
null
null
PicNumero/PicNumero.py
kmiddleton/Pic-Numero
69a295d208106c486854473521e8d1fef13a0a24
[ "MIT" ]
null
null
null
from scipy import misc from skimage import feature import matplotlib.pyplot as plt from skimage.filters import threshold_otsu, roberts, sobel, scharr, prewitt from skimage.color import rgb2gray import numpy as np import numpy.matlib from skimage.color import rgb2gray from skimage.feature import greycomatrix, greycoprops from skimage import img_as_ubyte, io import os, sys import glcm import MLP import CNN import SVM import spectral_roi import Helper import Display # Initialise list for storing sub-image (ie block data). img_data = [] # Block processing function def blockfunc(block): global img_data # Check if not all zeros if(numpy.any(block)): #io.imsave("Block2/{}.png".format(Helper.generate_random_id()), block) img_data.append(block) ################### COUNTING BY REGRESSION ##################################### def run_with_glcm(image_filename="../Wheat_Images/004.jpg"): ''' Estimates the number of grains in a given image using a regression approach and glcm features. Args: image_filename: The path to the image from which a grain count is to be obtained. Returns: count: An estimate of the number of grains in the provided image. ''' model = glcm.train() count = glcm.count(image_filename, model) print("COUNT: {}".format(count)) return count ################### COUNTING BY DETECTION ##################################### def run_with_svm(image_filename="../Wheat_Images/004.jpg", ser_filename=None): ''' Estimates the number of grains in a given image using a Support Vector Machine. Args: image_filename: The path to the image from which a grain count is to be obtained. ser_filename: path to serialized list of isub-images already extracted from the image from which a grain count is to be obtained. Returns: count: An estimate of the number of grains in the provided image. ''' global img_data # Chop image up into sub-images and serilaise or just load serialised data if # it already exists. 
if(ser_filename == None and image_filename == "../Wheat_Images/004.jpg"): ser_filename = "../Wheat_Images/xxx_004.data" if(Helper.unserialize(ser_filename) == None): img = img_as_ubyte(io.imread(image_filename)) roi_img = spectral_roi.extract_roi(img, [1]) Helper.block_proc(roi_img, (20,20), blockfunc) #Helper.serialize(ser_filename, img_data) else: img_data = Helper.unserialize(ser_filename) # classify r = SVM.classify(img_data, featureRepresentation='glcm', shouldSaveResult=True) # Count number of '1s' in the result and return count = r.tolist().count(1) print("COUNT: {}".format(count)) return count def run_with_mlp(image_filename="../Wheat_Images/004.jpg", ser_filename=None): ''' Estimates the number of grains in a given image using a Multilayer Perceptron neural network. Args: image_filename: The path to the image from which a grain count is to be obtained. ser_filename: path to serialized list of isub-images already extracted from the image from which a grain count is to be obtained. Returns: count: An estimate of the number of grains in the provided image. ''' global img_data # Chop image up into sub-images and serilaise or just load serialised data if # it already exists. 
if(ser_filename == None and image_filename == "../Wheat_Images/004.jpg"): ser_filename = "../Wheat_Images/xxx_004.data" if(Helper.unserialize(ser_filename) == None): img = img_as_ubyte(io.imread(image_filename)) roi_img = spectral_roi.extract_roi(img, [1]) Helper.block_proc(roi_img, (20,20), blockfunc) #Helper.serialize(ser_filename, img_data) else: img_data = Helper.unserialize(ser_filename) # classify #MLP.build_model('glcm', iters=30, glcm_isMultidirectional=True) r = MLP.classify(img_data, featureRepresentation='glcm', shouldSaveResult=True) # Count number of '1s' in the result and return count = r.tolist().count(1) print("COUNT: {}".format(count)) return count def run_with_cnn(image_filename="../Wheat_Images/004.jpg", ser_filename=None): ''' Estimates the number of grains in a given image using a Convolutional neural network. Args: image_filename: The path to the image from which a grain count is to be obtained. ser_filename: path to serialized list of isub-images already extracted from the image from which a grain count is to be obtained. Returns: count: An estimate of the number of grains in the provided image. ''' global img_data # Chop image up into sub-images and serilaise or just load serialised data if # it already exists. if(ser_filename == None and image_filename == "../Wheat_Images/004.jpg"): ser_filename = "../Wheat_Images/xxx_004.data" if(Helper.unserialize(ser_filename) == None): img = img_as_ubyte(io.imread(image_filename)) roi_img = spectral_roi.extract_roi(img, [1]) Helper.block_proc(roi_img, (20,20), blockfunc) #Helper.serialize(ser_filename, img_data) else: img_data = Helper.unserialize(ser_filename) # classify r = CNN.classify(img_data[0], model_file=None,featureRepresentation='glcm', shouldSaveResult=True) # Count number of '1s' in the result and return count = r.tolist().count(1) print("COUNT: {}".format(count)) return count
31.011976
99
0.736049
778
5,179
4.768638
0.195373
0.062264
0.051213
0.036658
0.777089
0.760916
0.744205
0.744205
0.744205
0.744205
0
0.012931
0.14887
5,179
166
100
31.198795
0.828721
0.470554
0
0.585714
0
0
0.108599
0.090808
0
0
0
0
0
1
0.071429
false
0
0.257143
0
0.385714
0.057143
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
16b5c023b056b69c57808a1414ad18a9dae086d4
10,486
py
Python
openapi_client/models/__init__.py
hypostulate/mbta-api-client
f18903b6269c523c733a31574ff4579349fed3f8
[ "MIT" ]
null
null
null
openapi_client/models/__init__.py
hypostulate/mbta-api-client
f18903b6269c523c733a31574ff4579349fed3f8
[ "MIT" ]
null
null
null
openapi_client/models/__init__.py
hypostulate/mbta-api-client
f18903b6269c523c733a31574ff4579349fed3f8
[ "MIT" ]
null
null
null
# coding: utf-8 # flake8: noqa """ MBTA MBTA service API. https://www.mbta.com Source code: https://github.com/mbta/api # noqa: E501 The version of the OpenAPI document: 3.0 Contact: developer@mbta.com Generated by: https://openapi-generator.tech """ from __future__ import absolute_import # import models into model package from openapi_client.models.active_period import ActivePeriod from openapi_client.models.alert import Alert from openapi_client.models.alert_resource import AlertResource from openapi_client.models.alert_resource_attributes import AlertResourceAttributes from openapi_client.models.alert_resource_relationships import AlertResourceRelationships from openapi_client.models.alert_resource_relationships_facility import AlertResourceRelationshipsFacility from openapi_client.models.alert_resource_relationships_facility_data import AlertResourceRelationshipsFacilityData from openapi_client.models.alert_resource_relationships_facility_links import AlertResourceRelationshipsFacilityLinks from openapi_client.models.alerts import Alerts from openapi_client.models.bad_request import BadRequest from openapi_client.models.bad_request_errors import BadRequestErrors from openapi_client.models.bad_request_source import BadRequestSource from openapi_client.models.facilities import Facilities from openapi_client.models.facility import Facility from openapi_client.models.facility_property import FacilityProperty from openapi_client.models.facility_resource import FacilityResource from openapi_client.models.facility_resource_attributes import FacilityResourceAttributes from openapi_client.models.facility_resource_relationships import FacilityResourceRelationships from openapi_client.models.forbidden import Forbidden from openapi_client.models.forbidden_errors import ForbiddenErrors from openapi_client.models.informed_entity import InformedEntity from openapi_client.models.line import Line from openapi_client.models.line_resource import LineResource from 
openapi_client.models.line_resource_attributes import LineResourceAttributes from openapi_client.models.lines import Lines from openapi_client.models.live_facilities import LiveFacilities from openapi_client.models.live_facility import LiveFacility from openapi_client.models.live_facility_resource import LiveFacilityResource from openapi_client.models.live_facility_resource_attributes import LiveFacilityResourceAttributes from openapi_client.models.not_found import NotFound from openapi_client.models.not_found_errors import NotFoundErrors from openapi_client.models.not_found_source import NotFoundSource from openapi_client.models.prediction_resource import PredictionResource from openapi_client.models.prediction_resource_attributes import PredictionResourceAttributes from openapi_client.models.prediction_resource_relationships import PredictionResourceRelationships from openapi_client.models.prediction_resource_relationships_alerts import PredictionResourceRelationshipsAlerts from openapi_client.models.prediction_resource_relationships_alerts_data import PredictionResourceRelationshipsAlertsData from openapi_client.models.prediction_resource_relationships_alerts_links import PredictionResourceRelationshipsAlertsLinks from openapi_client.models.prediction_resource_relationships_route import PredictionResourceRelationshipsRoute from openapi_client.models.prediction_resource_relationships_route_data import PredictionResourceRelationshipsRouteData from openapi_client.models.prediction_resource_relationships_route_links import PredictionResourceRelationshipsRouteLinks from openapi_client.models.prediction_resource_relationships_schedule import PredictionResourceRelationshipsSchedule from openapi_client.models.prediction_resource_relationships_schedule_data import PredictionResourceRelationshipsScheduleData from openapi_client.models.prediction_resource_relationships_schedule_links import PredictionResourceRelationshipsScheduleLinks from 
openapi_client.models.prediction_resource_relationships_stop import PredictionResourceRelationshipsStop from openapi_client.models.prediction_resource_relationships_stop_data import PredictionResourceRelationshipsStopData from openapi_client.models.prediction_resource_relationships_stop_links import PredictionResourceRelationshipsStopLinks from openapi_client.models.prediction_resource_relationships_trip import PredictionResourceRelationshipsTrip from openapi_client.models.prediction_resource_relationships_trip_data import PredictionResourceRelationshipsTripData from openapi_client.models.prediction_resource_relationships_trip_links import PredictionResourceRelationshipsTripLinks from openapi_client.models.prediction_resource_relationships_vehicle import PredictionResourceRelationshipsVehicle from openapi_client.models.prediction_resource_relationships_vehicle_data import PredictionResourceRelationshipsVehicleData from openapi_client.models.prediction_resource_relationships_vehicle_links import PredictionResourceRelationshipsVehicleLinks from openapi_client.models.predictions import Predictions from openapi_client.models.route import Route from openapi_client.models.route_pattern import RoutePattern from openapi_client.models.route_pattern_resource import RoutePatternResource from openapi_client.models.route_pattern_resource_attributes import RoutePatternResourceAttributes from openapi_client.models.route_pattern_resource_relationships import RoutePatternResourceRelationships from openapi_client.models.route_pattern_resource_relationships_representative_trip import RoutePatternResourceRelationshipsRepresentativeTrip from openapi_client.models.route_pattern_resource_relationships_representative_trip_data import RoutePatternResourceRelationshipsRepresentativeTripData from openapi_client.models.route_pattern_resource_relationships_representative_trip_links import RoutePatternResourceRelationshipsRepresentativeTripLinks from openapi_client.models.route_patterns import 
RoutePatterns from openapi_client.models.route_resource import RouteResource from openapi_client.models.route_resource_attributes import RouteResourceAttributes from openapi_client.models.routes import Routes from openapi_client.models.schedule_resource import ScheduleResource from openapi_client.models.schedule_resource_attributes import ScheduleResourceAttributes from openapi_client.models.schedule_resource_relationships import ScheduleResourceRelationships from openapi_client.models.schedule_resource_relationships_prediction import ScheduleResourceRelationshipsPrediction from openapi_client.models.schedule_resource_relationships_prediction_data import ScheduleResourceRelationshipsPredictionData from openapi_client.models.schedule_resource_relationships_prediction_links import ScheduleResourceRelationshipsPredictionLinks from openapi_client.models.schedules import Schedules from openapi_client.models.schedules_links import SchedulesLinks from openapi_client.models.service import Service from openapi_client.models.service_included import ServiceIncluded from openapi_client.models.service_links import ServiceLinks from openapi_client.models.service_resource import ServiceResource from openapi_client.models.service_resource_attributes import ServiceResourceAttributes from openapi_client.models.services import Services from openapi_client.models.shape import Shape from openapi_client.models.shape_resource import ShapeResource from openapi_client.models.shape_resource_attributes import ShapeResourceAttributes from openapi_client.models.shape_resource_relationships import ShapeResourceRelationships from openapi_client.models.shape_resource_relationships_stops import ShapeResourceRelationshipsStops from openapi_client.models.shape_resource_relationships_stops_data import ShapeResourceRelationshipsStopsData from openapi_client.models.shape_resource_relationships_stops_links import ShapeResourceRelationshipsStopsLinks from openapi_client.models.shapes import Shapes from 
openapi_client.models.stop import Stop from openapi_client.models.stop_resource import StopResource from openapi_client.models.stop_resource_attributes import StopResourceAttributes from openapi_client.models.stop_resource_relationships import StopResourceRelationships from openapi_client.models.stop_resource_relationships_parent_station import StopResourceRelationshipsParentStation from openapi_client.models.stop_resource_relationships_parent_station_data import StopResourceRelationshipsParentStationData from openapi_client.models.stop_resource_relationships_parent_station_links import StopResourceRelationshipsParentStationLinks from openapi_client.models.stops import Stops from openapi_client.models.too_many_requests import TooManyRequests from openapi_client.models.too_many_requests_errors import TooManyRequestsErrors from openapi_client.models.trip import Trip from openapi_client.models.trip_resource import TripResource from openapi_client.models.trip_resource_attributes import TripResourceAttributes from openapi_client.models.trip_resource_relationships import TripResourceRelationships from openapi_client.models.trip_resource_relationships_route_pattern import TripResourceRelationshipsRoutePattern from openapi_client.models.trip_resource_relationships_route_pattern_data import TripResourceRelationshipsRoutePatternData from openapi_client.models.trip_resource_relationships_route_pattern_links import TripResourceRelationshipsRoutePatternLinks from openapi_client.models.trip_resource_relationships_service import TripResourceRelationshipsService from openapi_client.models.trip_resource_relationships_service_data import TripResourceRelationshipsServiceData from openapi_client.models.trip_resource_relationships_service_links import TripResourceRelationshipsServiceLinks from openapi_client.models.trip_resource_relationships_shape import TripResourceRelationshipsShape from openapi_client.models.trip_resource_relationships_shape_data import 
TripResourceRelationshipsShapeData from openapi_client.models.trip_resource_relationships_shape_links import TripResourceRelationshipsShapeLinks from openapi_client.models.trips import Trips from openapi_client.models.vehicle import Vehicle from openapi_client.models.vehicle_resource import VehicleResource from openapi_client.models.vehicle_resource_attributes import VehicleResourceAttributes from openapi_client.models.vehicle_resource_relationships import VehicleResourceRelationships from openapi_client.models.vehicles import Vehicles
77.674074
153
0.922945
1,123
10,486
8.28317
0.160285
0.138357
0.213825
0.289293
0.515051
0.458074
0.333907
0.274995
0.06289
0.023866
0
0.000704
0.051116
10,486
134
154
78.253731
0.934171
0.026225
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
16bd562f3ca15d9946dd74251497b454b4461059
96
py
Python
venv/lib/python3.8/site-packages/numpy/distutils/command/install_clib.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/numpy/distutils/command/install_clib.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/numpy/distutils/command/install_clib.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/d7/1b/f4/fe53d5bb7835e868080a38e587b4fccd0e8f4a57af0aeabc068731e583
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.385417
0
96
1
96
96
0.510417
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
16d57c86b70395b9fbb77f6780c4622bb7845d9d
44
py
Python
python/testData/intentions/SpecifyTypeInPy3AnnotationsIntentionTest/caretOnParamUsage.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/intentions/SpecifyTypeInPy3AnnotationsIntentionTest/caretOnParamUsage.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/intentions/SpecifyTypeInPy3AnnotationsIntentionTest/caretOnParamUsage.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
def foo(var): print(va<caret>r) pass
14.666667
21
0.590909
8
44
3.25
1
0
0
0
0
0
0
0
0
0
0
0
0.25
44
3
22
14.666667
0.787879
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.333333
0
0
0.333333
0.333333
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
16e12bd21360d44f5aa7dd0bab296f16cf69156a
24,291
py
Python
minfraud/webservice.py
maxmind/minfraud-api-python
bf93da6a7be736fc62d9bf21a16cd4bf1daf62c0
[ "Apache-2.0" ]
24
2015-06-24T15:41:24.000Z
2022-01-04T10:52:55.000Z
minfraud/webservice.py
maxmind/minfraud-api-python
bf93da6a7be736fc62d9bf21a16cd4bf1daf62c0
[ "Apache-2.0" ]
14
2016-10-04T15:07:36.000Z
2022-03-29T14:40:53.000Z
minfraud/webservice.py
maxmind/minfraud-api-python
bf93da6a7be736fc62d9bf21a16cd4bf1daf62c0
[ "Apache-2.0" ]
14
2017-01-24T13:30:17.000Z
2021-12-18T20:45:37.000Z
""" minfraud.webservice ~~~~~~~~~~~~~~~~~~~ This module contains the webservice client class. """ import json from typing import Any, cast, Dict, Optional, Tuple, Type, Union import aiohttp import aiohttp.http import requests import requests.utils from requests.models import Response from .version import __version__ from .errors import ( MinFraudError, HTTPError, AuthenticationError, InsufficientFundsError, InvalidRequestError, PermissionRequiredError, ) from .models import Factors, Insights, Score from .request import prepare_report, prepare_transaction _AIOHTTP_UA = f"minFraud-API/{__version__} {aiohttp.http.SERVER_SOFTWARE}" _REQUEST_UA = f"minFraud-API/{__version__} {requests.utils.default_user_agent()}" # pylint: disable=too-many-instance-attributes, missing-class-docstring class BaseClient: _account_id: str _license_key: str _locales: Tuple[str, ...] _timeout: float _score_uri: str _insights_uri: str _factors_uri: str _report_uri: str def __init__( self, account_id: int, license_key: str, host: str = "minfraud.maxmind.com", locales: Tuple[str, ...] 
= ("en",), timeout: float = 60, ) -> None: self._locales = locales self._account_id = str(account_id) self._license_key = license_key self._timeout = timeout base_uri = f"https://{host}/minfraud/v2.0" self._score_uri = "/".join([base_uri, "score"]) self._insights_uri = "/".join([base_uri, "insights"]) self._factors_uri = "/".join([base_uri, "factors"]) self._report_uri = "/".join([base_uri, "transactions", "report"]) def _handle_success( self, raw_body: str, uri: str, model_class: Union[Type[Factors], Type[Score], Type[Insights]], ) -> Union[Score, Factors, Insights]: """Handle successful response.""" try: decoded_body = json.loads(raw_body) except ValueError as ex: raise MinFraudError( f"Received a 200 response but could not decode the response as JSON: {raw_body}", 200, uri, ) from ex if "ip_address" in decoded_body: decoded_body["ip_address"]["_locales"] = self._locales return model_class(decoded_body) # type: ignore def _exception_for_error( self, status: int, content_type: str, raw_body: str, uri: str ) -> Union[ AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, PermissionRequiredError, ]: """Returns the exception for the error responses.""" if 400 <= status < 500: return self._exception_for_4xx_status(status, content_type, raw_body, uri) if 500 <= status < 600: return self._exception_for_5xx_status(status, raw_body, uri) return self._exception_for_unexpected_status(status, raw_body, uri) def _exception_for_4xx_status( self, status: int, content_type: str, raw_body: str, uri: str ) -> Union[ AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, PermissionRequiredError, ]: """Returns exception for error responses with 4xx status codes.""" if not raw_body: return HTTPError( f"Received a {status} error with no body", status, uri, raw_body ) if content_type.find("json") == -1: return HTTPError( f"Received a {status} with the following body: {raw_body}", status, uri, raw_body, ) try: decoded_body = 
json.loads(raw_body) except ValueError: return HTTPError( f"Received a {status} error but it did not " + f"include the expected JSON body: {raw_body}", status, uri, raw_body, ) else: if "code" in decoded_body and "error" in decoded_body: return self._exception_for_web_service_error( decoded_body.get("error"), decoded_body.get("code"), status, uri ) return HTTPError( "Error response contains JSON but it does not " + f"specify code or error keys: {raw_body}", status, uri, raw_body, ) @staticmethod def _exception_for_web_service_error( message: str, code: str, status: int, uri: str ) -> Union[ InvalidRequestError, AuthenticationError, PermissionRequiredError, InsufficientFundsError, ]: """Returns exception for error responses with the JSON body.""" if code in ( "ACCOUNT_ID_REQUIRED", "AUTHORIZATION_INVALID", "LICENSE_KEY_REQUIRED", "USER_ID_REQUIRED", ): return AuthenticationError(message) if code == "INSUFFICIENT_FUNDS": return InsufficientFundsError(message) if code == "PERMISSION_REQUIRED": return PermissionRequiredError(message) return InvalidRequestError(message, code, status, uri) @staticmethod def _exception_for_5xx_status( status: int, raw_body: Optional[str], uri: str, ) -> HTTPError: """Returns exception for error response with 5xx status codes.""" return HTTPError( f"Received a server error ({status}) for {uri}", status, uri, raw_body, ) @staticmethod def _exception_for_unexpected_status( status: int, raw_body: Optional[str], uri: str, ) -> HTTPError: """Returns exception for responses with unexpected status codes.""" return HTTPError( f"Received an unexpected HTTP status ({status}) for {uri}", status, uri, raw_body, ) class AsyncClient(BaseClient): """Async client for accessing the minFraud web services.""" _existing_session: aiohttp.ClientSession _proxy: Optional[str] def __init__( self, account_id: int, license_key: str, host: str = "minfraud.maxmind.com", locales: Tuple[str, ...] 
= ("en",), timeout: float = 60, proxy: Optional[str] = None, ) -> None: """Constructor for AsyncClient. :param account_id: Your MaxMind account ID :type account_id: int :param license_key: Your MaxMind license key :type license_key: str :param host: The host to use when connecting to the web service. :type host: str :param locales: A tuple of locale codes to use in name property :type locales: tuple[str] :param timeout: The timeout in seconds to use when waiting on the request. This sets both the connect timeout and the read timeout. The default is 60. :type timeout: float :param proxy: The URL of an HTTP proxy to use. It may optionally include a basic auth username and password, e.g., ``http://username:password@host:port``. :return: Client object :rtype: Client """ super().__init__(account_id, license_key, host, locales, timeout) self._proxy = proxy async def factors( self, transaction: Dict[str, Any], validate: bool = True, hash_email: bool = False, ) -> Factors: """Query Factors endpoint with transaction data. :param transaction: A dictionary containing the transaction to be sent to the minFraud Factors web service as specified in the `REST API documentation <https://dev.maxmind.com/minfraud/api-documentation/requests?lang=en>`_. :type transaction: dict :param validate: If set to false, validation of the transaction dictionary will be disabled. This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. :type validate: bool :param hash_email: By default, the email address is sent in plain text. If this is set to ``True``, the email address will be normalized and converted to an MD5 hash before the request is sent. The email domain will continue to be sent in plain text. 
:type hash_email: bool :return: A Factors model object :rtype: Factors :raises: AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, MinFraudError, """ return cast( Factors, await self._response_for( self._factors_uri, Factors, transaction, validate, hash_email, ), ) async def insights( self, transaction: Dict[str, Any], validate: bool = True, hash_email: bool = False, ) -> Insights: """Query Insights endpoint with transaction data. :param transaction: A dictionary containing the transaction to be sent to the minFraud Insights web service as specified in the `REST API documentation <https://dev.maxmind.com/minfraud/api-documentation/requests?lang=en>`_. :type transaction: dict :param validate: If set to false, validation of the transaction dictionary will be disabled. This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. :type validate: bool :param hash_email: By default, the email address is sent in plain text. If this is set to ``True``, the email address will be normalized and converted to an MD5 hash before the request is sent. The email domain will continue to be sent in plain text. :type hash_email: bool :return: An Insights model object :rtype: Insights :raises: AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, MinFraudError, """ return cast( Insights, await self._response_for( self._insights_uri, Insights, transaction, validate, hash_email, ), ) async def score( self, transaction: Dict[str, Any], validate: bool = True, hash_email: bool = False, ) -> Score: """Query Score endpoint with transaction data. :param transaction: A dictionary containing the transaction to be sent to the minFraud Score web service as specified in the `REST API documentation <https://dev.maxmind.com/minfraud/api-documentation/requests?lang=en>`_. 
:type transaction: dict :param validate: If set to false, validation of the transaction dictionary will be disabled. This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. :type validate: bool :param hash_email: By default, the email address is sent in plain text. If this is set to ``True``, the email address will be normalized and converted to an MD5 hash before the request is sent. The email domain will continue to be sent in plain text. :type hash_email: bool :return: A Score model object :rtype: Score :raises: AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, MinFraudError, """ return cast( Score, await self._response_for( self._score_uri, Score, transaction, validate, hash_email, ), ) async def report( self, report: Dict[str, Optional[str]], validate: bool = True ) -> None: """Send a transaction report to the Report Transaction endpoint. :param report: A dictionary containing the transaction report to be sent to the Report Transations web service as specified in the `REST API` documentation <https://dev.maxmind.com/minfraud/report-a-transaction?lang=en>_. :type report: dict :param validate: If set to false, validation of the report dictionary will be disabled. This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. 
:type validate: bool :return: Nothing :rtype: None :raises: AuthenticationError, InvalidRequestError, HTTPError, MinFraudError, """ prepared_request = prepare_report(report, validate) uri = self._report_uri async with await self._do_request(uri, prepared_request) as response: status = response.status content_type = response.content_type raw_body = await response.text() if status != 204: raise self._exception_for_error(status, content_type, raw_body, uri) async def _response_for( self, uri: str, model_class: Union[Type[Factors], Type[Score], Type[Insights]], request: Dict[str, Any], validate: bool, hash_email: bool, ) -> Union[Score, Factors, Insights]: """Send request and create response object.""" prepared_request = prepare_transaction(request, validate, hash_email) async with await self._do_request(uri, prepared_request) as response: status = response.status content_type = response.content_type raw_body = await response.text() if status != 200: raise self._exception_for_error(status, content_type, raw_body, uri) return self._handle_success(raw_body, uri, model_class) async def _do_request( self, uri: str, data: Dict[str, Any] ) -> aiohttp.ClientResponse: session = await self._session() return await session.post(uri, json=data, proxy=self._proxy) async def _session(self) -> aiohttp.ClientSession: if not hasattr(self, "_existing_session"): self._existing_session = aiohttp.ClientSession( auth=aiohttp.BasicAuth(self._account_id, self._license_key), headers={"Accept": "application/json", "User-Agent": _AIOHTTP_UA}, timeout=aiohttp.ClientTimeout(total=self._timeout), ) return self._existing_session async def close(self): """Close underlying session This will close the session and any associated connections. 
""" if hasattr(self, "_existing_session"): await self._existing_session.close() async def __aenter__(self) -> "AsyncClient": return self async def __aexit__(self, exc_type: None, exc_value: None, traceback: None) -> None: await self.close() class Client(BaseClient): """Synchronous client for accessing the minFraud web services.""" _proxies: Optional[Dict[str, str]] _session: requests.Session def __init__( self, account_id: int, license_key: str, host: str = "minfraud.maxmind.com", locales: Tuple[str, ...] = ("en",), timeout: float = 60, proxy: Optional[str] = None, ) -> None: """Constructor for Client. :param account_id: Your MaxMind account ID :type account_id: int :param license_key: Your MaxMind license key :type license_key: str :param host: The host to use when connecting to the web service. :type host: str :param locales: A tuple of locale codes to use in name property :type locales: tuple[str] :param timeout: The timeout in seconds to use when waiting on the request. This sets both the connect timeout and the read timeout. The default is 60. :param proxy: The URL of an HTTP proxy to use. It may optionally include a basic auth username and password, e.g., ``http://username:password@host:port``. :type timeout: float :return: Client object :rtype: Client """ super().__init__(account_id, license_key, host, locales, timeout) self._session = requests.Session() self._session.auth = (self._account_id, self._license_key) self._session.headers["Accept"] = "application/json" self._session.headers["User-Agent"] = _REQUEST_UA if proxy is None: self._proxies = None else: self._proxies = {"https": proxy} def factors( self, transaction: Dict[str, Any], validate: bool = True, hash_email: bool = False, ) -> Factors: """Query Factors endpoint with transaction data. 
:param transaction: A dictionary containing the transaction to be sent to the minFraud Factors web service as specified in the `REST API documentation <https://dev.maxmind.com/minfraud/api-documentation/requests?lang=en>`_. :type transaction: dict :param validate: If set to false, validation of the transaction dictionary will be disabled. This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. :type validate: bool :param hash_email: By default, the email address is sent in plain text. If this is set to ``True``, the email address will be normalized and converted to an MD5 hash before the request is sent. The email domain will continue to be sent in plain text. :type hash_email: bool :return: A Factors model object :rtype: Factors :raises: AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, MinFraudError, """ return cast( Factors, self._response_for( self._factors_uri, Factors, transaction, validate, hash_email, ), ) def insights( self, transaction: Dict[str, Any], validate: bool = True, hash_email: bool = False, ) -> Insights: """Query Insights endpoint with transaction data. :param transaction: A dictionary containing the transaction to be sent to the minFraud Insights web service as specified in the `REST API documentation <https://dev.maxmind.com/minfraud/api-documentation/requests?lang=en>`_. :type transaction: dict :param validate: If set to false, validation of the transaction dictionary will be disabled. This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. :type validate: bool :param hash_email: By default, the email address is sent in plain text. If this is set to ``True``, the email address will be normalized and converted to an MD5 hash before the request is sent. The email domain will continue to be sent in plain text. 
:type hash_email: bool :return: An Insights model object :rtype: Insights :raises: AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, MinFraudError, """ return cast( Insights, self._response_for( self._insights_uri, Insights, transaction, validate, hash_email, ), ) def score( self, transaction: Dict[str, Any], validate: bool = True, hash_email: bool = False, ) -> Score: """Query Score endpoint with transaction data. :param transaction: A dictionary containing the transaction to be sent to the minFraud Score web service as specified in the `REST API documentation <https://dev.maxmind.com/minfraud/api-documentation/requests?lang=en>`_. :type transaction: dict :param validate: If set to false, validation of the transaction dictionary will be disabled. This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. :type validate: bool :param hash_email: By default, the email address is sent in plain text. If this is set to ``True``, the email address will be normalized and converted to an MD5 hash before the request is sent. The email domain will continue to be sent in plain text. :type hash_email: bool :return: A Score model object :rtype: Score :raises: AuthenticationError, InsufficientFundsError, InvalidRequestError, HTTPError, MinFraudError, """ return cast( Score, self._response_for( self._score_uri, Score, transaction, validate, hash_email, ), ) def report(self, report: Dict[str, Optional[str]], validate: bool = True) -> None: """Send a transaction report to the Report Transaction endpoint. :param report: A dictionary containing the transaction report to be sent to the Report Transations web service as specified in the `REST API` documentation <https://dev.maxmind.com/minfraud/report-transaction/#Request_Body>_. :type report: dict :param validate: If set to false, validation of the report dictionary will be disabled. 
This validation helps ensure that your request is correct before sending it to MaxMind. Validation raises an InvalidRequestError. :type validate: bool :return: Nothing :rtype: None :raises: AuthenticationError, InvalidRequestError, HTTPError, MinFraudError, """ prepared_request = prepare_report(report, validate) uri = self._report_uri response = self._do_request(uri, prepared_request) status = response.status_code content_type = response.headers["Content-Type"] raw_body = response.text if status != 204: raise self._exception_for_error(status, content_type, raw_body, uri) def _response_for( self, uri: str, model_class: Union[Type[Factors], Type[Score], Type[Insights]], request: Dict[str, Any], validate: bool, hash_email: bool, ) -> Union[Score, Factors, Insights]: """Send request and create response object.""" prepared_request = prepare_transaction(request, validate, hash_email) response = self._do_request(uri, prepared_request) status = response.status_code content_type = response.headers["Content-Type"] raw_body = response.text if status != 200: raise self._exception_for_error(status, content_type, raw_body, uri) return self._handle_success(raw_body, uri, model_class) def _do_request(self, uri: str, data: Dict[str, Any]) -> Response: return self._session.post( uri, json=data, timeout=self._timeout, proxies=self._proxies ) def close(self): """Close underlying session This will close the session and any associated connections. """ self._session.close() def __enter__(self) -> "Client": return self def __exit__(self, exc_type: None, exc_value: None, traceback: None) -> None: self.close()
36.693353
97
0.611543
2,698
24,291
5.365456
0.095626
0.01499
0.012573
0.012434
0.78675
0.765128
0.746408
0.729414
0.723473
0.717118
0
0.003285
0.31065
24,291
661
98
36.748865
0.861212
0.236754
0
0.612732
0
0
0.075946
0.010643
0
0
0
0
0
1
0.047745
false
0
0.029178
0.005305
0.190981
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
bc51c66a0861e0e85b63071bdc021542418b54d5
16
py
Python
fourth-file.py
deepti-anand/hello-world
494fc16c416dc8fe25600d5830052343b70f2070
[ "Apache-2.0" ]
null
null
null
fourth-file.py
deepti-anand/hello-world
494fc16c416dc8fe25600d5830052343b70f2070
[ "Apache-2.0" ]
null
null
null
fourth-file.py
deepti-anand/hello-world
494fc16c416dc8fe25600d5830052343b70f2070
[ "Apache-2.0" ]
null
null
null
print("fourth")
8
15
0.6875
2
16
5.5
1
0
0
0
0
0
0
0
0
0
0
0
0.0625
16
1
16
16
0.733333
0
0
0
0
0
0.375
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
bc7669776b92346d0b43eefd7030df4e550e128a
16,693
py
Python
fair/SSPs/_shared.py
OMS-NetZero/FAIR
1d89c23096ae583c06581b1a2e1cb59f1aa44e13
[ "Apache-2.0" ]
66
2017-06-20T10:30:11.000Z
2022-02-03T19:11:06.000Z
fair/SSPs/_shared.py
OMS-NetZero/FAIR
1d89c23096ae583c06581b1a2e1cb59f1aa44e13
[ "Apache-2.0" ]
99
2017-03-29T01:59:56.000Z
2021-09-23T08:45:23.000Z
fair/SSPs/_shared.py
OMS-NetZero/FAIR
1d89c23096ae583c06581b1a2e1cb59f1aa44e13
[ "Apache-2.0" ]
33
2017-03-30T04:02:51.000Z
2022-03-02T17:13:43.000Z
import numpy as np import pandas as pd from scipy.interpolate import interp1d from ..constants import molwt class Emissions: def __init__(self, loaded, startyear): self.year = loaded.columns[(startyear-1750)+7:].astype(int).to_numpy() self.co2 = (loaded[loaded["Variable"]=="Emissions|CO2"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() * molwt.C/molwt.CO2 / 1000 self.co2_fossil = (loaded[loaded["Variable"]=="Emissions|CO2|MAGICC Fossil and Industrial"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() * molwt.C/molwt.CO2 / 1000 self.co2_land = (loaded[loaded["Variable"]=="Emissions|CO2|MAGICC AFOLU"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() * molwt.C/molwt.CO2 / 1000 self.ch4 = (loaded[loaded["Variable"]=="Emissions|CH4"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.n2o = (loaded[loaded["Variable"]=="Emissions|N2O"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() / 1000 self.sox = (loaded[loaded["Variable"]=="Emissions|Sulfur"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() * molwt.S/molwt.SO2 self.co = (loaded[loaded["Variable"]=="Emissions|CO"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.nmvoc = (loaded[loaded["Variable"]=="Emissions|VOC"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.nox = (loaded[loaded["Variable"]=="Emissions|NOx"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() * molwt.N / molwt.NO2 self.bc = (loaded[loaded["Variable"]=="Emissions|BC"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.oc = (loaded[loaded["Variable"]=="Emissions|OC"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.nh3 = 
(loaded[loaded["Variable"]=="Emissions|NH3"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.cf4 = (loaded[loaded["Variable"]=="Emissions|F-Gases|PFC|CF4"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.c2f6 = (loaded[loaded["Variable"]=="Emissions|F-Gases|PFC|C2F6"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.c6f14 = (loaded[loaded["Variable"]=="Emissions|F-Gases|PFC|C6F14"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc23 = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC23"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc32 = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC32"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc43_10 = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC4310mee"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc125 = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC125"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc134a = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC134a"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc143a = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC143a"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc227ea = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC227ea"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hfc245fa = (loaded[loaded["Variable"]=="Emissions|F-Gases|HFC|HFC245fa"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.sf6 = (loaded[loaded["Variable"]=="Emissions|F-Gases|SF6"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.cfc11 = 
(loaded[loaded["Variable"]=="Emissions|Montreal Gases|CFC|CFC11"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.cfc12 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CFC|CFC12"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.cfc113 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CFC|CFC113"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.cfc114 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CFC|CFC114"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.cfc115 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CFC|CFC115"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.carb_tet = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CCl4"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.mcf = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CH3CCl3"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hcfc22 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|HCFC22"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hcfc141b = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|HCFC141b"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.hcfc142b = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|HCFC142b"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.halon1211 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|Halon1211"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.halon1202 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|Halon1202"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.halon1301 = (loaded[loaded["Variable"]=="Emissions|Montreal 
Gases|Halon1301"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.halon2402 = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|Halon2402"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.ch3br = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CH3Br"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.ch3cl = (loaded[loaded["Variable"]=="Emissions|Montreal Gases|CH3Cl"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.emissions = np.vstack((self.year, self.co2_fossil, self.co2_land, self.ch4, self.n2o, self.sox, self.co, self.nmvoc, self.nox, self.bc, self.oc, self.nh3, self.cf4, self.c2f6, self.c6f14, self.hfc23, self.hfc32, self.hfc43_10, self.hfc125, self.hfc134a, self.hfc143a, self.hfc227ea, self.hfc245fa, self.sf6, self.cfc11, self.cfc12, self.cfc113, self.cfc114, self.cfc115, self.carb_tet, self.mcf, self.hcfc22, self.hcfc141b, self.hcfc142b, self.halon1211, self.halon1202, self.halon1301, self.halon2402, self.ch3br, self.ch3cl)).T _nox_avi = (loaded[loaded["Variable"]=="Emissions|NOx|MAGICC Fossil and Industrial|Aircraft"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() * molwt.N / molwt.NO2 _ch4_fos = (loaded[loaded["Variable"]=="Emissions|CH4|MAGICC Fossil and Industrial"].loc[:,str(startyear):].interpolate(axis=1)).astype(float).to_numpy().squeeze() self.fossilCH4_frac = _ch4_fos / self.ch4 self.aviNOx_frac = _nox_avi / self.nox class Concentrations: def __init__(self, loaded, startyear): self.year = loaded.columns[(startyear-1700)+7:].astype(int).to_numpy() self.co2 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|CO2")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.ch4 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|CH4")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.n2o = (loaded[(loaded["Variable"]=="Atmospheric 
Concentrations|N2O")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.cf4 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|PFC|CF4")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.c2f6 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|PFC|C2F6")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.c6f14 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|PFC|C6F14")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc23 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC23")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc32 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC32")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc43_10 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC4310mee")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc125 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC125")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc134a = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC134a")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc143a = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC143a")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc227ea = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC227ea")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hfc245fa = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|HFC|HFC245fa")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.sf6 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|F-Gases|SF6")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.cfc11 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal 
Gases|CFC|CFC11")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.cfc12 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CFC|CFC12")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.cfc113 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CFC|CFC113")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.cfc114 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CFC|CFC114")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.cfc115 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CFC|CFC115")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.carb_tet = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CCl4")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.mcf = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CH3CCl3")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hcfc22 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|HCFC22")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hcfc141b = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|HCFC141b")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.hcfc142b = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|HCFC142b")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.halon1211 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|Halon1211")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.halon1202 = np.zeros(736) self.halon1301 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|Halon1301")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.halon2402 = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal 
Gases|Halon2402")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.ch3br = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CH3Br")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.ch3cl = (loaded[(loaded["Variable"]=="Atmospheric Concentrations|Montreal Gases|CH3Cl")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.concentrations = np.vstack((self.year, self.co2, self.ch4, self.n2o, self.cf4, self.c2f6, self.c6f14, self.hfc23, self.hfc32, self.hfc43_10, self.hfc125, self.hfc134a, self.hfc143a, self.hfc227ea, self.hfc245fa, self.sf6, self.cfc11, self.cfc12, self.cfc113, self.cfc114, self.cfc115, self.carb_tet, self.mcf, self.hcfc22, self.hcfc141b, self.hcfc142b, self.halon1211, self.halon1202, self.halon1301, self.halon2402, self.ch3br, self.ch3cl)).T self.gas_indices= np.arange(1, 32) class Forcing: def __init__(self, loaded, startyear): self.year = loaded.columns[(startyear-1750)+7:].astype(int).to_numpy() self.total = (loaded[(loaded["Variable"]=="Effective Radiative Forcing")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.volcanic = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Natural|Volcanic")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.solar = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Natural|Solar")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.co2 = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|CO2")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.ch4 = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|CH4")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.n2o = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|N2O")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.other_wmghgs = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Other|Other 
WMGHGs")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.aero = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Aerosols|Aerosols-radiation Interactions")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.cloud = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Aerosols|Aerosols-cloud Interactions")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.strato3 = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Stratospheric Ozone")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.tropo3 = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Tropospheric Ozone")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.stwv = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Other|CH4 Oxidation Stratospheric H2O")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.contrails = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Other|Contrails and Contrail-induced Cirrus")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.landuse = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Other|Albedo Change")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() self.bcsnow = (loaded[(loaded["Variable"]=="Effective Radiative Forcing|Anthropogenic|Other|BC on Snow")].loc[:,str(startyear):]).astype(float).to_numpy().squeeze() def load_emissions_data(filepath, scenario, startyear=1765): # do the loading, return something which has the attributes you want loaded = pd.read_csv(filepath) loaded = loaded[loaded["Scenario"]==scenario] out = Emissions(loaded, startyear) return out def load_concentrations_data(filepath, scenario, startyear=1765): loaded = pd.read_csv(filepath) loaded = loaded[loaded["Scenario"]==scenario] out = Concentrations(loaded, startyear) return out def load_forcing_data(filepath, scenario, startyear=1765): 
loaded = pd.read_csv(filepath) loaded = loaded[loaded["Scenario"]==scenario] out = Forcing(loaded, startyear) return out
124.574627
542
0.701552
2,060
16,693
5.619417
0.079612
0.096406
0.150311
0.13528
0.914046
0.88744
0.829907
0.71605
0.635366
0.555978
0
0.036429
0.087342
16,693
134
543
124.574627
0.7234
0.003954
0
0.115702
0
0
0.247384
0.091122
0
0
0
0
0
1
0.049587
false
0
0.033058
0
0.132231
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
bcb2763600c9010c758b4d154ff4c7322be7133e
136
py
Python
telegramsetup.py
timokoola/timoechobot
c6e18aa29b538b73dcef1898f1d45bb3bf6d0d55
[ "Apache-2.0" ]
null
null
null
telegramsetup.py
timokoola/timoechobot
c6e18aa29b538b73dcef1898f1d45bb3bf6d0d55
[ "Apache-2.0" ]
null
null
null
telegramsetup.py
timokoola/timoechobot
c6e18aa29b538b73dcef1898f1d45bb3bf6d0d55
[ "Apache-2.0" ]
null
null
null
import telepot from keys import telegram_api_key, telegram_bot_url bot = telepot.Bot(telegram_api_key) bot.setWebhook(telegram_bot_url)
27.2
51
0.860294
22
136
4.954545
0.454545
0.201835
0.256881
0
0
0
0
0
0
0
0
0
0.080882
136
5
52
27.2
0.872
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
bcbbb331d0d2eaf7de0461f020aeed3e5f08de3e
317
py
Python
cellpack/mgl_tools/mglutil/gui/BasicWidgets/Tk/defaultColors.py
mesoscope/cellpack
ec6b736fc706c1fae16392befa814b5337a3a692
[ "MIT" ]
null
null
null
cellpack/mgl_tools/mglutil/gui/BasicWidgets/Tk/defaultColors.py
mesoscope/cellpack
ec6b736fc706c1fae16392befa814b5337a3a692
[ "MIT" ]
21
2021-10-02T00:07:05.000Z
2022-03-30T00:02:10.000Z
cellpack/mgl_tools/mglutil/gui/BasicWidgets/Tk/defaultColors.py
mesoscope/cellpack
ec6b736fc706c1fae16392befa814b5337a3a692
[ "MIT" ]
null
null
null
defaultColor = { "1": (1, 1, 1), # white "2": (0, 0, 0), # black "3": (0.0, 0.0, 1.0), # blue "4": (0.0, 1.0, 0.0), # green "5": ( 1.0, 0.0, 0.0, ), # red "6": (0.0, 1.0, 1.0), # cyan "7": (1.0, 0.0, 1.0), # magenta "8": (1.0, 1.0, 0.0), } # yellow
21.133333
36
0.318612
59
317
1.711864
0.305085
0.336634
0.267327
0.158416
0.19802
0
0
0
0
0
0
0.25641
0.384858
317
14
37
22.642857
0.261538
0.14511
0
0.142857
0
0
0.030534
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
bcc5496786e0fc09db8493381747d108b75e70a4
248
py
Python
iMessage/Errors.py
WillHord/iMessageBot
7b794f986277b8139d58d1d758708e59135d4c11
[ "MIT" ]
null
null
null
iMessage/Errors.py
WillHord/iMessageBot
7b794f986277b8139d58d1d758708e59135d4c11
[ "MIT" ]
null
null
null
iMessage/Errors.py
WillHord/iMessageBot
7b794f986277b8139d58d1d758708e59135d4c11
[ "MIT" ]
null
null
null
class NotACommand(Exception): pass class NotValidCommand(Exception): pass class NotEnoughArgs(Exception): pass class NotImplimentedError(): pass class UserNotFound(Exception): pass class InvalidPrefixes(Exception): pass
14.588235
33
0.741935
23
248
8
0.391304
0.353261
0.391304
0
0
0
0
0
0
0
0
0
0.185484
248
17
34
14.588235
0.910891
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
bcfb94a89c476f7a6ad6c4f9db40072d06b6d668
12,481
py
Python
initDB.py
shenwei0329/rdm-flasky
ed2942cd5a89b6903b2b4fc8baccdde656da5cf5
[ "MIT" ]
null
null
null
initDB.py
shenwei0329/rdm-flasky
ed2942cd5a89b6903b2b4fc8baccdde656da5cf5
[ "MIT" ]
1
2019-08-02T00:39:41.000Z
2019-08-02T00:39:41.000Z
initDB.py
shenwei0329/rdm-flasky
ed2942cd5a89b6903b2b4fc8baccdde656da5cf5
[ "MIT" ]
null
null
null
#coding=utf-8 # # from app import db from app.models import User, Role password_str = '-' \ 'JhyVJFVg9wdQNkzMdVbduMRWrJVkhsETkUnAqxVvvyNtoo2yi6zXQwpcJjHoegmbMNPEyQfXtYCKE3ajKasjYRsnhbMQqwRb' \ 'MIIGJDCCBQygAwIBAgISAzJ7YvIMb4e0zFmkQbt70fHcMA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQK' \ 'Ew1MZXQncyBFbmNyeXB0MSMwIQYDVQQDExpMZXQncy0IEF1dGhvcml0eSBYMzAeFw0xODA0MTYwMzIzNDVaFw0xODA3MTUwM' \ 'zIzNDVaMB4xHDAaBgNVBAMMEyouY2hpbmFjbG91ZC5jb20uY24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC5Z' \ 'qA+lPkP9fzq65vtJGt55OeygbA1yEfRg25REjbSR7mLfQppqsUFtEwtWV8TvNWhs3JwXqjnBY15EYvxOXLgrMnosLX7wIfPW' \ 'K4HKlyuiZ1IAdHCUlFUlvvUy5bNsmCrhxTOf86rbq3nOnxST2XHq6PAegdtCUaMj/7ilooc93wj6uOicBCgCIkeG6udi11HH' \ 'HfiJAoRpR5XFR9YO35OJQhJENhuYpbN/05fHdBV7vv+T9fnrDQw7enwPPZkRpMyhda3MzbQ1s5HghGuTIhJ8hdSDwitXe3RM' \ 'J9PmLOAtD67MsKezAi0Bcy4HQhlS+o7bIwWuk2FcnPBUdbmZzl5AgMBAAGjggMuMIIDKjAOB' role_str = { 'super': 0, # 超级用户 'admin': 1, # 公司管理层 'manager': 2, # 部门负责人 'pd_manager': 3, # 产品负责人 'pj_manager': 4, # 项目负责人 'sys_manager': 66, # 系统维护人 'user': 99, # 一般用户 } email_str = { 'chairman@chinacloud.com.cn': 'super', 'zhuhongtao@chinacloud.com.cn': 'admin', 'lijiacheng@chinacloud.com.cn': 'admin', 'nieyong@chinacloud.com.cn': 'admin', 'wuhuaigu@chinacloud.com.cn': 'admin', 'minshengjie@chinacloud.com.cn': 'admin', 'dingxing@chinacloud.com.cn': 'manager', 'guanyuqi@chinacloud.com.cn': 'manager', 'shishuang@chinacloud.com.cn': 'manager', 'hongbo@chinacloud.com.cn': 'manager', 'gaijia@chinacloud.com.cn': 'manager', 'jiangyong@chinacloud.com.cn': 'manager', 'guofengqi@chinacloud.com.cn': 'manager', 'liuyi@chinacloud.com.cn': 'manager', 'zhangjian@chinacloud.com.cn': 'manager', 'yangfei@chinacloud.com.cn': 'manager', 'lianguo@chinacloud.com.cn': 'manager', 'zhangjing@chinacloud.com.cn': 'manager', 'lixia@chinacloud.com.cn': 'manager', 'liyating@chinacloud.com.cn': 'manager', 'wuyuming@chinacloud.com.cn': 'manager', 'xuwenbao@chinacloud.com.cn': 'pd_manager', 'wangxuekai@chinacloud.com.cn': 'pd_manager', 
'cuihaozhi@chinacloud.com.cn': 'pd_manager', 'raodingyuan@chinacloud.com.cn': 'pd_manager', 'duhao@chinacloud.com.cn': 'pj_manager', 'dongjingyi@chinacloud.com.cn': 'pj_manager', 'dengliujiang@chinacloud.com.cn': 'pj_manager', 'wangli@chinacloud.com.cn': 'pj_manager', 'masong@chinacloud.com.cn': 'pj_manager', 'liuqiang@chinacloud.com.cn': 'pj_manager', 'liujinlong@chinacloud.com.cn': 'pj_manager', 'tianlong@chinacloud.com.cn': 'pj_manager', 'jiayangzheng@chinacloud.com.cn': 'pj_manager', 'guohaipeng@chinacloud.cm.cn': 'pj_manager', 'wangyunfeng@chinacloud.com.cn': 'pj_manager', 'xietao@chinacloud.com.cn': 'pj_manager', 'liuxiaokun@chinacloud.com.cn': 'pj_manager', 'marui@chinacloud.com.cn': 'pj_manager', 'wujing@chinacloud.com.cn': 'pj_manager', 'zhufeng@chinacloud.com.cn': 'pj_manager', 'lixiaolong@chinacloud.com.cn': 'pj_manager', 'lixiaowei@chinacloud.com.cn': 'pj_manager', 'wangwei_sh@chinacloud.com.cn': 'pj_manager', 'liuyichun@chinacloud.com.cn': 'pj_manager', 'xiangxiaoyan@chinacloud.com.cn': 'manager', 'tanyingqing@chinacloud.com.cn': 'manager', 'huaicuijing@chinacloud.com.cn': 'user', 'liangkangli@chinacloud.com.cn': 'user', 'weilihong@chinacloud.com.cn': 'user', 'xiabing@chinacloud.com.cn': 'user', 'baiyin@chinacloud.com.cn': 'user', 'caiyali@chinacloud.com.cn': 'user', 'chengyu@chinacloud.com.cn': 'user', 'chenyanqiu@chinacloud.com.cn': 'user', 'guanjiuwei@chinacloud.com.cn': 'user', 'heyuyang@chinacloud.com.cn': 'user', 'huzhengbo@chinacloud.com.cn': 'user', 'jiaguangyuan@chinacloud.com.cn': 'user', 'jinyi@chinacloud.com.cn': 'user', 'liangyu@chinacloud.com.cn': 'user', 'lichengbang@chinacloud.com.cn': 'user', 'lichenge@chinacloud.com.cn': 'user', 'linwengang@chinacloud.com.cn': 'user', 'liubingying@chinacloud.com.cn': 'user', 'qinliyi@chinacloud.com.cn': 'user', 'qiushi@chinacloud.com.cn': 'user', 'renchao@chinacloud.com.cn': 'user', 'tanglei@chinacloud.com.cn': 'user', 'tanglina@chinacloud.com.cn': 'user', 'wangdinghua@chinacloud.com.cn': 'user', 
'wangkun@chinacloud.com.cn': 'user', 'wangwei@chinacloud.com.cn': 'user', 'wangxu@chinacloud.com.cn': 'user', 'wangyihua@chinacloud.com.cn': 'user', 'wangyu@chinacloud.com.cn': 'user', 'xiaweihu@chinacloud.com.cn': 'user', 'xulang@chinacloud.com.cn': 'user', 'yangkairui@chinacloud.com.cn': 'user', 'yangzhilin@chinacloud.com.cn': 'user', 'yutao@chinacloud.com.cn': 'user', 'zhaming@chinacloud.com.cn': 'user', 'zhanjinwei@chinacloud.com.cn': 'user', 'zhangyirui@chinacloud.com.cn': 'user', 'zhangyuandong@chinacloud.com.cn': 'user', 'zhangzhixian@chinacloud.com.cn': 'user', 'zhengbiao@chinacloud.com.cn': 'user', 'zhoulun@chinacloud.com.cn': 'user', 'zhuguoqing@chinacloud.com.cn': 'user', 'tongxiaoyu@chinacloud.com.cn': 'user', 'liji@chinacloud.com.cn': 'user', 'caojingyi@chinacloud.com.cn': 'user', 'fangbo@chinacloud.com.cn': 'user', 'gechao@chinacloud.com.cn': 'user', 'jinwei@chinacloud.com.cn': 'user', 'lijia@chinacloud.com.cn': 'user', 'lili@chinacloud.com.cn': 'user', 'lilinhong@chinacloud.com.cn': 'user', 'lishichinacloud.com.cn': 'user', 'liubo@chinacloud.com.cn': 'user', 'longchaoguo@chinacloud.com.cn': 'user', 'longjungang@chinacloud.com.cn': 'user', 'mamengyun@chinacloud.com.cn': 'user', 'qiyang@chinacloud.com.cn': 'user', 'wangmingxia@chinacloud.com.cn': 'user', 'wangyizhi@chinacloud.com.cn': 'user', 'wangyuhong@chinacloud.com.cn': 'user', 'weizhuo@chinacloud.com.cn': 'user', 'wengwei@chinacloud.com.cn': 'user', 'xiakai@chinacloud.com.cn': 'user', 'xiemeizhong@chinacloud.com.cn': 'user', 'yangqinzi@chinacloud.com.cn': 'user', 'yuyuehong@chinacloud.com.cn': 'user', 'zengqia@chinacloud.com.cn': 'user', 'zhangxin@chinacloud.com.cn': 'user', 'wanghaoyong@chinacloud.com.cn': 'user', 'zhongtao@chinacloud.com.cn': 'user', 'jinhao@chinacloud.com.cn': 'user', 'kangqingwei@chinacloud.com.cn': 'user', 'liyong@chinacloud.com.cn': 'user', 'xuliang@chinacloud.com.cn': 'user', 'zhangjiaqi@chinacloud.com.cn': 'user', 'chenwei@chinacloud.com.cn': 'user', 
'leishiran@chinacloud.com.cn': 'user', 'lihelin@chinacloud.com.cn': 'user', 'liuhang@chinacloud.com.cn': 'user', 'lixin@chinacloud.com.cn': 'user', 'wangdaojin@chinacloud.com.cn': 'user', 'wangzhong@chinacloud.com.cn': 'user', 'zhangzijian@chinacloud.com.cn': 'user', 'zhaoxiaoming@chinacloud.com.cn': 'user', 'dengleilei@chinacloud.com.cn': 'user', 'liugaoyang@chinacloud.com.cn': 'user', 'longqian@chinacloud.com.cn': 'user', 'yuanfeng@chinacloud.com.cn': 'user', 'baoxiaoyu@chinacloud.com.cn': 'user', 'chenchuan@chinacloud.com.cn': 'user', 'duanjinming@chinacloud.com.cn': 'user', 'wangqiang@chinacloud.com.cn': 'user', 'yexingceng@chinacloud.com.cn': 'user', 'zhugaojun@chinacloud.com.cn': 'user', 'guoziming@chinacloud.com.cn': 'user', 'zhaohailong@chinacloud.com.cn': 'user', 'chenhongying@chinacloud.com.cn': 'user', 'dinglinmin@chinacloud.com.cn': 'user', 'guchenchen@chinacloud.com.cn': 'user', 'sunyuhua@chinacloud.com.cn': 'user', 'yangming@chinacloud.com.cn': 'user', 'zengjun@chinacloud.com.cn': 'user', 'zhongquanmei@chinacloud.com.cn': 'user', 'dingfan@chinacloud.com.cn': 'user', 'dongyizhou@chinacloud.com.cn': 'user', 'douguofeng@chinacloud.com.cn': 'user', 'fususheng@chinacloud.com.cn': 'user', 'guijia@chinacloud.com.cn': 'user', 'hujianbin@chinacloud.com.cn': 'user', 'leilei@chinacloud.com.cn': 'user', 'liuweiping@chinacloud.com.cn': 'user', 'liuxiaoxiong@chinacloud.com.cn': 'user', # 'liuyichun@chinacloud.com.cn': 'user', 'menghongyang@chinacloud.com.cn': 'user', 'shenguo@chinacloud.com.cn': 'user', 'sunshasha@chinacloud.com.cn': 'user', 'sunyu@chinacloud.com.cn': 'user', 'tonghao@chinacloud.com.cn': 'user', 'xiaoqingshan@chinacloud.com.cn': 'user', 'xuyayang@chinacloud.com.cn': 'user', 'baili@chinacloud.com.cn': 'user', 'shijiahao@chinacloud.com.cn': 'user', 'limin@chinacloud.com.cn': 'user', 'qihong@chinacloud.com.cn': 'user', 'tanggaofei@chinacloud.com.cn': 'user', 'caopan@chinacloud.com.cn': 'user', 'chenxia@chinacloud.com.cn': 'user', 
'lihaiyan@chinacloud.com.cn': 'user', 'lihongxia@chinacloud.com.cn': 'user', 'pengzhi@chinacloud.com.cn': 'user', 'qinxing@chinacloud.com.cn': 'user', 'sunbanghui@chinacloud.com.cn': 'user', 'tianxin@chinacloud.com.cn': 'user', 'wanghui@chinacloud.com.cn': 'user', 'wudanyang@chinacloud.com.cn': 'user', # 'xiangxiaoyan@chinacloud.com.cn': 'user', 'zhangzhiying@chinacloud.com.cn': 'user', 'chenzhenzhu@chinacloud.com.cn': 'user', 'doujiayu@chinacloud.com.cn': 'user', 'fushuxiang@chinacloud.com.cn': 'user', 'gaozhenqing@chinacloud.com.cn': 'user', 'hejinlong@chinacloud.com.cn': 'user', 'jianghui@chinacloud.com.cn': 'user', 'liangyanlong@chinacloud.com.cn': 'user', 'liling@chinacloud.com.cn': 'user', 'liujiawei@chinacloud.com.cn': 'user', 'liuyuxing@chinacloud.com.cn': 'user', 'tangcheng@chinacloud.com.cn': 'user', 'tangyong@chinacloud.com.cn': 'user', 'wangxuemin@chinacloud.com.cn': 'user', 'wangzhe@chinacloud.com.cn': 'user', 'xuzhou@chinacloud.com.cn': 'user', 'yinqiang@chinacloud.com.cn': 'user', 'zhuqiang@chinacloud.com.cn': 'user', 'baile@chinacloud.com.cn': 'user', 'bianyue@chinacloud.com.cn': 'user', 'cuifu@chinacloud.com.cn': 'user', 'chanhaiyang@chinacloud.com.cn': 'user', 'dongxujun@chinacloud.com.cn': 'user', 'fujinwan@chinacloud.com.cn': 'user', 'liujunlei@chinacloud.com.cn': 'user', 'liumuchen@chinacloud.com.cn': 'user', 'luguoqiang@chinacloud.com.cn': 'user', 'lvchunlei@chinacloud.com.cn': 'user', 'pangyunfei@chinacloud.com.cn': 'user', 'wangxin@chinacloud.com.cn': 'user', 'wangyuanyuan@chinacloud.com.cn': 'user', 'xuebingqian@chinacloud.com.cn': 'user', 'yuanlei@chinacloud.com.cn': 'user', 'zhangjialong@chinacloud.com.cn': 'user', 'zhangshaolin@chinacloud.com.cn': 'user', 'zhangyin@chinacloud.com.cn': 'user', 'zhangzhen@chinacloud.com.cn': 'user', 'baojing@chinacloud.com.cn': 'user', 'chenhuilie@chinacloud.com.cn': 'user', 'chenjing@chinacloud.com.cn': 'user', 'dongjuanjuan@chinacloud.com.cn': 'user', 'guchunxue@chinacloud.com.cn': 'user', 
'mazhenyu@chinacloud.com.cn': 'user', 'tangxiaoxiao@chinacloud.com.cn': 'user', 'xulan@chinacloud.com.cn': 'user', 'zhangjing_cd@chinacloud.com.cn': 'user', 'zhaoyibei@chinacloud.com.cn': 'user', 'chenbeibei@chinacloud.com.cn': 'pj_manager', 'dinghao@chinacloud.com.cn': 'pj_manager', } def init_users(): import random pwd_str_len = len(password_str) print pwd_str_len for _email in email_str: _idx = random.randint(1, pwd_str_len-12) _password = str(password_str[_idx:_idx+8]) # db.session.query(User).filter_by(email=_email).delete(synchronize_session=False) # db.session.commit() user = User.query.filter_by(email=_email).first() if user is None: user = User() user.username = _email user.email = _email if email_str[_email] == 'user': _password = '12345678' user.password = _password db.session.add(user) db.session.commit() # db.session.query(Role).filter_by(name=_email).delete(synchronize_session=False) # db.session.commit() role = Role() role.name = _email role.level = role_str[email_str[_email]] role.secretkey = '20131226' db.session.add(role) db.session.commit() print(">>> user: %s , password: %s" % (_email, _password))
40.654723
116
0.653313
1,374
12,481
5.884279
0.227074
0.142857
0.426716
0.427706
0.086333
0.012121
0.012121
0.012121
0
0
0
0.010903
0.162247
12,481
306
117
40.787582
0.762337
0.026761
0
0.007246
0
0
0.66587
0.564221
0
0
0
0
0
0
null
null
0.021739
0.01087
null
null
0.007246
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
4c0ce3a546b57efaf733ece03aa13b0372086e38
236
py
Python
database/__init__.py
RSMuthu/Giphy_Gifts
5d6d7870289f596d5b0c4924398e688c421645ea
[ "MIT" ]
null
null
null
database/__init__.py
RSMuthu/Giphy_Gifts
5d6d7870289f596d5b0c4924398e688c421645ea
[ "MIT" ]
null
null
null
database/__init__.py
RSMuthu/Giphy_Gifts
5d6d7870289f596d5b0c4924398e688c421645ea
[ "MIT" ]
null
null
null
## DB initialisation from .db import engine as __engine from .model import Base as __base def initialize_db(): ''' Helps to bind the DB engine with the metadata of the Base ''' __base.metadata.create_all(bind=__engine)
23.6
61
0.716102
35
236
4.542857
0.542857
0
0
0
0
0
0
0
0
0
0
0
0.207627
236
9
62
26.222222
0.850267
0.322034
0
0
0
0
0
0
0
0
0
0
0
1
0.25
true
0
0.5
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
5
4c202692505f061a9f29d60c6a2f0e76596bbed5
78
py
Python
keras_gym/utils/__init__.py
KristianHolsheimer/keras-gym
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
[ "MIT" ]
16
2019-07-01T10:56:26.000Z
2021-01-31T18:56:56.000Z
keras_gym/utils/__init__.py
KristianHolsheimer/keras-gym
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
[ "MIT" ]
10
2019-03-10T21:56:10.000Z
2020-09-06T21:49:55.000Z
keras_gym/utils/__init__.py
KristianHolsheimer/keras-gym
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
[ "MIT" ]
5
2019-08-02T22:11:19.000Z
2020-04-19T20:18:38.000Z
# flake8: noqa from .misc import * from .tensor import * from .array import *
15.6
21
0.705128
11
78
5
0.636364
0.363636
0
0
0
0
0
0
0
0
0
0.015873
0.192308
78
4
22
19.5
0.857143
0.153846
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4c49c00bb38320bfa1160504f7449b6c4da95198
313
py
Python
juno/pix.py
notafiscalrural/juno-python
08b0bfcbd3342b101a0d1fa0d3f085776aa22aa5
[ "MIT" ]
2
2022-03-25T21:08:46.000Z
2022-03-31T21:10:17.000Z
juno/pix.py
notafiscalrural/juno-python
08b0bfcbd3342b101a0d1fa0d3f085776aa22aa5
[ "MIT" ]
null
null
null
juno/pix.py
notafiscalrural/juno-python
08b0bfcbd3342b101a0d1fa0d3f085776aa22aa5
[ "MIT" ]
null
null
null
from juno.resources import handler_request from juno.resources.routes import pix_routes def create_keys(dictionary): return handler_request.post(pix_routes.create_keys(), dictionary) def qrcodes_static(dictionary): return handler_request.get(f"{pix_routes.get_base_url()}/qrcodes/static", dictionary)
28.454545
89
0.811502
43
313
5.651163
0.465116
0.17284
0.139918
0.246914
0
0
0
0
0
0
0
0
0.095847
313
10
90
31.3
0.858657
0
0
0
0
0
0.134185
0.134185
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
5
d5d13f75e768f64672666f382a5b8e7f81ea7d3a
19,374
py
Python
core/domain/subscription_services_test.py
imrk51/oppia
615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c
[ "Apache-2.0" ]
null
null
null
core/domain/subscription_services_test.py
imrk51/oppia
615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c
[ "Apache-2.0" ]
null
null
null
core/domain/subscription_services_test.py
imrk51/oppia
615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c
[ "Apache-2.0" ]
1
2021-08-04T13:03:16.000Z
2021-08-04T13:03:16.000Z
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for subscription management.""" from core.domain import collection_domain from core.domain import collection_services from core.domain import exp_domain from core.domain import exp_services from core.domain import feedback_domain from core.domain import feedback_services from core.domain import rights_manager from core.domain import subscription_services from core.platform import models from core.tests import test_utils (user_models,) = models.Registry.import_models([models.NAMES.user]) COLLECTION_ID = 'col_id' COLLECTION_ID_2 = 'col_id_2' EXP_ID = 'exp_id' EXP_ID_2 = 'exp_id_2' FEEDBACK_THREAD_ID = 'fthread_id' FEEDBACK_THREAD_ID_2 = 'fthread_id_2' USER_ID = 'user_id' USER_ID_2 = 'user_id_2' class SubscriptionsTest(test_utils.GenericTestBase): """Tests for subscription management.""" OWNER_2_EMAIL = 'owner2@example.com' OWNER2_USERNAME = 'owner2' def setUp(self): super(SubscriptionsTest, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME) self.signup(self.OWNER_2_EMAIL, self.OWNER2_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL) self.owner_2_id = 
self.get_user_id_from_email(self.OWNER_2_EMAIL) def _get_thread_ids_subscribed_to(self, user_id): subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) return ( subscriptions_model.feedback_thread_ids if subscriptions_model else []) def _get_exploration_ids_subscribed_to(self, user_id): subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) return ( subscriptions_model.activity_ids if subscriptions_model else []) def _get_collection_ids_subscribed_to(self, user_id): subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) return ( subscriptions_model.collection_ids if subscriptions_model else []) def test_subscribe_to_feedback_thread(self): self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID) self.assertEqual( self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID]) # Repeated subscriptions to the same thread have no effect. subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID) self.assertEqual( self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID]) subscription_services.subscribe_to_thread( USER_ID, FEEDBACK_THREAD_ID_2) self.assertEqual( self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID, FEEDBACK_THREAD_ID_2]) def test_subscribe_to_exploration(self): self.assertEqual(self._get_exploration_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_exploration(USER_ID, EXP_ID) self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID]) # Repeated subscriptions to the same exploration have no effect. 
subscription_services.subscribe_to_exploration(USER_ID, EXP_ID) self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID]) subscription_services.subscribe_to_exploration(USER_ID, EXP_ID_2) self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID, EXP_ID_2]) def test_get_exploration_ids_subscribed_to(self): self.assertEqual( subscription_services.get_exploration_ids_subscribed_to( USER_ID), []) subscription_services.subscribe_to_exploration(USER_ID, EXP_ID) self.assertEqual( subscription_services.get_exploration_ids_subscribed_to(USER_ID), [EXP_ID]) subscription_services.subscribe_to_exploration(USER_ID, EXP_ID_2) self.assertEqual( subscription_services.get_exploration_ids_subscribed_to(USER_ID), [EXP_ID, EXP_ID_2]) def test_thread_and_exp_subscriptions_are_tracked_individually(self): self.assertEqual(self._get_thread_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_thread(USER_ID, FEEDBACK_THREAD_ID) subscription_services.subscribe_to_exploration(USER_ID, EXP_ID) self.assertEqual( self._get_thread_ids_subscribed_to(USER_ID), [FEEDBACK_THREAD_ID]) self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID]) def test_posting_to_feedback_thread_results_in_subscription(self): # The viewer posts a message to the thread. message_text = 'text' feedback_services.create_thread( 'exp_id', 'state_name', self.viewer_id, 'subject', message_text) thread_ids_subscribed_to = self._get_thread_ids_subscribed_to( self.viewer_id) self.assertEqual(len(thread_ids_subscribed_to), 1) full_thread_id = thread_ids_subscribed_to[0] thread_id = ( feedback_domain.FeedbackThread.get_thread_id_from_full_thread_id( full_thread_id)) self.assertEqual( feedback_services.get_messages('exp_id', thread_id)[0].text, message_text) # The editor posts a follow-up message to the thread. 
new_message_text = 'new text' feedback_services.create_message( 'exp_id', thread_id, self.editor_id, '', '', new_message_text) # The viewer and editor are now both subscribed to the thread. self.assertEqual( self._get_thread_ids_subscribed_to(self.viewer_id), [full_thread_id]) self.assertEqual( self._get_thread_ids_subscribed_to(self.editor_id), [full_thread_id]) def test_creating_exploration_results_in_subscription(self): self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), []) exp_services.save_new_exploration( USER_ID, exp_domain.Exploration.create_default_exploration(EXP_ID)) self.assertEqual( self._get_exploration_ids_subscribed_to(USER_ID), [EXP_ID]) def test_adding_new_exploration_owner_or_editor_role_results_in_subscription(self): # pylint: disable=line-too-long exploration = exp_domain.Exploration.create_default_exploration(EXP_ID) exp_services.save_new_exploration(self.owner_id, exploration) self.assertEqual( self._get_exploration_ids_subscribed_to(self.owner_2_id), []) rights_manager.assign_role_for_exploration( self.owner_id, EXP_ID, self.owner_2_id, rights_manager.ROLE_OWNER) self.assertEqual( self._get_exploration_ids_subscribed_to(self.owner_2_id), [EXP_ID]) self.assertEqual( self._get_exploration_ids_subscribed_to(self.editor_id), []) rights_manager.assign_role_for_exploration( self.owner_id, EXP_ID, self.editor_id, rights_manager.ROLE_EDITOR) self.assertEqual( self._get_exploration_ids_subscribed_to(self.editor_id), [EXP_ID]) def test_adding_new_exploration_viewer_role_does_not_result_in_subscription(self): # pylint: disable=line-too-long exploration = exp_domain.Exploration.create_default_exploration(EXP_ID) exp_services.save_new_exploration(self.owner_id, exploration) self.assertEqual( self._get_exploration_ids_subscribed_to(self.viewer_id), []) rights_manager.assign_role_for_exploration( self.owner_id, EXP_ID, self.viewer_id, rights_manager.ROLE_VIEWER) self.assertEqual( self._get_exploration_ids_subscribed_to(self.viewer_id), []) 
def test_deleting_exploration_does_not_delete_subscription(self): exploration = exp_domain.Exploration.create_default_exploration(EXP_ID) exp_services.save_new_exploration(self.owner_id, exploration) self.assertEqual( self._get_exploration_ids_subscribed_to(self.owner_id), [EXP_ID]) exp_services.delete_exploration(self.owner_id, EXP_ID) self.assertEqual( self._get_exploration_ids_subscribed_to(self.owner_id), [EXP_ID]) def test_subscribe_to_collection(self): self.assertEqual(self._get_collection_ids_subscribed_to(USER_ID), []) subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID) self.assertEqual( self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID]) # Repeated subscriptions to the same collection have no effect. subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID) self.assertEqual( self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID]) subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID_2) self.assertEqual( self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID, COLLECTION_ID_2]) def test_get_collection_ids_subscribed_to(self): self.assertEqual( subscription_services.get_collection_ids_subscribed_to( USER_ID), []) subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID) self.assertEqual( subscription_services.get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID]) subscription_services.subscribe_to_collection(USER_ID, COLLECTION_ID_2) self.assertEqual( subscription_services.get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID, COLLECTION_ID_2]) def test_creating_collection_results_in_subscription(self): self.assertEqual( self._get_collection_ids_subscribed_to(USER_ID), []) self.save_new_default_collection(COLLECTION_ID, USER_ID) self.assertEqual( self._get_collection_ids_subscribed_to(USER_ID), [COLLECTION_ID]) def test_adding_new_collection_owner_or_editor_role_results_in_subscription( self): self.save_new_default_collection(COLLECTION_ID, self.owner_id) 
self.assertEqual( self._get_collection_ids_subscribed_to(self.owner_2_id), []) rights_manager.assign_role_for_collection( self.owner_id, COLLECTION_ID, self.owner_2_id, rights_manager.ROLE_OWNER) self.assertEqual( self._get_collection_ids_subscribed_to(self.owner_2_id), [COLLECTION_ID]) self.assertEqual( self._get_collection_ids_subscribed_to(self.editor_id), []) rights_manager.assign_role_for_collection( self.owner_id, COLLECTION_ID, self.editor_id, rights_manager.ROLE_EDITOR) self.assertEqual( self._get_collection_ids_subscribed_to(self.editor_id), [COLLECTION_ID]) def test_adding_new_collection_viewer_role_does_not_result_in_subscription( self): self.save_new_default_collection(COLLECTION_ID, self.owner_id) self.assertEqual( self._get_collection_ids_subscribed_to(self.viewer_id), []) rights_manager.assign_role_for_collection( self.owner_id, COLLECTION_ID, self.viewer_id, rights_manager.ROLE_VIEWER) self.assertEqual( self._get_collection_ids_subscribed_to(self.viewer_id), []) def test_deleting_collection_does_not_delete_subscription(self): self.save_new_default_collection(COLLECTION_ID, self.owner_id) self.assertEqual( self._get_collection_ids_subscribed_to(self.owner_id), [COLLECTION_ID]) collection_services.delete_collection(self.owner_id, COLLECTION_ID) self.assertEqual( self._get_collection_ids_subscribed_to(self.owner_id), [COLLECTION_ID]) def test_adding_exploration_to_collection_does_not_create_subscription( self): self.save_new_default_collection(COLLECTION_ID, self.owner_id) # The author is subscribed to the collection but to no explorations. self.assertEqual( self._get_collection_ids_subscribed_to(self.owner_id), [COLLECTION_ID]) self.assertEqual( self._get_exploration_ids_subscribed_to(self.owner_id), []) # Another author creates an exploration. 
self.save_new_valid_exploration(EXP_ID, self.owner_2_id) # If the collection author adds the exploration to his/her collection, # the collection author should not be subscribed to the exploration nor # should the exploration author be subscribed to the collection. collection_services.update_collection(self.owner_id, COLLECTION_ID, [{ 'cmd': collection_domain.CMD_ADD_COLLECTION_NODE, 'exploration_id': EXP_ID }], 'Add new exploration to collection.') # Ensure subscriptions are as expected. self.assertEqual( self._get_collection_ids_subscribed_to(self.owner_id), [COLLECTION_ID]) self.assertEqual( self._get_exploration_ids_subscribed_to(self.owner_2_id), [EXP_ID]) class UserSubscriptionsTest(test_utils.GenericTestBase): """Tests for subscription management.""" OWNER_2_EMAIL = 'owner2@example.com' OWNER2_USERNAME = 'owner2' def setUp(self): super(UserSubscriptionsTest, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.OWNER_2_EMAIL, self.OWNER2_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.owner_2_id = self.get_user_id_from_email(self.OWNER_2_EMAIL) def _get_all_subscribers_of_creators(self, user_id): subscribers_model = user_models.UserSubscribersModel.get( user_id, strict=False) return ( subscribers_model.subscriber_ids if subscribers_model else []) def _get_all_creators_subscribed_to(self, user_id): subscriptions_model = user_models.UserSubscriptionsModel.get( user_id, strict=False) return ( subscriptions_model.creator_ids if subscriptions_model else []) def test_subscribe_to_creator(self): self.assertEqual(self._get_all_subscribers_of_creators( self.owner_id), []) # Subscribe a user to a creator. subscription_services.subscribe_to_creator(USER_ID, self.owner_id) self.assertEqual( self._get_all_subscribers_of_creators(self.owner_id), [USER_ID]) self.assertEqual( self._get_all_creators_subscribed_to(USER_ID), [self.owner_id]) # Repeated subscriptions to the same creator has no effect. 
subscription_services.subscribe_to_creator(USER_ID, self.owner_id) self.assertEqual( self._get_all_subscribers_of_creators(self.owner_id), [USER_ID]) self.assertEqual( self._get_all_creators_subscribed_to(USER_ID), [self.owner_id]) # Subscribe another creator. subscription_services.subscribe_to_creator(USER_ID_2, self.owner_id) self.assertEqual( self._get_all_subscribers_of_creators(self.owner_id), [USER_ID, USER_ID_2]) self.assertEqual( self._get_all_creators_subscribed_to( USER_ID_2), [self.owner_id]) def test_unsubscribe_from_creator(self): self.assertEqual(self._get_all_subscribers_of_creators( self.owner_id), []) # Add subscribers to a creator. subscription_services.subscribe_to_creator(USER_ID, self.owner_id) subscription_services.subscribe_to_creator(USER_ID_2, self.owner_id) self.assertEqual( self._get_all_subscribers_of_creators(self.owner_id), [ USER_ID, USER_ID_2]) self.assertEqual( self._get_all_creators_subscribed_to(USER_ID), [self.owner_id]) self.assertEqual( self._get_all_creators_subscribed_to(USER_ID_2), [self.owner_id]) # Unsubscribing a user from a creator. subscription_services.unsubscribe_from_creator(USER_ID, self.owner_id) self.assertEqual( self._get_all_subscribers_of_creators(self.owner_id), [USER_ID_2]) self.assertEqual( self._get_all_creators_subscribed_to(USER_ID), []) # Unsubscribing the same user again has no effect. subscription_services.unsubscribe_from_creator(USER_ID, self.owner_id) self.assertEqual( self._get_all_subscribers_of_creators(self.owner_id), [USER_ID_2]) self.assertEqual( self._get_all_creators_subscribed_to(USER_ID), []) # Unsubscribing the second user. 
subscription_services.unsubscribe_from_creator( USER_ID_2, self.owner_id) self.assertEqual(self._get_all_subscribers_of_creators( self.owner_id), []) self.assertEqual( self._get_all_creators_subscribed_to(USER_ID_2), []) def test_get_all_subscribers_of_creators(self): self.assertEqual( subscription_services.get_all_subscribers_of_creator( self.owner_id), []) subscription_services.subscribe_to_creator(USER_ID, self.owner_id) self.assertEqual( subscription_services.get_all_subscribers_of_creator(self.owner_id), [USER_ID]) subscription_services.subscribe_to_creator(USER_ID_2, self.owner_id) self.assertEqual( subscription_services.get_all_subscribers_of_creator(self.owner_id), [USER_ID, USER_ID_2]) def test_get_all_creators_subscribed_to(self): self.assertEqual( subscription_services.get_all_creators_subscribed_to( USER_ID), []) subscription_services.subscribe_to_creator(USER_ID, self.owner_id) self.assertEqual( subscription_services.get_all_creators_subscribed_to( USER_ID), [self.owner_id]) subscription_services.subscribe_to_creator(USER_ID, self.owner_2_id) self.assertEqual( subscription_services.get_all_creators_subscribed_to( USER_ID), [self.owner_id, self.owner_2_id])
41.935065
119
0.70734
2,400
19,374
5.245833
0.080833
0.045751
0.08753
0.10135
0.80413
0.776092
0.741303
0.72216
0.691819
0.670691
0
0.004491
0.218489
19,374
461
120
42.02603
0.827026
0.087953
0
0.616959
0
0
0.012032
0
0
0
0
0
0.210526
1
0.078947
false
0
0.032164
0
0.143275
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d5ebf1675e938b2372558252ce35b907cafa4d81
74
py
Python
multiqc_uphl/modules/seqsero/__init__.py
Ikkik/MultiQC_UPHL
26658ee664613e71bb41c4a23ddaeb0e41f6cdd1
[ "MIT" ]
2
2019-11-15T16:40:08.000Z
2019-12-04T18:02:32.000Z
multiqc_uphl/modules/seqsero/__init__.py
erinyoung/MultiQC_UPHL
26658ee664613e71bb41c4a23ddaeb0e41f6cdd1
[ "MIT" ]
null
null
null
multiqc_uphl/modules/seqsero/__init__.py
erinyoung/MultiQC_UPHL
26658ee664613e71bb41c4a23ddaeb0e41f6cdd1
[ "MIT" ]
null
null
null
from __future__ import absolute_import from .seqsero import MultiqcModule
24.666667
38
0.878378
9
74
6.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.108108
74
2
39
37
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
910b211e7b932a1b3807b1b45d5554ea33a1e853
5,151
py
Python
python_modules/dagster/dagster/api/snapshot_partition.py
asamoal/dagster
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster/api/snapshot_partition.py
asamoal/dagster
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster/api/snapshot_partition.py
asamoal/dagster
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
[ "Apache-2.0" ]
null
null
null
from typing import TYPE_CHECKING, List import dagster._check as check from dagster.core.errors import DagsterUserCodeProcessError from dagster.core.host_representation.external_data import ( ExternalPartitionConfigData, ExternalPartitionExecutionErrorData, ExternalPartitionNamesData, ExternalPartitionSetExecutionParamData, ExternalPartitionTagsData, ) from dagster.core.host_representation.handle import RepositoryHandle from dagster.grpc.types import PartitionArgs, PartitionNamesArgs, PartitionSetExecutionParamArgs from dagster.serdes import deserialize_as if TYPE_CHECKING: from dagster.grpc.client import DagsterGrpcClient def sync_get_external_partition_names_grpc( api_client: "DagsterGrpcClient", repository_handle: RepositoryHandle, partition_set_name: str ) -> ExternalPartitionNamesData: from dagster.grpc.client import DagsterGrpcClient check.inst_param(api_client, "api_client", DagsterGrpcClient) check.inst_param(repository_handle, "repository_handle", RepositoryHandle) check.str_param(partition_set_name, "partition_set_name") repository_origin = repository_handle.get_external_origin() result = deserialize_as( api_client.external_partition_names( partition_names_args=PartitionNamesArgs( repository_origin=repository_origin, partition_set_name=partition_set_name, ), ), (ExternalPartitionNamesData, ExternalPartitionExecutionErrorData), ) if isinstance(result, ExternalPartitionExecutionErrorData): raise DagsterUserCodeProcessError.from_error_info(result.error) return result def sync_get_external_partition_config_grpc( api_client: "DagsterGrpcClient", repository_handle: RepositoryHandle, partition_set_name: str, partition_name: str, ) -> ExternalPartitionConfigData: from dagster.grpc.client import DagsterGrpcClient check.inst_param(api_client, "api_client", DagsterGrpcClient) check.inst_param(repository_handle, "repository_handle", RepositoryHandle) check.str_param(partition_set_name, "partition_set_name") check.str_param(partition_name, "partition_name") 
repository_origin = repository_handle.get_external_origin() result = deserialize_as( api_client.external_partition_config( partition_args=PartitionArgs( repository_origin=repository_origin, partition_set_name=partition_set_name, partition_name=partition_name, ), ), (ExternalPartitionConfigData, ExternalPartitionExecutionErrorData), ) if isinstance(result, ExternalPartitionExecutionErrorData): raise DagsterUserCodeProcessError.from_error_info(result.error) return result def sync_get_external_partition_tags_grpc( api_client: "DagsterGrpcClient", repository_handle: RepositoryHandle, partition_set_name: str, partition_name: str, ) -> ExternalPartitionTagsData: from dagster.grpc.client import DagsterGrpcClient check.inst_param(api_client, "api_client", DagsterGrpcClient) check.inst_param(repository_handle, "repository_handle", RepositoryHandle) check.str_param(partition_set_name, "partition_set_name") check.str_param(partition_name, "partition_name") repository_origin = repository_handle.get_external_origin() result = deserialize_as( api_client.external_partition_tags( partition_args=PartitionArgs( repository_origin=repository_origin, partition_set_name=partition_set_name, partition_name=partition_name, ), ), (ExternalPartitionTagsData, ExternalPartitionExecutionErrorData), ) if isinstance(result, ExternalPartitionExecutionErrorData): raise DagsterUserCodeProcessError.from_error_info(result.error) return result def sync_get_external_partition_set_execution_param_data_grpc( api_client: "DagsterGrpcClient", repository_handle: RepositoryHandle, partition_set_name: str, partition_names: List[str], ) -> ExternalPartitionSetExecutionParamData: from dagster.grpc.client import DagsterGrpcClient check.inst_param(api_client, "api_client", DagsterGrpcClient) check.inst_param(repository_handle, "repository_handle", RepositoryHandle) check.str_param(partition_set_name, "partition_set_name") check.list_param(partition_names, "partition_names", of_type=str) repository_origin = 
repository_handle.get_external_origin() result = deserialize_as( api_client.external_partition_set_execution_params( partition_set_execution_param_args=PartitionSetExecutionParamArgs( repository_origin=repository_origin, partition_set_name=partition_set_name, partition_names=partition_names, ), ), (ExternalPartitionSetExecutionParamData, ExternalPartitionExecutionErrorData), ) if isinstance(result, ExternalPartitionExecutionErrorData): raise DagsterUserCodeProcessError.from_error_info(result.error) return result
39.022727
97
0.764318
484
5,151
7.756198
0.119835
0.073522
0.085242
0.073255
0.755194
0.730421
0.7187
0.7187
0.7187
0.7187
0
0
0.17317
5,151
131
98
39.320611
0.881428
0
0
0.636364
0
0
0.056494
0
0
0
0
0
0
1
0.036364
false
0
0.109091
0
0.181818
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
914b3157857a8c51c3ed2d8eca2e294d5ad1db79
488
py
Python
userApp/models.py
asifmohd/issueTracking
0c246a385a816686a79436b9043b04b828736e3c
[ "MIT" ]
1
2017-11-20T02:07:33.000Z
2017-11-20T02:07:33.000Z
userApp/models.py
asifmohd/issueTracking
0c246a385a816686a79436b9043b04b828736e3c
[ "MIT" ]
null
null
null
userApp/models.py
asifmohd/issueTracking
0c246a385a816686a79436b9043b04b828736e3c
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class User(models.Model): email = models.CharField(max_length=200, primary_key=True) password = models.CharField(max_length=200) def __str__(self): return self.email class Detail(models.Model): email = models.OneToOneField(User, on_delete=models.CASCADE) full_name = models.CharField(max_length=200) address = models.CharField(max_length=200) def __str__(self): return self.full_name
27.111111
64
0.72541
66
488
5.121212
0.484848
0.177515
0.213018
0.284024
0.43787
0.278107
0.278107
0.278107
0.278107
0.278107
0
0.029851
0.17623
488
17
65
28.705882
0.810945
0.04918
0
0.166667
0
0
0
0
0
0
0
0
0
1
0.166667
false
0.083333
0.083333
0.166667
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
1
1
0
0
5
e670152fd106fb179cc169bca23b28be509df5be
65
py
Python
nitpicker/report_generator/__init__.py
Rumpelshtinskiy/nitpicker
b1d4850376b690134064a9c460088d901b1b51c1
[ "MIT" ]
null
null
null
nitpicker/report_generator/__init__.py
Rumpelshtinskiy/nitpicker
b1d4850376b690134064a9c460088d901b1b51c1
[ "MIT" ]
null
null
null
nitpicker/report_generator/__init__.py
Rumpelshtinskiy/nitpicker
b1d4850376b690134064a9c460088d901b1b51c1
[ "MIT" ]
null
null
null
from nitpicker.report_generator.generator import ReportGenerator
32.5
64
0.907692
7
65
8.285714
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.061538
65
1
65
65
0.95082
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e6c31865ee1869a738f47182a3b3518b08eb6088
74
py
Python
pylab/devices/generic/__init__.py
LukeSkywalker92/pylab
41df6546a167187e6f39bfdfbdf9fc2ec9ac0d88
[ "MIT" ]
1
2020-07-15T14:00:24.000Z
2020-07-15T14:00:24.000Z
pylab/devices/generic/__init__.py
LukeSkywalker92/pylab
41df6546a167187e6f39bfdfbdf9fc2ec9ac0d88
[ "MIT" ]
1
2020-02-06T17:43:46.000Z
2020-02-12T15:06:37.000Z
pylab/devices/generic/__init__.py
LukeSkywalker92/pylab
41df6546a167187e6f39bfdfbdf9fc2ec9ac0d88
[ "MIT" ]
null
null
null
from .dummy_device import DummyDevice from .time_device import TimeDevice
24.666667
37
0.864865
10
74
6.2
0.7
0.387097
0
0
0
0
0
0
0
0
0
0
0.108108
74
2
38
37
0.939394
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e6c8ce71eb06b0820d88bf2d497934986a1614fe
1,016
py
Python
labs/lab11/tests/smallest-int.py
QinJiaHao1994/cs61a-sp20-solutions
71a481bef5ce73a4c6ff746455ccc51dc65453ea
[ "MIT" ]
8
2020-07-28T11:10:49.000Z
2021-05-29T15:27:17.000Z
31-Aggregation/lab11/lab11/tests/smallest-int.py
ericchen12377/CS61A_LearningDoc
31f23962b0e2834795bf61eeb0f4884cc5da1809
[ "MIT" ]
null
null
null
31-Aggregation/lab11/lab11/tests/smallest-int.py
ericchen12377/CS61A_LearningDoc
31f23962b0e2834795bf61eeb0f4884cc5da1809
[ "MIT" ]
1
2020-10-23T08:15:08.000Z
2020-10-23T08:15:08.000Z
test = { 'name': 'smallest-int', 'points': 1, 'suites': [ { 'cases': [ { 'code': r""" sqlite> SELECT * FROM smallest_int; 4/8/2020 21:29:28|3 4/8/2020 21:31:17|3 4/8/2020 21:34:15|3 4/8/2020 21:34:23|3 4/8/2020 22:07:34|3 4/8/2020 22:32:20|3 4/9/2020 13:05:02|3 4/9/2020 13:42:22|3 4/9/2020 15:26:39|3 4/9/2020 17:47:39|3 4/9/2020 6:03:22|3 4/8/2020 21:27:56|4 4/8/2020 21:30:24|4 4/8/2020 22:03:36|4 4/8/2020 21:21:44|5 4/8/2020 21:27:28|5 4/8/2020 21:32:00|5 4/8/2020 21:36:29|5 4/8/2020 21:37:32|5 4/8/2020 22:08:41|5 """, 'hidden': False, 'locked': False } ], 'ordered': False, 'scored': True, 'setup': r""" sqlite> .read lab11.sql """, 'teardown': '', 'type': 'sqlite' } ] }
22.577778
45
0.408465
172
1,016
2.406977
0.360465
0.072464
0.217391
0.21256
0.36715
0.05314
0
0
0
0
0
0.439597
0.413386
1,016
44
46
23.090909
0.255034
0
0
0.045455
0
0
0.765748
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
fc376451b5c5633b3292fba837d35e29c52fc415
305
py
Python
pyridge/__init__.py
cperales/PyRidge
b0029fae9e24a4e5c364bbd8fc3791eab15baa75
[ "MIT" ]
8
2019-03-09T13:47:23.000Z
2022-01-29T03:51:00.000Z
pyridge/__init__.py
cperales/pyridge
74a9aa83c1687e5362b0fd02f526281ad6837b75
[ "MIT" ]
1
2018-10-19T18:46:53.000Z
2018-10-19T18:46:53.000Z
pyridge/__init__.py
cperales/PyRidge
b0029fae9e24a4e5c364bbd8fc3791eab15baa75
[ "MIT" ]
3
2020-08-26T10:08:20.000Z
2021-11-13T11:42:23.000Z
import logging import warnings from .neural import * from .kernel import * from .negcor import * from .linear import * warnings.simplefilter('ignore') algorithm_dict = neural_algorithm.copy() algorithm_dict.update(kernel_algorithm) algorithm_dict.update(nc_algorithm) algorithm_dict.update(linear_dict)
21.785714
40
0.819672
39
305
6.205128
0.384615
0.214876
0.235537
0.231405
0
0
0
0
0
0
0
0
0.095082
305
13
41
23.461538
0.876812
0
0
0
0
0
0.019672
0
0
0
0
0
0
1
0
false
0
0.545455
0
0.545455
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
fc8a4472c2e69026aeae09efef987fc6b107dbc6
502
py
Python
src/io2048/test_io_online.py
fritjofwolf/2048-planning
266e003bc8d5b56c0f874a3eb5d992c597bbe31c
[ "MIT" ]
null
null
null
src/io2048/test_io_online.py
fritjofwolf/2048-planning
266e003bc8d5b56c0f874a3eb5d992c597bbe31c
[ "MIT" ]
null
null
null
src/io2048/test_io_online.py
fritjofwolf/2048-planning
266e003bc8d5b56c0f874a3eb5d992c597bbe31c
[ "MIT" ]
null
null
null
import pytest import numpy as np from io2048.io_online import IOOnline # def test_dummy(): # io = IOOnline() # state = io.reset() # done = False # while not done: # action = np.random.randint(4) # state, reward, done = io.step(action) # io.close_game() if __name__ == '__main__': io = IOOnline() state = io.reset() done = False while not done: action = np.random.randint(4) state, reward, done = io.step(action) io.close_game()
22.818182
47
0.595618
66
502
4.348485
0.454545
0.069686
0.10453
0.118467
0.71777
0.71777
0.71777
0.71777
0.71777
0.71777
0
0.016575
0.278884
502
22
48
22.818182
0.776243
0.400398
0
0
0
0
0.027304
0
0
0
0
0
0
1
0
false
0
0.272727
0
0.272727
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5d8af693f6e2faa2af8d404f49ed3cc6d300bdc0
158
py
Python
series_tiempo_ar_api/libs/indexing/tests/indexing_test_case.py
datosgobar/series-tiempo-ar-api
6b553c573f6e8104f8f3919efe79089b7884280c
[ "MIT" ]
28
2017-12-16T20:30:52.000Z
2021-08-11T17:35:04.000Z
series_tiempo_ar_api/libs/indexing/tests/indexing_test_case.py
datosgobar/series-tiempo-ar-api
6b553c573f6e8104f8f3919efe79089b7884280c
[ "MIT" ]
446
2017-11-16T15:21:40.000Z
2021-06-10T20:14:21.000Z
series_tiempo_ar_api/libs/indexing/tests/indexing_test_case.py
datosgobar/series-tiempo-ar-api
6b553c573f6e8104f8f3919efe79089b7884280c
[ "MIT" ]
12
2018-08-23T16:13:32.000Z
2022-03-01T23:12:28.000Z
from django.test import TestCase, override_settings @override_settings(TS_INDEX='indexing_test_case_indicators') class IndexingTestCase(TestCase): pass
22.571429
60
0.835443
19
158
6.631579
0.789474
0.253968
0
0
0
0
0
0
0
0
0
0
0.094937
158
6
61
26.333333
0.881119
0
0
0
0
0
0.183544
0.183544
0
0
0
0
0
1
0
true
0.25
0.25
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
5dbcc143ff80733787415f9605de5f2097bba5e9
98
py
Python
timer/admin.py
eug-vs/chrono-cube-api
ddef43c1ef5ea7a30919ff638cc80ea013ac2130
[ "MIT" ]
2
2020-01-01T17:20:18.000Z
2020-01-02T18:45:58.000Z
timer/admin.py
eug-vs/chrono-cube-api
ddef43c1ef5ea7a30919ff638cc80ea013ac2130
[ "MIT" ]
3
2019-12-30T21:44:21.000Z
2020-01-21T13:55:51.000Z
timer/admin.py
Eug-VS/chrono-cube-api
ddef43c1ef5ea7a30919ff638cc80ea013ac2130
[ "MIT" ]
null
null
null
from django.contrib import admin
from timer.models import Solution

# Expose the Solution model in the Django admin with default ModelAdmin options.
admin.site.register(Solution)
19.6
33
0.836735
14
98
5.857143
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.102041
98
4
34
24.5
0.931818
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5dc06909d56c4371cf6187c4558f1e540f4e8951
85
py
Python
RiverValley-Ex2/Windows runner/Running_script.py
sebastian2000/OOP-Grand-Devoir1
2bd6c86d1d3934d03439688cb7db8ca1abc4fe4a
[ "MIT" ]
null
null
null
RiverValley-Ex2/Windows runner/Running_script.py
sebastian2000/OOP-Grand-Devoir1
2bd6c86d1d3934d03439688cb7db8ca1abc4fe4a
[ "MIT" ]
5
2020-11-18T19:56:51.000Z
2020-11-19T18:40:16.000Z
RiverValley-Ex2/Windows runner/Running_script.py
sebastian2000/OOP-Grand-Devoir1
2bd6c86d1d3934d03439688cb7db8ca1abc4fe4a
[ "MIT" ]
1
2020-11-20T15:45:40.000Z
2020-11-20T15:45:40.000Z
#!/usr/bin/env python3
"""Windows launcher for the IdleGame jar."""
import os

# os.system('') executes no command; on Windows it is a known trick that makes
# the console start honoring ANSI escape sequences -- presumably why it is
# here. NOTE(review): confirm the game's output actually uses ANSI codes.
os.system('')

# Launch the game synchronously (blocks until the JVM exits).
# Dropped the stray trailing semicolon from the original line.
os.system('java -jar IdleGame.jar')
14.166667
36
0.682353
14
85
4.142857
0.714286
0.275862
0
0
0
0
0
0
0
0
0
0.013158
0.105882
85
6
36
14.166667
0.75
0.247059
0
0
0
0
0.34375
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
5dd321e1f1da208e62975fe9df2a6b721ea10317
83
py
Python
batchglm/train/tf1/base/external.py
le-ander/batchglm
31b905b99b6baa7c94b82550d6a74f00d81966ea
[ "BSD-3-Clause" ]
null
null
null
batchglm/train/tf1/base/external.py
le-ander/batchglm
31b905b99b6baa7c94b82550d6a74f00d81966ea
[ "BSD-3-Clause" ]
null
null
null
batchglm/train/tf1/base/external.py
le-ander/batchglm
31b905b99b6baa7c94b82550d6a74f00d81966ea
[ "BSD-3-Clause" ]
null
null
null
from batchglm.models.base import _EstimatorBase from batchglm import pkg_constants
27.666667
47
0.879518
11
83
6.454545
0.727273
0.338028
0
0
0
0
0
0
0
0
0
0
0.096386
83
2
48
41.5
0.946667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
5ddb00c5b8f21518357aae8bb3b9a801720b8a6b
332
py
Python
by-session/class-921/week4/string_operators.py
amiraliakbari/sharif-mabani-python
5d14a08d165267fe71c28389ddbafe29af7078c5
[ "MIT" ]
2
2015-04-29T20:59:35.000Z
2018-09-26T13:33:43.000Z
by-session/class-921/week4/string_operators.py
amiraliakbari/sharif-mabani-python
5d14a08d165267fe71c28389ddbafe29af7078c5
[ "MIT" ]
null
null
null
by-session/class-921/week4/string_operators.py
amiraliakbari/sharif-mabani-python
5d14a08d165267fe71c28389ddbafe29af7078c5
[ "MIT" ]
null
null
null
# NOTE(review): Python 2 syntax ("print" statement) -- this file will not run
# under Python 3. Left as-is: it is teaching material for string operators.
a = "Hello World!"
print a[0]                  # single character by index: 'H'
print a[0:5]                # slice [start:stop): 'Hello'
print "Hello world!"[5:10]  # slicing works directly on a literal
print a[6:2]                # start > stop yields the empty string
print a[-6]                 # negative index counts from the end
print a[-6:-1]              # negative bounds on both sides
print a[-6:1]               # empty: -6 resolves to an index past 1
print a[-6:]                # from -6 to the end
print a[:-6]                # from the beginning up to -6
print a[:]                  # full copy of the string
print a[10:15]              # stop may exceed len(a); it is clamped
print 2 * a                 # repetition
print "jam " + "2 reshte"   # concatenation
print "Hello" in a          # substring membership (case sensitive)
print "hello" in a          # False: lowercase 'h' is not present
print "hello" not in a      # negated membership
print len(a)                # string length
for i in range(0, len(a)):
    print a[:i]             # growing prefixes of a
15.090909
26
0.605422
75
332
2.68
0.253333
0.328358
0.208955
0.179104
0.393035
0.363184
0.114428
0
0
0
0
0.08209
0.192771
332
21
27
15.809524
0.66791
0
0
0
0
0
0.153614
0
0
0
0
0
0
0
null
null
0
0
null
null
0.9
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
b90c5404d8140ffeb6cc83477b01f7c38fc1803a
119
py
Python
web/app/apps.py
gubertoli/fake-check
69d0190d8616295fd806d57b5861bcfcf7d9d3e2
[ "MIT" ]
null
null
null
web/app/apps.py
gubertoli/fake-check
69d0190d8616295fd806d57b5861bcfcf7d9d3e2
[ "MIT" ]
null
null
null
web/app/apps.py
gubertoli/fake-check
69d0190d8616295fd806d57b5861bcfcf7d9d3e2
[ "MIT" ]
null
null
null
from django.apps import AppConfig
from django.conf import settings


# NOTE(review): this class shadows the imported django.apps.AppConfig name;
# renaming it (e.g. MyAppConfig) would be clearer but could break dotted
# references in INSTALLED_APPS, so it is left unchanged here.
# NOTE(review): `settings` is imported but unused in this chunk.
class AppConfig(AppConfig):
    # Django application label for the "app" package.
    name = 'app'
14.875
33
0.731092
15
119
5.8
0.666667
0.229885
0
0
0
0
0
0
0
0
0
0
0.201681
119
8
34
14.875
0.915789
0
0
0
0
0
0.025
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
5d58fdb8b3596533fe6150ae09fdb70cfa4ed2ab
87
py
Python
python/8Kyu/Parse float.py
athasv/Codewars-data
5e106466e709fd776f23585ad9f652d0d65b48d3
[ "MIT" ]
null
null
null
python/8Kyu/Parse float.py
athasv/Codewars-data
5e106466e709fd776f23585ad9f652d0d65b48d3
[ "MIT" ]
null
null
null
python/8Kyu/Parse float.py
athasv/Codewars-data
5e106466e709fd776f23585ad9f652d0d65b48d3
[ "MIT" ]
null
null
null
def parse_float(s):
    """Parse *s* as a float, returning None for any non-numeric input.

    The original guard `not s.isalpha()` only rejected purely alphabetic
    strings, so mixed input such as "12x" (and the empty string) still
    reached float() and raised ValueError. Catching the conversion error
    handles every invalid string uniformly (EAFP). Non-string input still
    returns None, as before.
    """
    if not isinstance(s, str):
        return None
    try:
        return float(s)
    except ValueError:
        return None
43.5
86
0.747126
17
87
3.764706
0.764706
0.1875
0
0
0
0
0
0
0
0
0
0
0.126437
87
1
87
87
0.842105
0
0
0
0
0
0
0
0
0
0
0
0
1
1
false
0
0
1
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
5d6acc7cb9e077dfaa6ebbfe5e3766e207e8c9ca
302
py
Python
tests/stdlib/test_queue.py
li-caspar/eventlet_0.30.2
a431842e29c26e46cfcfff60c93ca92e07663044
[ "MIT" ]
5,079
2015-01-01T03:39:46.000Z
2022-03-31T07:38:22.000Z
desktop/core/ext-py/eventlet-0.24.1/tests/stdlib/test_queue.py
zks888/hue
93a8c370713e70b216c428caa2f75185ef809deb
[ "Apache-2.0" ]
1,623
2015-01-01T08:06:24.000Z
2022-03-30T19:48:52.000Z
desktop/core/ext-py/eventlet-0.24.1/tests/stdlib/test_queue.py
zks888/hue
93a8c370713e70b216c428caa2f75185ef809deb
[ "Apache-2.0" ]
2,033
2015-01-04T07:18:02.000Z
2022-03-28T19:55:47.000Z
from eventlet import patcher
from eventlet.green import Queue
from eventlet.green import threading
from eventlet.green import time

# Run the stdlib 'test.test_queue' suite against eventlet's green
# (cooperative) Queue/threading/time modules by injecting them in place of
# the real ones into the test module, executed in this namespace.
patcher.inject(
    'test.test_queue',
    globals(),
    ('Queue', Queue),
    ('threading', threading),
    ('time', time))

if __name__ == "__main__":
    # NOTE(review): test_main is expected to appear in this module's globals
    # as a side effect of patcher.inject above -- confirm against eventlet.
    test_main()
20.133333
36
0.692053
36
302
5.527778
0.388889
0.241206
0.256281
0.346734
0
0
0
0
0
0
0
0
0.18543
302
14
37
21.571429
0.808943
0
0
0
0
0
0.135762
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
5d6e41ae8f894dce0c2db9d695432d9c64c251a4
68
py
Python
Reliability_Tests/ex75.py
dieterch/dReliaCalc
1e0a06e904f3a60527c3a6ae0f45c666a9b48128
[ "MIT" ]
null
null
null
Reliability_Tests/ex75.py
dieterch/dReliaCalc
1e0a06e904f3a60527c3a6ae0f45c666a9b48128
[ "MIT" ]
null
null
null
Reliability_Tests/ex75.py
dieterch/dReliaCalc
1e0a06e904f3a60527c3a6ae0f45c666a9b48128
[ "MIT" ]
null
null
null
"""Print the documentation of the `automotive` example dataset."""
from reliability.Datasets import automotive

# help() writes the documentation to stdout itself and returns None, so the
# original print(help(...)) emitted a spurious trailing "None" line.
help(automotive)
22.666667
43
0.852941
8
68
7.25
0.875
0
0
0
0
0
0
0
0
0
0
0
0.073529
68
2
44
34
0.920635
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
5396906a068396f2556a688b570e16fa52166a1a
131
py
Python
speck/__init__.py
schctl/speck
80fdf280e1308ba17d1ca8e951d56a3bda1cd9a8
[ "MIT" ]
null
null
null
speck/__init__.py
schctl/speck
80fdf280e1308ba17d1ca8e951d56a3bda1cd9a8
[ "MIT" ]
null
null
null
speck/__init__.py
schctl/speck
80fdf280e1308ba17d1ca8e951d56a3bda1cd9a8
[ "MIT" ]
null
null
null
"""
Minimal weatherapi API wrapper.
"""

# Package version string.
__version__ = 'v0.3.13'

# Re-export the public names of each submodule at package level (speck.X).
from .cache import *
from .client import *
from .errors import *
13.1
31
0.687023
17
131
5.058824
0.764706
0.232558
0
0
0
0
0
0
0
0
0
0.037037
0.175573
131
9
32
14.555556
0.759259
0.236641
0
0
0
0
0.076087
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
53ceddfa89765fad9a092ced340dc01be1961755
111
py
Python
lib/debounce.py
Liorst4/aramcon-firmware
4563a3b6057bc18ffd9a89336e30123f4cdbaa7d
[ "MIT" ]
8
2020-03-06T16:11:19.000Z
2021-06-20T20:55:35.000Z
lib/debounce.py
Liorst4/aramcon-firmware
4563a3b6057bc18ffd9a89336e30123f4cdbaa7d
[ "MIT" ]
4
2021-06-18T09:33:26.000Z
2021-07-16T14:06:37.000Z
lib/debounce.py
Liorst4/aramcon-firmware
4563a3b6057bc18ffd9a89336e30123f4cdbaa7d
[ "MIT" ]
5
2021-06-15T13:33:48.000Z
2021-07-03T13:08:33.000Z
from arambadge import badge


def wait_for_button_release():
    """Block until no gamepad button is pressed.

    NOTE(review): this is a busy-wait spin loop with no sleep; it assumes
    badge.gamepad.get_pressed() returns a falsy value once every button is
    released -- confirm against the arambadge API.
    """
    while badge.gamepad.get_pressed():
        pass
22.2
38
0.738739
15
111
5.2
0.933333
0
0
0
0
0
0
0
0
0
0
0
0.189189
111
5
39
22.2
0.866667
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
true
0.25
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
5
54d93f74cd5ac120e02098e4510083dae76bd243
54
py
Python
chapter9/Tornado/tornado_study/part2/uiuiui.py
MMingLeung/Python_Study
4ff1d02d2b6dd54e96f7179fa000548936b691e7
[ "MIT" ]
3
2017-12-27T14:08:17.000Z
2018-02-10T13:01:08.000Z
chapter9/Tornado/tornado_study/part2/uiuiui.py
MMingLeung/Python_Study
4ff1d02d2b6dd54e96f7179fa000548936b691e7
[ "MIT" ]
4
2017-05-24T10:37:05.000Z
2021-06-10T18:35:32.000Z
chapter9/Tornado/tornado_study/part2/uiuiui.py
MMingLeung/Python_Study
4ff1d02d2b6dd54e96f7179fa000548936b691e7
[ "MIT" ]
1
2018-02-14T19:05:30.000Z
2018-02-14T19:05:30.000Z
# UIMethod: template helper that renders a fixed anchor tag.
def tab(self):
    markup = "<a>UIMethod</a>"
    return markup
18
28
0.62963
8
54
4.25
0.75
0
0
0
0
0
0
0
0
0
0
0
0.166667
54
3
28
18
0.755556
0.148148
0
0
0
0
0.333333
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
54da1748c7ac5efbcdfe2709772cd29022b195fb
184
py
Python
jaegerserver/admin.py
the-bombers/jaeger
8912252af89154928f47bfde6912267d782371d1
[ "Unlicense" ]
null
null
null
jaegerserver/admin.py
the-bombers/jaeger
8912252af89154928f47bfde6912267d782371d1
[ "Unlicense" ]
19
2021-03-17T10:22:19.000Z
2021-03-19T15:13:08.000Z
jaegerserver/admin.py
the-bombers/jaeger
8912252af89154928f47bfde6912267d782371d1
[ "Unlicense" ]
null
null
null
from django.contrib import admin
from jaegerserver.models import Monster, Sighting, Location

# Expose the core models in the Django admin with default ModelAdmin options.
admin.site.register(Monster)
admin.site.register(Sighting)
admin.site.register(Location)
23
59
0.831522
24
184
6.375
0.5
0.176471
0.333333
0
0
0
0
0
0
0
0
0
0.081522
184
7
60
26.285714
0.905325
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
071e56033fa656e8914e8f1634d11e8e1f41743f
417
bzl
Python
third_party/com_unboundid.bzl
or-shachar/exodus
b28bf8f4c2330de47b8aac21c2bf3c40fda7cbdc
[ "MIT" ]
null
null
null
third_party/com_unboundid.bzl
or-shachar/exodus
b28bf8f4c2330de47b8aac21c2bf3c40fda7cbdc
[ "MIT" ]
null
null
null
third_party/com_unboundid.bzl
or-shachar/exodus
b28bf8f4c2330de47b8aac21c2bf3c40fda7cbdc
[ "MIT" ]
null
null
null
# Bazel/Starlark third-party pin for the UnboundID LDAP SDK.
load("//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external")

def dependencies():
    """Declare the com.unboundid Maven artifact with pinned sha256 checksums."""
    import_external(
        name = "com_unboundid_unboundid_ldapsdk",
        artifact = "com.unboundid:unboundid-ldapsdk:2.3.8",
        # Checksums pin both the jar and its sources jar for reproducibility.
        jar_sha256 = "b048b8e714e06d93123b360aa301bc8d7759f303ef8f8fa994e41df495288f4b",
        srcjar_sha256 = "b292492f5cb869bd6d4f5d7d77ffe6afcf4ed02a092f3de55578561a7a749438",
    )
37.909091
89
0.786571
35
417
9
0.657143
0.177778
0.133333
0.177778
0
0
0
0
0
0
0
0.245232
0.119904
417
10
90
41.7
0.613079
0
0
0
0
0
0.609113
0.609113
0
0
0
0
0
1
0.125
true
0
0.25
0
0.375
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
073517c2e843f86ad0d1797b1bc38537a3f73d36
197
py
Python
wavespy/utils/__init__.py
DPInvaders/wavespy
8cdf06023bf4caa08dc0d714a88ffb66bbb6eb71
[ "MIT" ]
null
null
null
wavespy/utils/__init__.py
DPInvaders/wavespy
8cdf06023bf4caa08dc0d714a88ffb66bbb6eb71
[ "MIT" ]
null
null
null
wavespy/utils/__init__.py
DPInvaders/wavespy
8cdf06023bf4caa08dc0d714a88ffb66bbb6eb71
[ "MIT" ]
null
null
null
""" wavespy.utils Utilities for addresses and transactions """ from .address import WavesAddress from .address_generator import WavesAddressGenerator from .async_address import WavesAsyncAddress
19.7
52
0.837563
21
197
7.761905
0.714286
0.134969
0
0
0
0
0
0
0
0
0
0
0.111675
197
9
53
21.888889
0.931429
0.279188
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
075c820658f862b0b47dd6c8eca3bbff07c0b299
783
py
Python
coconut/icoconut/__init__.py
zhammami/coconut
d3728e880ccc586b10b0e61221c3d69bffcf2abc
[ "Apache-2.0" ]
3,624
2015-02-22T07:06:18.000Z
2022-03-31T03:38:00.000Z
coconut/icoconut/__init__.py
zhammami/coconut
d3728e880ccc586b10b0e61221c3d69bffcf2abc
[ "Apache-2.0" ]
627
2015-03-31T01:18:53.000Z
2022-03-28T07:48:31.000Z
coconut/icoconut/__init__.py
zhammami/coconut
d3728e880ccc586b10b0e61221c3d69bffcf2abc
[ "Apache-2.0" ]
162
2016-03-02T05:22:55.000Z
2022-03-31T23:42:55.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------------------------------------------------- # INFO: # ----------------------------------------------------------------------------------------------------------------------- """ Author: Evan Hubinger License: Apache 2.0 Description: The Coconut IPython kernel module. """ # ----------------------------------------------------------------------------------------------------------------------- # IMPORTS: # ----------------------------------------------------------------------------------------------------------------------- from __future__ import print_function, absolute_import, unicode_literals, division from coconut.icoconut.root import * # NOQA
37.285714
121
0.274585
38
783
5.473684
0.894737
0
0
0
0
0
0
0
0
0
0
0.004104
0.066411
783
20
122
39.15
0.280438
0.807152
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
ab03ab3271ade3752007e3782584bb9f0b19a506
77
py
Python
tests/test_models/test_UserModel.py
ekbalba/starterkit-flask-api
36295bd84f8388a6a0b086b582580e5a0d11efe4
[ "MIT" ]
null
null
null
tests/test_models/test_UserModel.py
ekbalba/starterkit-flask-api
36295bd84f8388a6a0b086b582580e5a0d11efe4
[ "MIT" ]
null
null
null
tests/test_models/test_UserModel.py
ekbalba/starterkit-flask-api
36295bd84f8388a6a0b086b582580e5a0d11efe4
[ "MIT" ]
null
null
null
def test_new_user(new_user): assert new_user.email == "admin@google.net"
25.666667
47
0.74026
13
77
4.076923
0.692308
0.396226
0
0
0
0
0
0
0
0
0
0
0.12987
77
2
48
38.5
0.791045
0
0
0
0
0
0.207792
0
0
0
0
0
0.5
1
0.5
false
0
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
5
ab2e635ebe2823acd546ac253dc1cf9d678fed20
237
py
Python
register/admin.py
mattfishburn/DevSite
f0809e39b3f3bf04672546e07b15304f524222d8
[ "MIT" ]
null
null
null
register/admin.py
mattfishburn/DevSite
f0809e39b3f3bf04672546e07b15304f524222d8
[ "MIT" ]
null
null
null
register/admin.py
mattfishburn/DevSite
f0809e39b3f3bf04672546e07b15304f524222d8
[ "MIT" ]
null
null
null
from django.contrib import admin # Register your models here. from .models import EmailAddress from .models import EmailAddressRegistrationRequest admin.site.register(EmailAddress) admin.site.register(EmailAddressRegistrationRequest)
23.7
52
0.852321
25
237
8.08
0.48
0.09901
0.158416
0
0
0
0
0
0
0
0
0
0.092827
237
9
53
26.333333
0.939535
0.109705
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ab4d4d151cceefd151318b8df6804b2c68bde1a0
523
py
Python
ravendb/documents/operations/backups/settings.py
ravendb/RavenDB-Python-Client
6286b459b501e755fe8e8591a48acf8616605ccd
[ "MIT" ]
8
2016-10-08T17:45:44.000Z
2018-05-29T12:16:43.000Z
ravendb/documents/operations/backups/settings.py
ravendb/RavenDB-Python-Client
6286b459b501e755fe8e8591a48acf8616605ccd
[ "MIT" ]
5
2017-02-12T15:50:53.000Z
2017-09-18T12:25:01.000Z
ravendb/documents/operations/backups/settings.py
ravendb/RavenDB-Python-Client
6286b459b501e755fe8e8591a48acf8616605ccd
[ "MIT" ]
8
2016-07-03T07:59:12.000Z
2017-09-18T11:22:23.000Z
# todo: implement class BackupConfiguration: pass class PeriodicBackupConfiguration(BackupConfiguration): pass class BackupSettings: pass class LocalSettings(BackupSettings): pass class AmazonSettings(BackupSettings): pass class S3Settings(AmazonSettings): pass class AzureSettings(BackupSettings): pass class GlacierSettings(AmazonSettings): pass class BackupStatus: pass class GoogleCloudSettings(BackupStatus): pass class FtpSettings(BackupSettings): pass
11.886364
55
0.753346
43
523
9.162791
0.348837
0.228426
0.233503
0
0
0
0
0
0
0
0
0.002364
0.191205
523
43
56
12.162791
0.929078
0.028681
0
0.5
0
0
0
0
0
0
0
0.023256
0
1
0
true
0.5
0
0
0.5
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
1
0
0
0
0
0
5
dbac15f99d3dbf1769b88a500b642fd0c554805a
11,330
py
Python
tests/train/test_client.py
abeja-inc/abeja-platform-sdk
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
[ "Apache-2.0" ]
2
2020-10-20T18:38:16.000Z
2020-10-20T20:12:35.000Z
tests/train/test_client.py
abeja-inc/abeja-platform-sdk
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
[ "Apache-2.0" ]
30
2020-04-07T01:15:47.000Z
2020-11-18T03:25:19.000Z
tests/train/test_client.py
abeja-inc/abeja-platform-sdk
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
[ "Apache-2.0" ]
null
null
null
import logging import unittest from unittest import mock import requests from abeja import VERSION from abeja.common.connection import Connection from abeja.exceptions import BadRequest, InternalServerError from abeja.train.api.client import APIClient from abeja.train.client import Client from abeja.train.statistics import Statistics ORGANIZATION_ID = '1111111111111' TRAINING_JON_DEFINITION_NAME = 'tjd' TRAINING_JOB_ID = 'job-0123456789abcdef' ABEJA_API_URL = 'http://localhost:8080' PATCHED_ENVIRON = { 'ABEJA_ORGANIZATION_ID': ORGANIZATION_ID, 'TRAINING_JOB_DEFINITION_NAME': TRAINING_JON_DEFINITION_NAME, 'TRAINING_JOB_ID': TRAINING_JOB_ID} @mock.patch.dict('os.environ', PATCHED_ENVIRON) class TestClient(unittest.TestCase): @mock.patch.dict('os.environ', PATCHED_ENVIRON) def setUp(self): Connection.BASE_URL = ABEJA_API_URL self.client = Client() self.client.logger.setLevel(logging.FATAL) def test_init(self): self.assertIsInstance(self.client.api, APIClient) @mock.patch('abeja.train.client.extract_zipfile') @mock.patch('abeja.train.client.Client._get_content') @mock.patch('requests.Session.request') def test_download_training_result( self, m, m_get_content, m_extract_zipfile): dummy_binary = b'dummy' m_get_content.return_value = dummy_binary self.client.download_training_result(TRAINING_JOB_ID) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/result'.format( ABEJA_API_URL, ORGANIZATION_ID, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) m.assert_called_with( 'GET', url, data=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, json=None, params=None, timeout=30) m_extract_zipfile.assert_called_once_with(dummy_binary, path=None) @mock.patch('requests.Session.request') def test_update_statistics(self, m): statistics = Statistics(progress_percentage=0.5, epoch=1, num_epochs=5, key1='value1') statistics.add_stage( name=Statistics.STAGE_TRAIN, accuracy=0.9, loss=0.05) statistics.add_stage(name=Statistics.STAGE_VALIDATION, accuracy=0.8, 
loss=0.1, key2=2) self.client.update_statistics(statistics) self.assertEqual(m.call_count, 1) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format( ABEJA_API_URL, ORGANIZATION_ID, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) expected_data = { 'statistics': { 'num_epochs': 5, 'epoch': 1, 'progress_percentage': 0.5, 'stages': { 'train': { 'accuracy': 0.9, 'loss': 0.05 }, 'validation': { 'accuracy': 0.8, 'loss': 0.1, 'key2': 2 } }, 'key1': 'value1' } } m.assert_called_with( 'POST', url, params=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, timeout=30, data=None, json=expected_data) @mock.patch('requests.Session.request') def test_update_statistics_without_statistics(self, m): statistics = Statistics(progress_percentage=0.5) self.client.update_statistics(statistics) self.assertEqual(m.call_count, 1) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format( ABEJA_API_URL, ORGANIZATION_ID, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) m.assert_called_with( 'POST', url, params=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, timeout=30, data=None, json={ 'statistics': { 'progress_percentage': 0.5}}) @mock.patch('requests.Session.request') def test_update_statistics_progress_within_statistics(self, m): statistics = Statistics(progress_percentage=0.5) statistics.add_stage(name='other_stage', key1='value1') self.client.update_statistics(statistics) self.assertEqual(m.call_count, 1) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format( ABEJA_API_URL, ORGANIZATION_ID, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) expected_data = { 'statistics': { 'progress_percentage': 0.5, 'stages': { 'other_stage': { 'key1': 'value1' } } } } m.assert_called_with( 'POST', url, params=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, timeout=30, data=None, json=expected_data) @mock.patch('requests.Session.request') def 
test_update_statistics_override_organization_id(self, m): organization_id = '2222222222222' client = Client(organization_id=organization_id) statistics = Statistics(progress_percentage=0.5, key1='value1') client.update_statistics(statistics) self.assertEqual(m.call_count, 1) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format( ABEJA_API_URL, organization_id, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) m.assert_called_with( 'POST', url, params=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, timeout=30, data=None, json={ 'statistics': { 'progress_percentage': 0.5, 'key1': 'value1'}}) @mock.patch( 'abeja.common.connection.Connection.request', side_effect=BadRequest( 'foo', 'bar', 400, 'https://api.abeja.io/')) def test_update_statistics_raise_BadRequest(self, m): # check: don't raise Exception when model-api returns 400 Bad-Request logger_mock = mock.MagicMock() self.client.logger = logger_mock try: statistics = Statistics(progress_percentage=0.5, key1='value1') self.client.update_statistics(statistics) self.assertEqual(m.call_count, 1) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format( ABEJA_API_URL, ORGANIZATION_ID, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) m.assert_called_with( 'POST', url, params=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, data=None, json={ 'statistics': { 'progress_percentage': 0.5, 'key1': 'value1'}}) self.assertEqual(logger_mock.warning.call_count, 1) self.assertEqual(logger_mock.exception.call_count, 0) except Exception: self.fail() @mock.patch( 'abeja.common.connection.Connection.request', side_effect=InternalServerError( 'foo', 'bar', 500, 'https://api.abeja.io/')) def test_update_statistics_raise_InternalServerError(self, m): # check: don't raise Exception when model-api returns 500 # Internal-Server-Error logger_mock = mock.MagicMock() self.client.logger = logger_mock try: statistics = Statistics(progress_percentage=0.5, 
key1='value1') self.client.update_statistics(statistics) self.assertEqual(m.call_count, 1) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format( ABEJA_API_URL, ORGANIZATION_ID, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) m.assert_called_with( 'POST', url, params=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, data=None, json={ 'statistics': { 'progress_percentage': 0.5, 'key1': 'value1'}}) self.assertEqual(logger_mock.warning.call_count, 0) self.assertEqual(logger_mock.exception.call_count, 1) except Exception: self.fail() @mock.patch('abeja.common.connection.Connection.request', side_effect=requests.exceptions.ConnectionError()) def test_update_statistics_raise_ConnectionError(self, m): # check: don't raise Exception when model-api returns 500 # Internal-Server-Error logger_mock = mock.MagicMock() self.client.logger = logger_mock try: statistics = Statistics(progress_percentage=0.5, key1='value1') self.client.update_statistics(statistics) self.assertEqual(m.call_count, 1) url = '{}/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format( ABEJA_API_URL, ORGANIZATION_ID, TRAINING_JON_DEFINITION_NAME, TRAINING_JOB_ID) m.assert_called_with( 'POST', url, params=None, headers={ 'User-Agent': 'abeja-platform-sdk/{}'.format(VERSION)}, data=None, json={ 'statistics': { 'progress_percentage': 0.5, 'key1': 'value1'}}) self.assertEqual(logger_mock.warning.call_count, 0) self.assertEqual(logger_mock.exception.call_count, 1) except Exception: self.fail() @mock.patch('abeja.common.connection.Connection.request') def test_update_statistics_statistics_none(self, m): # check: don't raise Exception logger_mock = mock.MagicMock() self.client.logger = logger_mock try: self.client.update_statistics(None) m.assert_not_called() self.assertEqual(logger_mock.warning.call_count, 1) self.assertEqual(logger_mock.exception.call_count, 0) except Exception: self.fail() @mock.patch('abeja.common.connection.Connection.request') def 
test_update_statistics_with_empty_statistics(self, m): # check: don't raise Exception logger_mock = mock.MagicMock() self.client.logger = logger_mock try: self.client.update_statistics(Statistics()) m.assert_not_called() self.assertEqual(logger_mock.warning.call_count, 1) self.assertEqual(logger_mock.exception.call_count, 0) except Exception: self.fail()
39.204152
94
0.576169
1,136
11,330
5.523768
0.126761
0.031873
0.04239
0.044622
0.78996
0.761434
0.749641
0.719044
0.711076
0.628685
0
0.020592
0.31421
11,330
288
95
39.340278
0.787001
0.024801
0
0.671698
0
0
0.160946
0.100625
0
0
0
0
0.109434
1
0.045283
false
0
0.037736
0
0.086792
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
915c6c892658925c8c92493330aa329ab3de192b
39
py
Python
tests/unit/__init__.py
moreiramarti/terraform-aws-lambda-scheduler-stop-start
6300813d312a87188df162b422ebef6c54fd0918
[ "Apache-2.0" ]
null
null
null
tests/unit/__init__.py
moreiramarti/terraform-aws-lambda-scheduler-stop-start
6300813d312a87188df162b422ebef6c54fd0918
[ "Apache-2.0" ]
null
null
null
tests/unit/__init__.py
moreiramarti/terraform-aws-lambda-scheduler-stop-start
6300813d312a87188df162b422ebef6c54fd0918
[ "Apache-2.0" ]
null
null
null
"""Main entry point for unit tests."""
19.5
38
0.666667
6
39
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.153846
39
1
39
39
0.787879
0.820513
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
9175780984726ea8c8791e309219ad65eff59f77
6,975
py
Python
tests/test_checkout.py
bockstaller/pretix-mandatory-product
92579158567c16703e1c4bdfdc30b70d1a127afb
[ "Apache-2.0" ]
null
null
null
tests/test_checkout.py
bockstaller/pretix-mandatory-product
92579158567c16703e1c4bdfdc30b70d1a127afb
[ "Apache-2.0" ]
null
null
null
tests/test_checkout.py
bockstaller/pretix-mandatory-product
92579158567c16703e1c4bdfdc30b70d1a127afb
[ "Apache-2.0" ]
null
null
null
import datetime from django.test import TestCase from django.utils.timezone import now from django_scopes import scopes_disabled from pretix.base.models import CartPosition, Event, Item, ItemCategory, Organizer, Quota from pretix.testutils.sessions import get_cart_session_key class BaseCheckoutTestCase: def _set_session(self, key, value): session = self.client.session session["carts"][get_cart_session_key(self.client, self.event)][key] = value session.save() @scopes_disabled() def setUp(self): super().setUp() self.orga = Organizer.objects.create(name="Dummy", slug="dummy") self.event = Event.objects.create( organizer=self.orga, name="Dummy", slug="dummy", date_from=now(), live=True, plugins="pretix_mandatory_product", ) self.category = ItemCategory.objects.create( event=self.event, name="Everything", position=0 ) self.quota_tickets = Quota.objects.create( event=self.event, name="Tickets", size=5 ) self.ticket_mandatory = Item.objects.create( event=self.event, name="Early-bird ticket", category=self.category, default_price=23, admission=True, ) self.quota_tickets.items.add(self.ticket_mandatory) self.ticket_mandatory2 = Item.objects.create( event=self.event, name="Early-bird ticket", category=self.category, default_price=23, admission=True, ) self.quota_tickets.items.add(self.ticket_mandatory2) self.ticket = Item.objects.create( event=self.event, name="Early-bird ticket", category=self.category, default_price=23, admission=True, ) self.quota_tickets.items.add(self.ticket) self.client.get("/%s/%s/" % (self.orga.slug, self.event.slug)) self.session_key = get_cart_session_key(self.client, self.event) self._set_session("email", "admin@localhost") self.event.settings["mandatory_product__list"] = [ self.ticket_mandatory.id, self.ticket_mandatory2.id, ] class CheckoutTestCase(BaseCheckoutTestCase, TestCase): def test_make_order_without_mandatory_product(self): with scopes_disabled(): self.event.settings["mandatory_product__combine"] = "choose" CartPosition.objects.create( 
event=self.event, cart_id=self.session_key, item=self.ticket, price=23, expires=now() + datetime.timedelta(minutes=10), ) response = self.client.get( "/%s/%s/checkout/questions/" % (self.event.organizer.slug, self.event.slug), ) self.assertNotEqual(response.status_code, 200) self.assertRedirects( response, "/%s/%s/?require_cookie=true" % (self.event.organizer.slug, self.event.slug), ) def test_make_order_with_mandatory_product(self): with scopes_disabled(): self.event.settings["mandatory_product__combine"] = "choose" CartPosition.objects.create( event=self.event, cart_id=self.session_key, item=self.ticket_mandatory, price=23, expires=now() + datetime.timedelta(minutes=10), ) response = self.client.get( "/%s/%s/checkout/questions/" % (self.event.organizer.slug, self.event.slug), ) self.assertEqual(response.status_code, 200) def test_make_order_without_mandatory_product_combine_fail_1(self): with scopes_disabled(): self.event.settings["mandatory_product__combine"] = "combine" CartPosition.objects.create( event=self.event, cart_id=self.session_key, item=self.ticket, price=23, expires=now() + datetime.timedelta(minutes=10), ) CartPosition.objects.create( event=self.event, cart_id=self.session_key, item=self.ticket_mandatory, price=23, expires=now() + datetime.timedelta(minutes=10), ) response = self.client.get( "/%s/%s/checkout/questions/" % (self.event.organizer.slug, self.event.slug), ) self.assertNotEqual(response.status_code, 200) self.assertRedirects( response, "/%s/%s/?require_cookie=true" % (self.event.organizer.slug, self.event.slug), ) def test_make_order_with_mandatory_product_combine_fail_2(self): with scopes_disabled(): self.event.settings["mandatory_product__combine"] = "combine" CartPosition.objects.create( event=self.event, cart_id=self.session_key, item=self.ticket, price=23, expires=now() + datetime.timedelta(minutes=10), ) CartPosition.objects.create( event=self.event, cart_id=self.session_key, item=self.ticket_mandatory2, price=23, expires=now() + 
datetime.timedelta(minutes=10), ) response = self.client.get( "/%s/%s/checkout/questions/" % (self.event.organizer.slug, self.event.slug), ) self.assertNotEqual(response.status_code, 200) self.assertRedirects( response, "/%s/%s/?require_cookie=true" % (self.event.organizer.slug, self.event.slug), ) def test_make_order_with_mandatory_product_combine_success(self): with scopes_disabled(): self.event.settings["mandatory_product__combine"] = "combine" CartPosition.objects.create( event=self.event, cart_id=self.session_key, item=self.ticket_mandatory, price=23, expires=now() + datetime.timedelta(minutes=10), ) CartPosition.objects.create( event=self.event, cart_id=self.session_key, item=self.ticket_mandatory2, price=23, expires=now() + datetime.timedelta(minutes=10), ) response = self.client.get( "/%s/%s/checkout/questions/" % (self.event.organizer.slug, self.event.slug), ) self.assertEqual(response.status_code, 200)
33.859223
88
0.557133
687
6,975
5.484716
0.144105
0.093153
0.062102
0.075902
0.775212
0.7569
0.740446
0.723992
0.704883
0.704883
0
0.013423
0.337778
6,975
205
89
34.02439
0.802338
0
0
0.610465
0
0
0.077563
0.055627
0
0
0
0
0.046512
1
0.040698
false
0
0.034884
0
0.087209
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
91ca33ac6c89181a1bddf04dea015ecb6026b4cb
162
py
Python
resources/sumo_exporter/re_definitions.py
awesome-archive/webots
8e74fb8393d1e3a6540749afc492635c43f1b30f
[ "Apache-2.0" ]
2
2019-07-12T13:47:44.000Z
2019-08-17T02:53:54.000Z
resources/sumo_exporter/re_definitions.py
golbh/webots
8e74fb8393d1e3a6540749afc492635c43f1b30f
[ "Apache-2.0" ]
null
null
null
resources/sumo_exporter/re_definitions.py
golbh/webots
8e74fb8393d1e3a6540749afc492635c43f1b30f
[ "Apache-2.0" ]
1
2019-06-29T06:34:14.000Z
2019-06-29T06:34:14.000Z
"""Regex definitions.""" # https://stackoverflow.com/a/4703508 floatRE = '[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?' intRE = '[-+]?\d+(?:[Ee][+-]?\d+)?'
27
61
0.41358
17
162
3.941176
0.647059
0.059701
0.119403
0
0
0
0
0
0
0
0
0.046358
0.067901
162
5
62
32.4
0.397351
0.339506
0
0
0
0
0.732673
0.732673
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
91cea23563f17bef52590f03dec99194e581898f
890
py
Python
envs/fetch/interval.py
erick-alv/g-hgg
2cc0de9810ca6823ad6339cf4d1a63e265d1b5ee
[ "MIT" ]
2
2021-04-27T21:10:36.000Z
2021-09-14T07:42:21.000Z
envs/fetch/interval.py
erick-alv/g-hgg
2cc0de9810ca6823ad6339cf4d1a63e265d1b5ee
[ "MIT" ]
1
2021-07-06T11:52:12.000Z
2021-07-06T11:52:12.000Z
envs/fetch/interval.py
erick-alv/g-hgg
2cc0de9810ca6823ad6339cf4d1a63e265d1b5ee
[ "MIT" ]
2
2021-04-06T22:57:44.000Z
2021-04-26T14:50:28.000Z
import gym import numpy as np from .fixobj import FixedObjectGoalEnv class IntervalGoalEnv(FixedObjectGoalEnv): def __init__(self, args): FixedObjectGoalEnv.__init__(self, args) def generate_goal(self): if self.has_object: goal = self.initial_gripper_xpos[:3] + self.target_offset if self.args.env=='FetchSlide-v1': goal[0] += self.target_range*0.5 goal[1] += np.random.uniform(-self.target_range, self.target_range)*0.5 else: goal[0] += np.random.uniform(-self.target_range, self.target_range) goal[1] += self.target_range #goal[1] += np.random.uniform(-self.target_range, self.target_range) # TODO: changed goal[2] = self.height_offset + int(self.target_in_the_air)*0.45 else: goal = self.initial_gripper_xpos[:3] + np.array([np.random.uniform(-self.target_range, self.target_range), self.target_range, self.target_range]) return goal.copy()
40.454545
148
0.735955
134
890
4.656716
0.350746
0.224359
0.288462
0.182692
0.501603
0.439103
0.352564
0.352564
0.352564
0.160256
0
0.020566
0.125843
890
22
149
40.454545
0.781491
0.092135
0
0.105263
1
0
0.016109
0
0
0
0
0.045455
0
1
0.105263
false
0
0.157895
0
0.368421
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
5
91d870cfed10f0c4c1cb2cb214469a6699f2bf09
151
py
Python
ML/Recommendation_sys/tag_based/predict.py
shauryajaggi/Team_Nodemon
7ffe9d02c53e6cbc96af6381803bd69619238da1
[ "MIT" ]
9
2021-09-10T18:27:43.000Z
2021-12-18T14:12:14.000Z
ML/Recommendation_sys/tag_based/predict.py
shauryajaggi/Team_Nodemon
7ffe9d02c53e6cbc96af6381803bd69619238da1
[ "MIT" ]
null
null
null
ML/Recommendation_sys/tag_based/predict.py
shauryajaggi/Team_Nodemon
7ffe9d02c53e6cbc96af6381803bd69619238da1
[ "MIT" ]
4
2021-09-10T18:28:24.000Z
2021-10-01T07:51:40.000Z
from main import * import time start_time = time.time() print(predict_products(["Red lays"])) print("--- %s seconds ---" % (time.time() - start_time))
25.166667
56
0.668874
21
151
4.666667
0.571429
0.244898
0.265306
0
0
0
0
0
0
0
0
0
0.125828
151
6
56
25.166667
0.742424
0
0
0
0
0
0.171053
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0.4
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
530b7576c1dd3432ee9c2f1219059847b662608b
158
py
Python
seglossbias/utils/__init__.py
by-liu/SegLossBia
9cc639c04084cda9d5fb20ea34699db7e0beaf5c
[ "MIT" ]
18
2021-04-20T17:03:20.000Z
2022-03-12T05:56:24.000Z
seglossbias/utils/__init__.py
by-liu/SegLossBia
9cc639c04084cda9d5fb20ea34699db7e0beaf5c
[ "MIT" ]
null
null
null
seglossbias/utils/__init__.py
by-liu/SegLossBia
9cc639c04084cda9d5fb20ea34699db7e0beaf5c
[ "MIT" ]
1
2021-07-08T17:44:15.000Z
2021-07-08T17:44:15.000Z
from .file_io import * from .checkpoint import * from .tensorboard_vis import TensorboardWriter from .misc import set_random_seed, setup_logging, get_logfile
31.6
61
0.835443
22
158
5.727273
0.727273
0.15873
0
0
0
0
0
0
0
0
0
0
0.113924
158
4
62
39.5
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
530c894a9ec616955b8045c1bbb02833d4c61d29
222
py
Python
panovel/filter/noindent.py
dickloraine/panovel
622fc4e4bd0177e7a2d05cf0bfb67edd21b76214
[ "MIT" ]
null
null
null
panovel/filter/noindent.py
dickloraine/panovel
622fc4e4bd0177e7a2d05cf0bfb67edd21b76214
[ "MIT" ]
1
2020-09-16T19:08:06.000Z
2020-09-16T19:08:06.000Z
panovel/filter/noindent.py
dickloraine/panovel
622fc4e4bd0177e7a2d05cf0bfb67edd21b76214
[ "MIT" ]
null
null
null
from panovel import run_pandoc_filter if __name__ == "__main__": run_pandoc_filter( ["noindent", "no-indent", "no_indent"], '\\noindent\n{text}\n', '<div class="noindent">\n{text}\n</div>\n')
24.666667
51
0.603604
29
222
4.172414
0.551724
0.14876
0.247934
0.231405
0.280992
0
0
0
0
0
0
0
0.202703
222
8
52
27.75
0.683616
0
0
0
0
0
0.423423
0.157658
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
533137176c8afda5ae839ad064228c56b491683d
610
py
Python
Flask backend/src/core/entities/customer.py
MuhamedAbdalla/e-commerce
9e06e699e696d50d7739df355f0bc8708195cb35
[ "MIT" ]
1
2021-04-26T00:17:12.000Z
2021-04-26T00:17:12.000Z
Flask backend/src/core/entities/customer.py
MuhamedAbdalla/e-commerce
9e06e699e696d50d7739df355f0bc8708195cb35
[ "MIT" ]
null
null
null
Flask backend/src/core/entities/customer.py
MuhamedAbdalla/e-commerce
9e06e699e696d50d7739df355f0bc8708195cb35
[ "MIT" ]
null
null
null
class Customer: def __init__(self, id, custName, password, gender, birthdate, job): self._id = id self._custName = custName self._password = password self._gender = gender self._birthdate = birthdate self._job = job def id(self): return self._id def custName(self): return self._custName def password(self): return self._password def gender(self): return self._gender def birthdate(self): return self._birthdate def job(self): return self._job def toJson(self): pass
20.333333
71
0.595082
69
610
5.028986
0.202899
0.172911
0.242075
0
0
0
0
0
0
0
0
0
0.32459
610
29
72
21.034483
0.842233
0
0
0
0
0
0
0
0
0
0
0
0
1
0.363636
false
0.227273
0
0.272727
0.681818
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
5361cbb6ca0bb0403f467843809aa831c26bbd21
69
py
Python
Scripts/ict/tests/flows.py
mspgeek/Client_Portal
0267168bb90e8e9c85aecdd715972b9622b82384
[ "MIT" ]
4
2020-04-08T01:13:48.000Z
2020-08-15T17:12:07.000Z
Scripts/ict/tests/flows.py
mspgeek/Client_Portal
0267168bb90e8e9c85aecdd715972b9622b82384
[ "MIT" ]
1
2021-04-12T12:55:24.000Z
2021-04-12T12:55:24.000Z
Scripts/ict/tests/flows.py
mspgeek/Client_Portal
0267168bb90e8e9c85aecdd715972b9622b82384
[ "MIT" ]
null
null
null
from .test_templatetags_viewflow import TestTemplateTagsFlow # NOQA
34.5
68
0.869565
7
69
8.285714
1
0
0
0
0
0
0
0
0
0
0
0
0.101449
69
1
69
69
0.935484
0.057971
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
536c237599020eb227174fe6616abd6d6b8d4199
87
py
Python
clients/admin.py
loribonna/EsameLDPython
02f671d0813e4e3cfed5a977018ab295b8675d60
[ "MIT" ]
null
null
null
clients/admin.py
loribonna/EsameLDPython
02f671d0813e4e3cfed5a977018ab295b8675d60
[ "MIT" ]
null
null
null
clients/admin.py
loribonna/EsameLDPython
02f671d0813e4e3cfed5a977018ab295b8675d60
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Client admin.site.register(Client)
29
32
0.83908
13
87
5.615385
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.091954
87
3
33
29
0.924051
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
536df4d51713164ba879b80ac8981803fda8230e
65
py
Python
yt/utilities/answer_testing/api.py
aemerick/yt
984484616d75c6d7603e71b9d45c5d617705a0e5
[ "BSD-3-Clause-Clear" ]
null
null
null
yt/utilities/answer_testing/api.py
aemerick/yt
984484616d75c6d7603e71b9d45c5d617705a0e5
[ "BSD-3-Clause-Clear" ]
null
null
null
yt/utilities/answer_testing/api.py
aemerick/yt
984484616d75c6d7603e71b9d45c5d617705a0e5
[ "BSD-3-Clause-Clear" ]
null
null
null
from yt.utilities.answer_testing.framework import AnswerTesting
21.666667
63
0.876923
8
65
7
1
0
0
0
0
0
0
0
0
0
0
0
0.076923
65
2
64
32.5
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7274c7a14a4baddec49afb545bde7f4a9474119a
847
py
Python
math_ops_linear algebra/tf_qr.py
Asurada2015/TFAPI_translation
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
[ "Apache-2.0" ]
7
2017-10-19T13:59:24.000Z
2019-11-26T03:40:08.000Z
math_ops_linear algebra/tf_qr.py
Asurada2015/TFAPI_translation
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
[ "Apache-2.0" ]
null
null
null
math_ops_linear algebra/tf_qr.py
Asurada2015/TFAPI_translation
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
[ "Apache-2.0" ]
5
2018-08-22T02:57:03.000Z
2020-03-05T07:14:21.000Z
import tensorflow as tf """tf.qr(input, full_matrices=None, name=None) 功能:对矩阵进行qr分解。 输入:。""" a = tf.constant([1, 2, 2, 1, 0, 2, 0, 1, 1], shape=[3, 3], dtype=tf.float64) q, r = tf.qr(a) sess = tf.Session() print(sess.run(tf.qr(a))) sess.close() # q==>[[-0.70710678 0.57735027 -0.40824829] # [-0.70710678 -0.57735027 0.40824829] # [0. 0.57735027 0.81649658 ]] # r==>[[-1.41421356 -1.41421356 -2.82842712] # [0. 1.73205081 0.57735027] # [0. 0. 0.81649658]] # Qr(q=array([[-0.70710678, 0.57735027, -0.40824829], # [-0.70710678, -0.57735027, 0.40824829], # [ 0. , 0.57735027, 0.81649658]]), r=array([[-1.41421356, -1.41421356, -2.82842712], # [ 0. , 1.73205081, 0.57735027], # [ 0. , 0. , 0.81649658]]))
35.291667
101
0.507674
120
847
3.575
0.308333
0.167832
0.18648
0.167832
0.610723
0.610723
0.610723
0.610723
0.610723
0.610723
0
0.445351
0.276269
847
24
102
35.291667
0.254486
0.680047
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7296a309e2620b28f42f1263a0ef32d172858988
165
py
Python
APS/1-Musical/HC05-Controle-Exemplo/PC_Python/pyvjoy/__init__.py
Otofuji/embarcados
6e0cc4b657ab7c7765051308fc462c1226be22e3
[ "MIT" ]
8
2018-02-21T04:40:58.000Z
2021-02-07T17:31:51.000Z
APS/1-Musical/HC05-Controle-Exemplo/PC_Python/pyvjoy/__init__.py
Otofuji/embarcados
6e0cc4b657ab7c7765051308fc462c1226be22e3
[ "MIT" ]
1
2019-03-22T13:55:40.000Z
2019-03-22T13:55:40.000Z
APS/1-Musical/HC05-Controle-Exemplo/PC_Python/pyvjoy/__init__.py
Otofuji/embarcados
6e0cc4b657ab7c7765051308fc462c1226be22e3
[ "MIT" ]
3
2019-04-18T14:35:38.000Z
2019-07-30T22:49:53.000Z
__all__ = ['constants', 'exceptions'] from pyvjoy.constants import * from pyvjoy.exceptions import * import pyvjoy._sdk from pyvjoy.vjoydevice import VJoyDevice
18.333333
40
0.781818
19
165
6.526316
0.421053
0.241935
0
0
0
0
0
0
0
0
0
0
0.133333
165
8
41
20.625
0.867133
0
0
0
0
0
0.115152
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
72b18b2620ed6ae680a6997f8ed47fbdf218a8fc
18
py
Python
python/pcasig/gls/__init__.py
zpace/pcasig
f659d956192125f89a17ecaf59825ce301d95455
[ "BSD-3-Clause" ]
null
null
null
python/pcasig/gls/__init__.py
zpace/pcasig
f659d956192125f89a17ecaf59825ce301d95455
[ "BSD-3-Clause" ]
null
null
null
python/pcasig/gls/__init__.py
zpace/pcasig
f659d956192125f89a17ecaf59825ce301d95455
[ "BSD-3-Clause" ]
null
null
null
from .gls import *
18
18
0.722222
3
18
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
18
1
18
18
0.866667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f460efc834855902319ff6b75d10b9883a4dcb7b
2,154
py
Python
briefingt/plugins/briefingt.py
thriuin/oc_search_definitions
ea9a629dd1a385b687ab9ad8ccd4efbf057797ad
[ "MIT" ]
null
null
null
briefingt/plugins/briefingt.py
thriuin/oc_search_definitions
ea9a629dd1a385b687ab9ad8ccd4efbf057797ad
[ "MIT" ]
null
null
null
briefingt/plugins/briefingt.py
thriuin/oc_search_definitions
ea9a629dd1a385b687ab9ad8ccd4efbf057797ad
[ "MIT" ]
null
null
null
from django.http import HttpRequest from search.models import Search, Field, Code from SolrClient import SolrResponse def plugin_api_version(): return 1.0 def pre_search_solr_query(context: dict, solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, facets: list, record_ids: str): return context, solr_query def post_search_solr_query(context: dict, solr_response: SolrResponse, solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, facets: list, record_ids: str): return context, solr_response def pre_record_solr_query(context: dict, solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, facets: list, record_ids: str): return context, solr_query def post_record_solr_query(context: dict, solr_response: SolrResponse, solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, facets: list, record_ids: str): return context, solr_response def pre_export_solr_query(solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, facets: list): return solr_query def post_export_solr_query(solr_response: SolrResponse, solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, facets: list): return solr_response def pre_mlt_solr_query(context: dict, solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, record_is: str): return context, solr_query def post_mlt_solr_query(context: dict, solr_response: SolrResponse, solr_query: dict, request: HttpRequest, search: Search, fields: dict, codes: dict, record_ids: str): return context, solr_response def filter_csv_record(csv_record,search: Search, fields: dict, codes: dict, format: str): return True, csv_record def load_csv_record(csv_record: dict, solr_record: dict, search: Search, fields: dict, codes: dict, format: str): if solr_record['title_en']: solr_record['title_en'] = str(solr_record['title_en']).strip() if solr_record['title_fr']: 
solr_record['title_fr'] = str(solr_record['title_fr']).strip() return solr_record
41.423077
185
0.759517
304
2,154
5.141447
0.151316
0.115163
0.115163
0.140755
0.734485
0.734485
0.715291
0.694818
0.621881
0.621881
0
0.001079
0.139276
2,154
51
186
42.235294
0.841963
0
0
0.206897
0
0
0.022284
0
0
0
0
0
0
1
0.37931
false
0
0.103448
0.344828
0.862069
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
be48b1f79a91fa4e55e175a5b969e13f5281a421
85
py
Python
dashboard/app/src/config.py
alesanmed-educational-projects/core-data-covid-project
ab43beb98dba4153320796b54a078bb6075b0fd5
[ "Unlicense" ]
null
null
null
dashboard/app/src/config.py
alesanmed-educational-projects/core-data-covid-project
ab43beb98dba4153320796b54a078bb6075b0fd5
[ "Unlicense" ]
18
2021-07-24T20:17:40.000Z
2021-08-19T09:55:01.000Z
dashboard/app/src/config.py
alesanmed-educational-projects/core-data-covid-project
ab43beb98dba4153320796b54a078bb6075b0fd5
[ "Unlicense" ]
null
null
null
import os class Config(object): BACK_URL: str = os.environ.get("BACK_URL", "")
14.166667
50
0.658824
13
85
4.153846
0.769231
0.259259
0
0
0
0
0
0
0
0
0
0
0.176471
85
5
51
17
0.771429
0
0
0
0
0
0.094118
0
0
0
0
0
0
1
0
true
0
0.333333
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
be78f950281e67de8239b2dc13318c8c53a9b507
180
py
Python
poglink/error.py
DoDoom/dodo
592844d7f8df356438f61e857cd53f7daf0dddfa
[ "MIT" ]
null
null
null
poglink/error.py
DoDoom/dodo
592844d7f8df356438f61e857cd53f7daf0dddfa
[ "MIT" ]
46
2021-12-13T01:38:07.000Z
2022-02-15T17:20:23.000Z
poglink/error.py
DoDoom/dodo
592844d7f8df356438f61e857cd53f7daf0dddfa
[ "MIT" ]
1
2022-02-22T14:26:20.000Z
2022-02-22T14:26:20.000Z
class RatesWriteError(Exception): pass class RatesFetchError(Exception): pass class RatesProcessError(Exception): pass class ConfigReadError(Exception): pass
12
35
0.744444
16
180
8.375
0.4375
0.38806
0.402985
0
0
0
0
0
0
0
0
0
0.188889
180
14
36
12.857143
0.917808
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
be8e3d7c89a8fb9149eddd97720774f83c974a61
91
py
Python
Parte1/Cap6/alien.py
fabianoflorentino/python-CursoIntensivoDePython
822288cc4b382936dde1bc647e3f8c2b925ced70
[ "Apache-2.0" ]
null
null
null
Parte1/Cap6/alien.py
fabianoflorentino/python-CursoIntensivoDePython
822288cc4b382936dde1bc647e3f8c2b925ced70
[ "Apache-2.0" ]
null
null
null
Parte1/Cap6/alien.py
fabianoflorentino/python-CursoIntensivoDePython
822288cc4b382936dde1bc647e3f8c2b925ced70
[ "Apache-2.0" ]
1
2020-02-05T13:07:08.000Z
2020-02-05T13:07:08.000Z
alien_0 = {'color': 'green', 'points': 5} print(alien_0['color']) print(alien_0['points'])
22.75
41
0.648352
14
91
4
0.5
0.321429
0.392857
0
0
0
0
0
0
0
0
0.048193
0.087912
91
4
42
22.75
0.626506
0
0
0
0
0
0.293478
0
0
0
0
0
0
1
0
false
0
0
0
0
0.666667
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
beb2738ee0d0122f13883bd2f34e5aa7b7b6b8f7
751
py
Python
forms.py
taoxen/flaskapp
c0e7464b2196117522eefd8a9a6bb1396af9b951
[ "MIT" ]
null
null
null
forms.py
taoxen/flaskapp
c0e7464b2196117522eefd8a9a6bb1396af9b951
[ "MIT" ]
null
null
null
forms.py
taoxen/flaskapp
c0e7464b2196117522eefd8a9a6bb1396af9b951
[ "MIT" ]
null
null
null
from flask_wtf import FlaskForm from wtforms import StringField, PasswordField, BooleanField from wtforms.validators import InputRequired, Email, Length class LoginForm(FlaskForm): username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)]) password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)]) remember = BooleanField('remember me') class RegisterForm(FlaskForm): email = StringField('email', validators=[InputRequired(), Email(message='Invalid Email Address'), Length(max=50)]) username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)]) password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
44.176471
118
0.747004
81
751
6.91358
0.37037
0.205357
0.207143
0.228571
0.471429
0.471429
0.471429
0.471429
0.471429
0.471429
0
0.021021
0.113182
751
16
119
46.9375
0.81982
0
0
0.363636
0
0
0.092123
0
0
0
0
0
0
1
0
false
0.272727
0.272727
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
fe2c946a2fe863eca9192222eb0c782852e63600
131
py
Python
solutions/575.distribute-candies.242494828.ac.py
satu0king/Leetcode-Solutions
2edff60d76c2898d912197044f6284efeeb34119
[ "MIT" ]
78
2020-10-22T11:31:53.000Z
2022-02-22T13:27:49.000Z
solutions/575.distribute-candies.242494828.ac.py
satu0king/Leetcode-Solutions
2edff60d76c2898d912197044f6284efeeb34119
[ "MIT" ]
null
null
null
solutions/575.distribute-candies.242494828.ac.py
satu0king/Leetcode-Solutions
2edff60d76c2898d912197044f6284efeeb34119
[ "MIT" ]
26
2020-10-23T15:10:44.000Z
2021-11-07T16:13:50.000Z
class Solution: def distributeCandies(self, candies: List[int]) -> int: return min(len(candies)//2, len(set(candies)))
32.75
59
0.664122
17
131
5.117647
0.764706
0
0
0
0
0
0
0
0
0
0
0.009259
0.175573
131
3
60
43.666667
0.796296
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
fe4f9a07c11c716383e8794c8172848d872508fe
229
py
Python
laguinho/errors/handlers.py
MarioHdpz/laguinho-api
36ad947e1efe98b0842889d2baf947ed349234b4
[ "MIT" ]
27
2019-02-09T04:25:23.000Z
2020-05-22T18:34:06.000Z
laguinho/errors/handlers.py
MarioHdpz/laguinho-api
36ad947e1efe98b0842889d2baf947ed349234b4
[ "MIT" ]
51
2019-02-15T03:17:53.000Z
2021-05-10T23:44:40.000Z
laguinho/errors/handlers.py
RonnanSouza/laguinho-api
cef9ee0c4794597796607b47b72c8a4ae009ca0f
[ "MIT" ]
28
2019-02-15T03:16:03.000Z
2020-11-25T15:12:41.000Z
from flask import Blueprint, jsonify from marshmallow import ValidationError errors = Blueprint('errors', __name__) @errors.app_errorhandler(ValidationError) def handle_validation_error(e): return jsonify(e.messages), 422
22.9
41
0.80786
27
229
6.592593
0.703704
0
0
0
0
0
0
0
0
0
0
0.014778
0.113537
229
9
42
25.444444
0.862069
0
0
0
0
0
0.026201
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0.166667
0.666667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
fea673a90cc2b1dcca65b3866b7e4f8d64113c3a
230
py
Python
app/config.py
bzg/graph-explorer
86208e091a4f259a3a2076a811ac0e89b18787ee
[ "MIT" ]
null
null
null
app/config.py
bzg/graph-explorer
86208e091a4f259a3a2076a811ac0e89b18787ee
[ "MIT" ]
null
null
null
app/config.py
bzg/graph-explorer
86208e091a4f259a3a2076a811ac0e89b18787ee
[ "MIT" ]
null
null
null
class Config(object): pass class ProductionConfig(Config): ELASTICSEARCH_HOST = 'elasticsearch' JANUS_HOST = 'janus' class DevelopmentConfig(Config): ELASTICSEARCH_HOST = '127.0.0.1' JANUS_HOST = '127.0.0.1'
20.909091
40
0.704348
28
230
5.642857
0.428571
0.240506
0.291139
0.113924
0.126582
0
0
0
0
0
0
0.063492
0.178261
230
10
41
23
0.772487
0
0
0
0
0
0.156522
0
0
0
0
0
0
1
0
false
0.125
0
0
0.875
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
22905ca4b8515545f7706514c2408df9c55ab0f1
330
py
Python
rameniaapp/migrations/0017_merge_20201019_0448.py
awlane/ramenia
6bf8e75a1f279ac584daa4ee19927ffccaa67551
[ "MIT" ]
null
null
null
rameniaapp/migrations/0017_merge_20201019_0448.py
awlane/ramenia
6bf8e75a1f279ac584daa4ee19927ffccaa67551
[ "MIT" ]
null
null
null
rameniaapp/migrations/0017_merge_20201019_0448.py
awlane/ramenia
6bf8e75a1f279ac584daa4ee19927ffccaa67551
[ "MIT" ]
null
null
null
# Generated by Django 3.1.2 on 2020-10-19 04:48 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('rameniaapp', '0016_auto_20201013_2359'), ('rameniaapp', '0016_auto_20201015_0207'), ('rameniaapp', '0016_auto_20201015_1928'), ] operations = [ ]
20.625
50
0.654545
38
330
5.447368
0.736842
0.202899
0.26087
0.251208
0
0
0
0
0
0
0
0.245136
0.221212
330
15
51
22
0.560311
0.136364
0
0
1
0
0.349823
0.243816
0
0
0
0
0
1
0
false
0
0.111111
0
0.444444
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
229a31253e0b7fbfa7f873389ada2257e43bded8
42
py
Python
grortir/main/logging/__init__.py
wojtekPi/grortir
0ef8b495527a4f3861e5df5db756d0ee3ed4aa6f
[ "MIT" ]
null
null
null
grortir/main/logging/__init__.py
wojtekPi/grortir
0ef8b495527a4f3861e5df5db756d0ee3ed4aa6f
[ "MIT" ]
null
null
null
grortir/main/logging/__init__.py
wojtekPi/grortir
0ef8b495527a4f3861e5df5db756d0ee3ed4aa6f
[ "MIT" ]
null
null
null
"""Package contains tools for logging."""
21
41
0.714286
5
42
6
1
0
0
0
0
0
0
0
0
0
0
0
0.119048
42
1
42
42
0.810811
0.833333
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
22a253fa924d484e252300626b18c4ca9bf92dd3
34
py
Python
problema9/main.py
aaronorduna/PC3
08e571430d8e301a1682a14851c1675613c1a16e
[ "Apache-2.0" ]
null
null
null
problema9/main.py
aaronorduna/PC3
08e571430d8e301a1682a14851c1675613c1a16e
[ "Apache-2.0" ]
null
null
null
problema9/main.py
aaronorduna/PC3
08e571430d8e301a1682a14851c1675613c1a16e
[ "Apache-2.0" ]
null
null
null
import funcion funcion.adivinar()
17
18
0.823529
4
34
7
0.75
0
0
0
0
0
0
0
0
0
0
0
0.088235
34
2
18
17
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
22c34d17952868b45d710f301a3d14a3d907d207
147
py
Python
src/environments/layouts/small.py
grockious/lcrl
65c528e0d19600d83a1158e2e2990fe45aa7918f
[ "MIT" ]
18
2020-08-03T20:26:45.000Z
2022-03-16T14:50:05.000Z
src/environments/layouts/small.py
grockious/lcrl
65c528e0d19600d83a1158e2e2990fe45aa7918f
[ "MIT" ]
1
2020-08-30T13:02:42.000Z
2020-09-29T17:38:18.000Z
src/environments/layouts/small.py
grockious/lcrl
65c528e0d19600d83a1158e2e2990fe45aa7918f
[ "MIT" ]
4
2021-01-09T02:35:52.000Z
2021-11-04T02:49:14.000Z
%%%%%%%%%%%%%%%%%%%% %......%G G%......% %.%..%.%%%% %.%..%.% %.%o............o%.% %.%..%.%%%%%%.%..%.% %........P.........% %%%%%%%%%%%%%%%%%%%%
18.375
20
0.034014
5
147
1
0.6
0
0
0
0
0
0
0
0
0
0
0
0.068027
147
7
21
21
0.036496
0
0
0.285714
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
22efdd19086ec7277907233a3c0993b4388d8aaa
9,172
py
Python
pyscf/pbc/df/test/test_ft_ao.py
KMCzajkowski/pyscf
e8af41d910cc0d3963655120c0b689590ad978e7
[ "BSD-2-Clause" ]
null
null
null
pyscf/pbc/df/test/test_ft_ao.py
KMCzajkowski/pyscf
e8af41d910cc0d3963655120c0b689590ad978e7
[ "BSD-2-Clause" ]
null
null
null
pyscf/pbc/df/test/test_ft_ao.py
KMCzajkowski/pyscf
e8af41d910cc0d3963655120c0b689590ad978e7
[ "BSD-2-Clause" ]
1
2018-12-06T03:10:50.000Z
2018-12-06T03:10:50.000Z
#!/usr/bin/env python import unittest import numpy from pyscf.pbc import gto as pgto from pyscf.pbc import dft as pdft from pyscf.pbc.df import ft_ao from pyscf.pbc import tools from pyscf import lib cell = pgto.Cell() cell.atom = ''' He1 1.3 .2 .3 He2 .1 .1 1.1 ''' cell.basis = {'He1': 'sto3g', 'He2': 'ccpvdz'} cell.gs = (15,)*3 cell.a = numpy.diag([2.2, 1.9, 2.]) cell.build() cell1 = pgto.Cell() cell1.atom = ''' He 1.3 .2 .3 He .1 .1 1.1 ''' cell1.basis = {'He': [[0, [0.8, 1]], [1, [0.6, 1]] ]} cell1.gs = [8]*3 cell1.a = numpy.array(([2.0, .9, 0. ], [0.1, 1.9, 0.4], [0.8, 0 , 2.1])) cell1.build() def finger(a): w = numpy.cos(numpy.arange(a.size)) return numpy.dot(w, a.ravel()) class KnowValues(unittest.TestCase): def test_ft_ao(self): coords = pdft.gen_grid.gen_uniform_grids(cell) aoR = pdft.numint.eval_ao(cell, coords) ngs, nao = aoR.shape ref = numpy.asarray([tools.fft(aoR[:,i], cell.gs) for i in range(nao)]) ref = ref.T * (cell.vol/ngs) dat = ft_ao.ft_ao(cell, cell.Gv) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 8.4358614794095722e-11, 9) self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0.0041669297531642616 , 4) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 5.8677286005879366e-14, 9) coords = pdft.gen_grid.gen_uniform_grids(cell1) aoR = pdft.numint.eval_ao(cell1, coords) ngs, nao = aoR.shape ref = numpy.asarray([tools.fft(aoR[:,i], cell1.gs) for i in range(nao)]) ref = ref.T * (cell1.vol/ngs) dat = ft_ao.ft_ao(cell1, cell1.Gv) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 0, 5) self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0, 3) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 0, 3) def test_ft_ao_with_kpts(self): numpy.random.seed(1) kpt = numpy.random.random(3) coords = pdft.gen_grid.gen_uniform_grids(cell) aoR = pdft.numint.eval_ao(cell, coords, kpt=kpt) ngs, nao = aoR.shape expmikr = numpy.exp(-1j*numpy.dot(coords,kpt)) ref = numpy.asarray([tools.fftk(aoR[:,i], 
cell.gs, expmikr) for i in range(nao)]) ref = ref.T * (cell.vol/ngs) dat = ft_ao.ft_ao(cell, cell.Gv, kpt=kpt) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 1.3359899490499813e-10, 9) self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0.0042404556036939756 , 4) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 4.8856357999633564e-14, 9) coords = pdft.gen_grid.gen_uniform_grids(cell1) aoR = pdft.numint.eval_ao(cell1, coords, kpt=kpt) ngs, nao = aoR.shape expmikr = numpy.exp(-1j*numpy.dot(coords,kpt)) ref = numpy.asarray([tools.fftk(aoR[:,i], cell1.gs, expmikr) for i in range(nao)]) ref = ref.T * (cell1.vol/ngs) dat = ft_ao.ft_ao(cell1, cell1.Gv, kpt=kpt) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 0, 5) self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0, 3) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 0, 3) def test_ft_aoao(self): #coords = pdft.gen_grid.gen_uniform_grids(cell) #aoR = pdft.numint.eval_ao(cell, coords) #ngs, nao = aoR.shape #ref = numpy.asarray([tools.fft(aoR[:,i].conj()*aoR[:,j], cell.gs) # for i in range(nao) for j in range(nao)]) #ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell.vol/ngs) #dat = ft_ao.ft_aopair(cell, cell.Gv, aosym='s1hermi') #self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 5) #self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0.02315483195832373, 4) #self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 9) #self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 9) #self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 9) #idx = numpy.tril_indices(nao) #ref = dat[:,idx[0],idx[1]] #dat = ft_ao.ft_aopair(cell, cell.Gv, aosym='s2') #self.assertAlmostEqual(abs(dat-ref).sum(), 0, 9) coords = pdft.gen_grid.gen_uniform_grids(cell1) Gv, Gvbase, kws = cell1.get_Gv_weights(cell1.gs) b = cell1.reciprocal_vectors() gxyz = lib.cartesian_prod([numpy.arange(len(x)) 
for x in Gvbase]) dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s1', b=b, gxyz=gxyz, Gvbase=Gvbase) self.assertAlmostEqual(finger(dat), 1.5666516306798806+1.953555017583245j, 9) dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s2', b=b, gxyz=gxyz, Gvbase=Gvbase) self.assertAlmostEqual(finger(dat), -0.85276967757297917+1.0378751267506394j, 9) dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s1hermi', b=b, gxyz=gxyz, Gvbase=Gvbase) self.assertAlmostEqual(finger(dat), 1.5666516306798806+1.953555017583245j, 9) aoR = pdft.numint.eval_ao(cell1, coords) ngs, nao = aoR.shape ref = numpy.asarray([tools.fft(aoR[:,i].conj()*aoR[:,j], cell1.gs) for i in range(nao) for j in range(nao)]) ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell1.vol/ngs) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 7) idx = numpy.tril_indices(nao) ref = dat[:,idx[0],idx[1]] dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s2') self.assertAlmostEqual(abs(dat-ref).sum(), 0, 9) def test_ft_aoao_with_kpts(self): numpy.random.seed(1) kpti, kptj = numpy.random.random((2,3)) dat = ft_ao.ft_aopair(cell, cell.Gv, kpti_kptj=(kpti,kptj)) self.assertAlmostEqual(finger(dat), -0.80184732435570638+2.4078835207597176j, 9) coords = pdft.gen_grid.gen_uniform_grids(cell) aoi = pdft.numint.eval_ao(cell, coords, kpt=kpti) aoj = pdft.numint.eval_ao(cell, coords, kpt=kptj) ngs, nao = aoj.shape q = kptj - kpti expmikr = numpy.exp(-1j*numpy.dot(coords,q)) ref = numpy.asarray([tools.fftk(aoi[:,i].conj()*aoj[:,j], cell.gs, expmikr) for i in range(nao) for j in range(nao)]) ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell.vol/ngs) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 5) 
self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0.023225471785938184 , 4) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 9) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 9) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 9) coords = pdft.gen_grid.gen_uniform_grids(cell1) aoi = pdft.numint.eval_ao(cell1, coords, kpt=kpti) aoj = pdft.numint.eval_ao(cell1, coords, kpt=kptj) ngs, nao = aoj.shape q = kptj - kpti dat = ft_ao.ft_aopair(cell1, cell1.Gv, kpti_kptj=(kpti,kptj), q=q) self.assertAlmostEqual(finger(dat), 0.72664436503332241+3.2542145296611373j, 9) expmikr = numpy.exp(-1j*numpy.dot(coords,q)) ref = numpy.asarray([tools.fftk(aoi[:,i].conj()*aoj[:,j], cell1.gs, expmikr) for i in range(nao) for j in range(nao)]) ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell1.vol/ngs) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 7) self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 7) def test_ft_aoao_with_kpts1(self): numpy.random.seed(1) kpti, kptj = kpts = numpy.random.random((2,3)) Gv = cell.get_Gv([5]*3) q = numpy.random.random(3) dat = ft_ao._ft_aopair_kpts(cell, Gv, q=q, kptjs=kpts) self.assertAlmostEqual(finger(dat[0]), (2.3753953914129382-2.5365192689115088j), 9) self.assertAlmostEqual(finger(dat[1]), (2.4951510097641840-3.1990956672116355j), 9) dat = ft_ao.ft_aopair(cell, Gv) self.assertAlmostEqual(finger(dat), (1.2534723618134684+1.830086071817564j), 9) if __name__ == '__main__': print('Full Tests for ft_ao') unittest.main()
50.395604
103
0.593109
1,370
9,172
3.89854
0.115328
0.165138
0.155776
0.191724
0.798914
0.750047
0.743119
0.713162
0.681146
0.667291
0
0.103114
0.222852
9,172
181
104
50.674033
0.646184
0.098888
0
0.369128
0
0
0.019399
0
0
0
0
0
0.241611
1
0.040268
false
0
0.04698
0
0.100671
0.006711
0
0
0
null
0
0
1
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
22f6417811cf9ee26669b00ee3b6fb9129d2b62c
610
py
Python
grab/spider/queue_backend/base.py
gonchik/grab
d007afb7aeab63036d494f3b2704be96ea570810
[ "MIT" ]
null
null
null
grab/spider/queue_backend/base.py
gonchik/grab
d007afb7aeab63036d494f3b2704be96ea570810
[ "MIT" ]
null
null
null
grab/spider/queue_backend/base.py
gonchik/grab
d007afb7aeab63036d494f3b2704be96ea570810
[ "MIT" ]
null
null
null
""" QueueInterface defines interface of queue backend. """ class QueueInterface(object): def __init__(self, spider_name, **kwargs): pass def put(self, task, priority): raise NotImplementedError def get(self): """ Return `Task` object or raise `Queue.Empty` exception @returns: `grab.spider.task.Task` object @raises: `Queue.Empty` exception """ raise NotImplementedError def size(self): raise NotImplementedError def clear(self): """Remove all tasks from the queue.""" raise NotImplementedError
21.785714
61
0.622951
62
610
6.048387
0.564516
0.256
0.216
0
0
0
0
0
0
0
0
0
0.277049
610
27
62
22.592593
0.85034
0.347541
0
0.363636
0
0
0
0
0
0
0
0
0
1
0.454545
false
0.090909
0
0
0.545455
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
a3ace384a40830c38abbd075d7c7cce9f270e2ab
19
py
Python
my_file2.py
adirir/My_repo
0b0d82e4ab9740c3c3b29d612fce062e3e5f9fbd
[ "Apache-2.0" ]
2
2018-11-08T09:20:42.000Z
2021-07-15T18:12:06.000Z
dir2/new_file2.py
ktbyers/pyneta
1690ce5a6ddb640198ccf3bca26f32a65d772b92
[ "Apache-2.0" ]
null
null
null
dir2/new_file2.py
ktbyers/pyneta
1690ce5a6ddb640198ccf3bca26f32a65d772b92
[ "Apache-2.0" ]
null
null
null
print("something")
9.5
18
0.736842
2
19
7
1
0
0
0
0
0
0
0
0
0
0
0
0.052632
19
1
19
19
0.777778
0
0
0
0
0
0.473684
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
a3fa476d28827e984e4a0453bd4aae7d0b827b64
46
py
Python
climoji/__main__.py
psincraian/climoji
097a4f647efb5d086588fc158e4c557ce1453d6c
[ "MIT" ]
18
2018-02-09T18:52:08.000Z
2022-02-21T08:10:05.000Z
climoji/__main__.py
psincraian/climoji
097a4f647efb5d086588fc158e4c557ce1453d6c
[ "MIT" ]
3
2018-02-09T18:10:48.000Z
2018-03-18T18:40:03.000Z
climoji/__main__.py
psincraian/climoji
097a4f647efb5d086588fc158e4c557ce1453d6c
[ "MIT" ]
1
2020-09-02T11:33:22.000Z
2020-09-02T11:33:22.000Z
from climoji.infrastructure import cli cli()
11.5
38
0.804348
6
46
6.166667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.130435
46
3
39
15.333333
0.925
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4a29e62df02b36cd0d1e08c3973ef7781fd7afd8
7,490
py
Python
server/tests/steps/test_filter.py
JeremyJacquemont/weaverbird
e04ab6f9c8381986ab71078e5199ece7a875e743
[ "BSD-3-Clause" ]
4
2019-03-01T11:26:12.000Z
2019-04-23T16:48:52.000Z
server/tests/steps/test_filter.py
JeremyJacquemont/weaverbird
e04ab6f9c8381986ab71078e5199ece7a875e743
[ "BSD-3-Clause" ]
279
2019-02-15T10:54:22.000Z
2019-10-19T19:41:48.000Z
server/tests/steps/test_filter.py
JeremyJacquemont/weaverbird
e04ab6f9c8381986ab71078e5199ece7a875e743
[ "BSD-3-Clause" ]
1
2019-05-28T18:44:15.000Z
2019-05-28T18:44:15.000Z
import pytest from pandas import DataFrame from tests.utils import assert_dataframes_equals from weaverbird.backends.pandas_executor.steps.filter import execute_filter from weaverbird.pipeline.conditions import ComparisonCondition from weaverbird.pipeline.steps import FilterStep @pytest.fixture def sample_df(): return DataFrame({'colA': ['toto', 'tutu', 'tata'], 'colB': [1, 2, 3], 'colC': [100, 50, 25]}) @pytest.mark.parametrize('value', [0, False, True, 0.0, 1, 1.5, '', '0', None, [], [0], '0.0']) def test_simple_condition_valid_values(value) -> None: # Ensure pydantic cast does not change types for `value` field: sc = ComparisonCondition(column='x', operator='eq', value=value) result_value = sc.value assert value == result_value assert type(value) == type(result_value) def test_simple_eq_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'eq', 'value': 'tutu', }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, DataFrame({'colA': ['tutu'], 'colB': [2], 'colC': [50]})) def test_simple_ne_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'ne', 'value': 'tutu', }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals( df_result, DataFrame({'colA': ['toto', 'tata'], 'colB': [1, 3], 'colC': [100, 25]}) ) def test_simple_gt_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colB', 'operator': 'gt', 'value': 2, }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, DataFrame({'colA': ['tata'], 'colB': [3], 'colC': [25]})) def test_simple_ge_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colB', 'operator': 'ge', 'value': 2, }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals( df_result, DataFrame({'colA': ['tutu', 'tata'], 'colB': [2, 3], 'colC': [50, 25]}) ) def test_simple_lt_filter(sample_df): step = FilterStep( name='filter', 
condition={ 'column': 'colB', 'operator': 'lt', 'value': 2, }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, DataFrame({'colA': ['toto'], 'colB': [1], 'colC': [100]})) def test_simple_le_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colB', 'operator': 'le', 'value': 2, }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals( df_result, DataFrame({'colA': ['toto', 'tutu'], 'colB': [1, 2], 'colC': [100, 50]}) ) def test_simple_in_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'in', 'value': ['toto', 'tutu'], }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals( df_result, DataFrame({'colA': ['toto', 'tutu'], 'colB': [1, 2], 'colC': [100, 50]}) ) def test_simple_nin_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'nin', 'value': ['toto', 'tutu'], }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, DataFrame({'colA': ['tata'], 'colB': [3], 'colC': [25]})) def test_simple_null_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'isnull', }, ) df_result = execute_filter(step, sample_df) assert df_result.empty def test_simple_notnull_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'notnull', }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, sample_df) def test_simple_matches_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'matches', 'value': 'a.a', }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, DataFrame({'colA': ['tata'], 'colB': [3], 'colC': [25]})) def test_simple_notmatches_filter(sample_df): step = FilterStep( name='filter', condition={ 'column': 'colA', 'operator': 'notmatches', 'value': 'a.a', }, ) df_result = execute_filter(step, 
sample_df) assert_dataframes_equals( df_result, DataFrame({'colA': ['toto', 'tutu'], 'colB': [1, 2], 'colC': [100, 50]}) ) def test_and_logical_conditions(sample_df): step = FilterStep( name='filter', condition={ 'and': [ { 'column': 'colB', 'operator': 'le', 'value': 2, }, { 'column': 'colC', 'operator': 'gt', 'value': 75, }, ] }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, DataFrame({'colA': ['toto'], 'colB': [1], 'colC': [100]})) def test_or_logical_conditions(sample_df): step = FilterStep( name='filter', condition={ 'or': [ { 'column': 'colA', 'operator': 'eq', 'value': 'toto', }, { 'column': 'colC', 'operator': 'lt', 'value': 33, }, ] }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals( df_result, DataFrame({'colA': ['toto', 'tata'], 'colB': [1, 3], 'colC': [100, 25]}) ) def test_nested_logical_conditions(sample_df): step = FilterStep( name='filter', condition={ 'and': [ { 'or': [ { 'column': 'colA', 'operator': 'eq', 'value': 'toto', }, { 'column': 'colC', 'operator': 'lt', 'value': 33, }, ] }, {'column': 'colB', 'operator': 'gt', 'value': 2}, ] }, ) df_result = execute_filter(step, sample_df) assert_dataframes_equals(df_result, DataFrame({'colA': ['tata'], 'colB': [3], 'colC': [25]})) def test_benchmark_filter(benchmark): big_df = DataFrame({'value': list(range(1000))}) step = FilterStep(name='filter', condition={'column': 'value', 'operator': 'lt', 'value': 20}) result = benchmark(execute_filter, step, big_df) assert len(result) == 20
26.654804
98
0.506275
733
7,490
4.946794
0.129604
0.070601
0.079426
0.105902
0.735521
0.73497
0.716492
0.716492
0.705736
0.689741
0
0.020959
0.337517
7,490
280
99
26.75
0.709794
0.008144
0
0.546256
0
0
0.125353
0
0
0
0
0
0.0837
1
0.079295
false
0
0.026432
0.004405
0.110132
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4a5562306de75e3db48f45552499c6446979364c
98
py
Python
accounts/admin.py
anil-allipilli/File-Transfer
b4ee321c3701a74c92590095449a89e3c2e7647a
[ "MIT" ]
null
null
null
accounts/admin.py
anil-allipilli/File-Transfer
b4ee321c3701a74c92590095449a89e3c2e7647a
[ "MIT" ]
null
null
null
accounts/admin.py
anil-allipilli/File-Transfer
b4ee321c3701a74c92590095449a89e3c2e7647a
[ "MIT" ]
null
null
null
from django.contrib import admin from accounts.models import MyUser admin.site.register(MyUser)
16.333333
34
0.826531
14
98
5.785714
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.112245
98
5
35
19.6
0.931034
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4a558dccc72070bd1d95fd5b3984aaad790968f8
59
py
Python
setup.py
dsunivie/deeperwin
83281a74250cd3548d75ee170d59fcb1ac584ba6
[ "MIT" ]
10
2021-09-27T12:47:17.000Z
2022-01-29T08:10:50.000Z
setup.py
dsunivie/deeperwin
83281a74250cd3548d75ee170d59fcb1ac584ba6
[ "MIT" ]
2
2022-02-22T10:31:30.000Z
2022-02-25T13:20:16.000Z
setup.py
mdsunivie/deeperwin
83281a74250cd3548d75ee170d59fcb1ac584ba6
[ "MIT" ]
2
2022-01-27T14:52:49.000Z
2022-02-04T16:45:52.000Z
import setuptools from distutils.core import setup setup()
14.75
32
0.830508
8
59
6.125
0.75
0
0
0
0
0
0
0
0
0
0
0
0.118644
59
4
33
14.75
0.942308
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5