hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7c63a02f8a8760ebf799987385880dce0b8ed958
| 234
|
py
|
Python
|
basen/__init__.py
|
wernersbacher/basen
|
2218a14ada3918771628180b2e3c8299b4ce2331
|
[
"MIT"
] | null | null | null |
basen/__init__.py
|
wernersbacher/basen
|
2218a14ada3918771628180b2e3c8299b4ce2331
|
[
"MIT"
] | null | null | null |
basen/__init__.py
|
wernersbacher/basen
|
2218a14ada3918771628180b2e3c8299b4ce2331
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2017-2019 by Vd.
# This file is part of BaseN package.
# BaseN is released under the MIT License (see LICENSE).
from .basen import BaseN
from .int2base import int2base, base2int
def version():
return "2019.03"
| 19.5
| 56
| 0.722222
| 36
| 234
| 4.694444
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089947
| 0.192308
| 234
| 11
| 57
| 21.272727
| 0.804233
| 0.517094
| 0
| 0
| 0
| 0
| 0.06422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
7ce44aa0c07ff7e7e525ede6fb620556d9017f79
| 1,142
|
py
|
Python
|
tests/test_user.py
|
Kevson102/Phoenix-Blogs
|
78e2728cf050752ad45957887eb52067eda718dd
|
[
"MIT"
] | null | null | null |
tests/test_user.py
|
Kevson102/Phoenix-Blogs
|
78e2728cf050752ad45957887eb52067eda718dd
|
[
"MIT"
] | null | null | null |
tests/test_user.py
|
Kevson102/Phoenix-Blogs
|
78e2728cf050752ad45957887eb52067eda718dd
|
[
"MIT"
] | null | null | null |
import unittest # imports the unittest module
from app.models import User # imports the users class from the user.py file
class TestUsers(unittest.TestCase):
'''
Test class that defines the test cases for the behaviour of the Users class.
Args:
unittest.TestCase: Test case class that helps in creating test cases.
'''
def setUp(self):
'''
Setup method to run before each test case.
'''
self.new_user = User(12, "Kevson", "kevson@gmail.com", "qwrttyy", "my name is kevson", "CsGitituComp")
def tearDown(self):
'''
Method that cleans up after every test case
'''
User.user_list = []
def test_init(self):
self.assertEqual(self.new_user.id, 12)
self.assertEqual(self.new_user.username, "Kevson")
self.assertEqual(self.new_user.email, "kevson@gmail.com")
self.assertEqual(self.new_user.profile_pic_path, "qwrttyy")
self.assertEqual(self.new_user.user_bio, "my name is kevson")
self.assertEqual(self.new_user.pass_secure, "CsGitituComp")
# if __name__ == '__main__':
# unittest.main()
| 35.6875
| 110
| 0.652364
| 150
| 1,142
| 4.826667
| 0.426667
| 0.06768
| 0.106354
| 0.18232
| 0.232044
| 0.088398
| 0
| 0
| 0
| 0
| 0
| 0.004603
| 0.239054
| 1,142
| 32
| 111
| 35.6875
| 0.828539
| 0.317863
| 0
| 0
| 0
| 0
| 0.164074
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.214286
| false
| 0.071429
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
6b1b5941fddeb76d9b8c358b09292f101f5fe074
| 74
|
py
|
Python
|
CodeUp/6023.py
|
chae-heechan/Algorithm_Study
|
183a77e2cfe352cd82fb5e988b493082529a73dd
|
[
"MIT"
] | null | null | null |
CodeUp/6023.py
|
chae-heechan/Algorithm_Study
|
183a77e2cfe352cd82fb5e988b493082529a73dd
|
[
"MIT"
] | null | null | null |
CodeUp/6023.py
|
chae-heechan/Algorithm_Study
|
183a77e2cfe352cd82fb5e988b493082529a73dd
|
[
"MIT"
] | null | null | null |
# 시분초 입력받아 분만 출력하기
hour, minute, second = input().split(":")
print(minute)
| 24.666667
| 41
| 0.675676
| 11
| 74
| 4.545455
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 74
| 3
| 42
| 24.666667
| 0.78125
| 0.216216
| 0
| 0
| 0
| 0
| 0.017544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
6b20806f78a3c28b65e2e24005e5e3b25de861ce
| 7,563
|
py
|
Python
|
SLpackage/private/pacbio/pythonpkgs/pbsvtools/lib/python2.7/site-packages/pbsv1/independent/repeats.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 5
|
2022-02-20T07:10:02.000Z
|
2022-03-18T17:47:53.000Z
|
SLpackage/private/pacbio/pythonpkgs/pbsvtools/lib/python2.7/site-packages/pbsv1/independent/repeats.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
SLpackage/private/pacbio/pythonpkgs/pbsvtools/lib/python2.7/site-packages/pbsv1/independent/repeats.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
REPEATS_CONTENT = """\
>ALU
GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGAGGA
TTGCTTGAGCCCAGGAGTTCGAGACCAGCCTGGGCAACATAGCGAGACCCCGTCTCTACA
AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAGTCCCAGCTACTCGGGAG
GCTGAGGCAGGAGGATCGCTTGAGCCCAGGAGTTCGAGGCTGCAGTGAGCTATGATCGCG
CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACCCTGTCTCAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAA
>L1
GGGGGAGGAGCCAAGATGGCCGAATAGGAACAGCTCCGGTCTACAGCTCCCAGCGTGAGC
GACGCAGAAGACGGGTGATTTCTGCATTTCCAACTGAGGTACCAGGTTCATCTCACTGGG
GAGTGCCAGACAGTGGGCGCAGGACAGTGGGTGCAGCGCACCGTGCGTGAGCCGAAGCAG
GGCGAGGCATCGCCTCACCCGGGAAGCGCAAGGGGTCAGGGAATTCCCTTTCCTAGTCAA
AGAAAGGGGTGACAGACGGCACCTGGAAAATCGGGTCACTCCCGCCCTAATACTGCGCTT
TTCCGACGGGCTTAAAAAACGGCGCACCAGGAGATTATATCCCGCACCTGGCTCGGAGGG
TCCTACGCCCACGGAGTCTCGCTGATTGCTAGCACAGCAGTCCGAGATCAAACTGCAAGG
CGGCAGCGAGGCTGGGGGAGGGGCGCCCGCCATTGCCCAGGCTTGATTAGGTAAACAAAG
CGGCCGGGAAGCTCGAACTGGGTGGAGCCCACCACAGCTCAAGGAGGCCTGCCTGCCTCT
GTAGGCTCCACCTCTGGGGGCAGGGCACAGACAAACAAAAAGACAGCAGTAACCTCTGCA
GACTTAAATGTCCCTGTCTGACAGCTTTGAAGAGAGCAGTGGTTCTCCCAGCACGCAGCT
TCAGATCTGAGAACGGGCAGACTGCCTCCTCAAGTGGGTCCCTGACCCCCGAGTAGCCTA
ACTGGGAGGCACCCCCCAGTAGGGGCGGACTGACACCTCACACGGCCGGGTACTCCTCTG
AGACAAAACTTCCAGAGGAACGATCAGGCAGCAGCATCTGCGGTTCACCAATATCCACTG
TTCTGCAGCCACCGCTGCTGATACCCAGGCAAACAGGGTCTGGAGTGGACCTCCAGCAAA
CTCCAACAGACCTGCAGCTGAGGGTCCTGTCTGTTAGAAGGAAAACTAACAAACAGAAAG
GACATCCACACCAAAAACCCATCTGTACGTCACCATCATCAAAGACCAAAGGTAGATAAA
ACCACAAAGATGGGGAAAAAACAGAGCAGAAAAACTGGAAACTCTAAAAATCAGAGCGCC
TCTCCTTCTCCAAAGGAACGCAGCTCCTCACCAGCAACGGAACAAAGCTGGACGGAGAAT
GACTTTGACGAGTTGAGAGAAGAAGGCTTCAGACGATCAAACTACTCCGAGCTACGGGAG
GAAATTCGAACCAACGGCAAAGAAGTTAAAAACTTTGAAAAAAAATTAGATGAATGGATA
ACTAGAATAACCAATGCAGAGAAGTCCTTAAAGGACCTGATGGAGCTGAAAACCAAGGCA
CGAGAACTACGTGACGAATGCAGAAGCCTCAGTAGCCGATGCGATCAACTGGAAGAAAGG
GTATCAGTGACGGAAGATGAAATGAATGAAATGAAGCGAGAAGAGAAGTTTAGAGAAAAA
AGAATAAAAAGAAACGAACAAAGCCTCCAAGAAATATGGGACTATGTGAAAAGACCAAAT
CTGCGTCTGATTGGTGTACCTGAAAGTGACGGGGAGAATGGAACCAAGTTGGAAAACACT
CTGCAGGATATTATCCAGGAGAACTTCCCCAATCTAGCAAGGCAGGCCAACGTTCAGATT
CAGGAAATACAGAGAACGCCACAAAGATACTCCTCGAGAAGAGCAACTCCAAGACACATA
ATTGTCAGATTCACCAAAGTTGAAATGAAGGAAAAAATGTTAAGGGCAGCCAGAGAGAAA
GGTCGGGTTACCCACAAAGGGAAGCCCATCAGACTAACGGCTGATCTCTCGGCAGAAACT
CTACAAGCCAGAAGAGAGTGGGGGCCAATATTCAACATTCTTAAAGAAAAGAATTTTCGA
CCCAGAATTTCATATCCAGCCAAACTAAGCTTCATAAGCGAAGGAGAAATAAAATACTTT
ACAGACAAGCAAATGCTGAGAGATTTTGTCACCACCAGGCCTGCCCTAAAAGAGCTCCTG
AAGGAAGCGCTAAACATGGAAAGGAACAACCAGTACCAGCCGCTGCAAAAACATGCCAAA
TTGTAAAGACCATCAAGGCTAGGAAGAAACTGCATCAACTAACGAGCAAAATAACCAGCT
AACGTCATAATGACAGGATCAAATTCACACATAACAATATTAACTTTAAATGTAAATGGG
CTAAATGCTCCAATTAAAAGACACAGACTGGCAAATTGGATAAAGAGTCAAGACCCATCA
GTGTGCCGTATTCAGGAAACCCATCTCACGTGCAGAGACACACATAGGCTCGAAATAAAA
GGATGGAGGAAGATCTACCAAGCAAATGGAAAACAAAAAAAGGCAGGGGTTGCAATCCTA
GTCTCTGATAAAACAGATTTTAAACCAACAAAGATCAAAAGAGACAAAGAAGGCCATTAC
ATAATGGTAAAGGGATCAATTCAACAAGAAGAGCTAACTATCCTAAATATATATGCACCC
AATACAGGAGCACCCAGATTCATAAAGCAAGTCCTGAGTGACCTACAAAGAGACTTAGAC
TCCCACACAATAATAATGGGAGACTTTAACACCCCACTGTCAACATTAGACAGATCAACG
AGACAGAAAGTTAACAAGGATACCCAGGAATTGAACTCAGCTCTGCACCAAGCGGACCTA
ATAGACATCTACAGAACTCTCCACCCCAAATCAACAGAATATACATTCTTTTCAGCACCA
CACCACACCTATTCCAAAATTGACCACATAGTTGGAAGTAAAGCTCTCCTCAGCAAATGT
AAAAGAACAGAAATTATAACAAACTGTCTCTCAGACCACAGTGCAATCAAACTAGAACTC
AGGATTAAGAAACTCACTCAAAACCGCTCAACTACATGGAAACTGAACAACCTGCTCCTG
AATGACTACTGGGTACATAACGAAATGAAGGCAGAAATAAAGATGTTCTTTGAAACCAAC
GAGAACAAAGACACAACATACCAGAATCTCTGGGACACATTCAAAGCAGTGTGTAGAGGG
AAATTTATAGCACTAAATGCCCACAAGAGAAAGCAGGAAAGATCTAAAATTGACACCCTA
ACATCACAATTAAAAGAACTAGAAAAGCAAGAGCAAACACATTCAAAAGCTAGCAGAAGG
CAAGAAATAACTAAAATCAGAGCAGAACTGAAGGAAATAGAGACACAAAAAACCCTTCAA
AAAATTAATGAATCCAGGAGCTGGTTTTTTGAAAAGATCAACAAAATTGATAGACCGCTA
GCAAGACTAATAAAGAAGAAAAGAGAGAAGAATCAAATAGACGCAATAAAAAATGATACA
GGGGATATCACCACCGATCCCACAGAAATACAAACTACCGTCAGAGAATACTATAAACAC
CTCTACGCAAATAAACTAGAAAATCTAGAAGAAATGGATAAATTCCTCGACACGTACACT
CTCCCAAGACTAAACCAGGAAGAAGTTGAATCTCTGAATAGACCAATAACAGGCTCTGAA
ATTGAGGCAATAATCAATAGCTTACCAACCAAAAAAAGTCCGGGACCAGATGGATTCACA
GCCGAATTCTACCAGAGGTACAAGGAGGAGCTGGTACCATTCCTTCTGAAACTATTCCAA
TCAATAGAAAAAGAGGGAATCCTCCCTAACTCATTTTATGAGGCCAGCATCATCCTGATA
CCAAAGCCTGGCAGAGACACAACAAAAAAAGAGAATTTTAGACCAATATCCTTGATGAAC
ATCGATGCAAAAATCCTCAATAAAATACTGGCAAACCGAATCCAGCAGCACATCAAAAAG
CTTATCCACCATGATCAAGTGGGCTTCATCCCTGGGATGCAAGGCTGGTTCAACATACGC
AAATCAATAAACGTAATCCAGCATATAAACAGAACCAAAGACAAAAACCACATGATTATC
TCAATAGATGCAGAAAAGGCCTTTGACAAAATTCAACAACGCTTCATGCTAAAAACTCTC
AATAAATTAGGTATTGATGGGACGTATCTCAAAATAATAAGAGCTATCTATGACAAACCC
ACAGCCAATATCATACTGAATGGGCAAAAACTGGAAGCATTCCCTTTGAAAACTGGCACA
AGACAGGGATGCCCTCTCTCACCACTCCTATTCAACATAGTGTTGGAAGTTCTGGCCAGG
GCAATCAGGCAGGAGAAGGAAATAAAGGGTATTCAATTAGGAAAAGAGGAAGTCAAATTG
TCCCTGTTTGCAGATGACATGATTGTATATCTAGAAAACCCCATCGTCTCAGCCCAAAAT
CTCCTTAAGCTGATAAGCAACTTCAGCAAAGTCTCAGGATACAAAATCAATGTGCAAAAA
TCACAAGCATTCTTATACACCAATAACAGACAAACAGAGAGCCAAATCATGAGTGAACTC
CCATTCACAATTGCTTCAAAGAGAATAAAATACCTAGGAATCCAACTTACAAGGGATGTG
AAGGACCTCTTCAAGGAGAACTACAAACCACTGCTCAATGAAATAAAAGAGGATACAAAC
AAATGGAAGAACATTCCATGCTCATGGGTAGGAAGAATCAATATCGTGAAAATGGCCATA
CTGCCCAAGGTAATTTATAGATTCAATGCCATCCCCATCAAGCTACCAATGACTTTCTTC
ACAGAATTGGAAAAAACTACTTTAAAGTTCATATGGAACCAAAAAAGAGCCCACATCGCC
AAGTCAATCCTAAGCCAAAAGAACAAAGCTGGAGGCATCACGCTACCTGACTTCAAACTA
TACTACAAGGCTACGGTAACCAAAACAGCATGGTACTGGTACCAAAACAGAGATATAGAC
CAATGGAACAGAACAGAGCCCTCAGAAATAATGCCGCATATCTACAACTATCCGATCTTT
GACAAACCTGAGAAAAACAAGCAATGGGGAAAGGATTCCCTATTTAATAAATGGTGCTGG
GAAAACTGGCTAGCCATATGTAGAAAGCTGAAACTGGATCCCTTCCTTACACCTTATACA
AAAATTAATTCAAGATGGATTAAAGACTTAAACGTTAGACCTAAAACCATAAAAACCCTA
GAAGAAAACCTAGGCAATACCATTCAGGACATAGGCATGGGCAAGGACTTCATGTCTAAA
ACACCAAAAGCAATGGCAACAAAAGCCAAAATTGACAAACGGGATCTAATTAAACTAAAG
AGCTTCTGCACAGCAAAAGAAACTACCATCAGAGTGAACAGGCAACCTACAAAATGGGAG
AAAATTTTTGCAACCTACTCATCTGACAAAGGGCTAATATCCAGAATCTACAATGAACTC
AAACAAATTTACAAGAAAAAAACAAACAACCCCATCAAAAAGTGGGCAAAGGATATGAAC
AGACACTTCTCAAAAGAAGACATTTATGCAGCCAAAAAACACATGAAAAAATGCTCATCA
TCA
>SVA_A
CTCCCTCTCCCTCACCCTCTCCCCATGGTCTCCCTCTCCCTCTCTTTCCACGGTCTCCCT
CTGATGCCGAGCCGAAGCTGGACGGTACTGCTGCCATCTCGGCTCACTGCAACCTCCCTG
CCTGATTCTCCTGCCTCAGCTTGCCGAGTGCCTGCGATTGCAGGCGCGCGCCGCCACGCC
TGACTGGTTTTCGTATTTTGTTAGTGGAGACGGGGTTTCGCTGTGTTGGCCGGGCTGGTC
TCCAGCTCCTAACCGCGAGTGATCCACCAGCCTCGGCCTCCCGAGGTGCTGGGATTGCAG
ACGGAGTCTCGTTCACTCAGTGCTCAATGATGCCCAGGCTGGAGTGCAGTGGCGTGATCT
CGGCTCGCTACAACCTCCACCTCCCAGCAGCCTGCCTTGGCCTCCCAAAGTGCCGAGATT
GCAGCCTCTGCCCGGCCGCCACCCCGTCTGGGAAGTGAGGAGTGTCTCCGCCTGGCCACC
CATCGTCTGGGATGTGAGGAGCGTCTCTGCCCTGCCGCCCATCGTCTGAGATGTGGGGAG
CACCTCTGCCCGGCCGCCCCGTCCGGGATGTGAGGAGCGTCGCTGCCCGGCCGCCCCGTC
TGAGAAGTGAGGAGACCCTCTGCCTGGCAACCGCTCCATCTGAGAAGTGAGGAGCCCCTC
CGCCCGGCAGCCGCCCTGTCTGAGAAGTGAGGAGCCCCTCCGCCCAGCAGCCACCTGGTC
CGGGAGGGAGGTGGGGGGGTCAGCCCCCCGCCCGGCCAGCCGCCCCGTCCGGGAGGGAGG
TGGGGGGGTCAGCCCCCAGCCCGGCCAGCCGCCCCGTCCGGGAAGTGAGGGGCGCCTCTG
CCCGGCCGCCCCTACTGGGAAGTGAGGAGCCACTTTGCCCGGCCAGCCACTCTGTCCGGG
AGGGAGGTGGGGGGGTCAGCCCCCCGCCCGGCCAGCCGCCCCGTCCGGGAGGGAGGTGGG
GGGATCAGCCCCCCGCCCAGCCAGCCGCCCCGTCCGGGAGGGAGGTGGGGGGGTCAGCCC
CCCGCCCGGCCAGCCGCCCTGTCCGGGAGGTGAGGGGCGCCTCTGCCCGGCCGCGCCTAC
TGGAAAGTGAGGAGCCCCTCTGCCCGGCCACCACCCCGTCTGGGAGGTGTGCCCAACAGC
TCATTGAGAAGGGGCCATGATGACAATGGCGGTTTTGTGGAATAGAAAGGGGGGAAAGGT
GGGGAAAAGATTGAGAAATCGGATGGTTGCCGTGTCTGTGTAGAAAGAGGTAGACCTGGG
AGACTTTTCATTTTGTTCTGTACTAAGAAAAATTCTTCTGCCTTGGGATCCTGTTGATCG
GTGACCTTACCCCCAACCCTGTGCTCTCTGAAACATGTGCTGTATCCACTCAGGGTTGAA
TGGATTAAGAGCGGTGCAAGATGTGCTTTGTTAAACAGATGCTTGAAGGCAGCATGCTCC
TTAAGAGTCATCACCACTCCCTAATCTCAAGTACCCAGGGACACAAACACTGCGGAAGGC
CGCAGGGTCCTCTGCCTAGGAAAACCAGAGACCTTTGTTCACTTGTTTATCTGCTGACCT
TCCCTCCACTATTGTCCTGTGACCCTGCCAAATCCCCCTCTGTGAGAAACACCCAAGAAT
GATCAATAAAAAAAAAAAAA
"""
| 56.864662
| 60
| 0.980431
| 136
| 7,563
| 54.470588
| 0.992647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000135
| 0.018115
| 7,563
| 132
| 61
| 57.295455
| 0.997441
| 0
| 0
| 0
| 0
| 0
| 0.991406
| 0.96787
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007634
| 0
| 0.007634
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
860f8f1641cfedbec44aa3e70bdb4ace34ad89ab
| 12,607
|
py
|
Python
|
tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py
|
quantummind/quantum
|
fd952d0362c5445eef0da4437fb3e5ebb16b7948
|
[
"Apache-2.0"
] | 2
|
2021-09-24T09:41:47.000Z
|
2021-10-04T20:55:09.000Z
|
tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py
|
quantummind/quantum
|
fd952d0362c5445eef0da4437fb3e5ebb16b7948
|
[
"Apache-2.0"
] | 1
|
2020-03-09T23:26:43.000Z
|
2020-03-09T23:26:43.000Z
|
tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py
|
quantummind/quantum
|
fd952d0362c5445eef0da4437fb3e5ebb16b7948
|
[
"Apache-2.0"
] | 1
|
2020-04-11T19:31:34.000Z
|
2020-04-11T19:31:34.000Z
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target tfq_inner_product."""
import copy
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import cirq
from tensorflow_quantum.core.ops.math_ops import fidelity_op
from tensorflow_quantum.python import util
class FidelityTest(tf.test.TestCase, parameterized.TestCase):
"""Tests tfq_fidelity_op."""
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 5
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 1
},
{
'n_qubits': 10,
'batch_size': 10,
'inner_dim_size': 2
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 5
},
])
def test_correctness_with_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that inner_product works with symbols."""
symbol_names = ['alpha', 'beta', 'gamma']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
other_batch = [
util.random_circuit_resolver_batch(qubits, inner_dim_size)[0]
for i in range(batch_size)
]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names = tf.convert_to_tensor(symbol_names,
dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor(symbol_values_array)
out = fidelity_op.fidelity(programs, symbol_names, symbol_values,
other_programs)
out_arr = np.empty((batch_size, inner_dim_size), dtype=np.complex64)
for i in range(batch_size):
final_circuit = cirq.resolve_parameters(circuit_batch[i],
resolver_batch[i])
final_wf = cirq.final_state_vector(final_circuit)
for j in range(inner_dim_size):
internal_wf = cirq.final_state_vector(other_batch[i][j])
out_arr[i][j] = np.abs(np.vdot(final_wf, internal_wf))**2
self.assertAllClose(out, out_arr, atol=1e-5)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 5
},
{
'n_qubits': 5,
'batch_size': 2,
'inner_dim_size': 1
},
{
'n_qubits': 10,
'batch_size': 3,
'inner_dim_size': 2
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 5
},
])
def test_correctness_without_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that inner_product works without symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, _ = \
util.random_circuit_resolver_batch(
qubits, batch_size)
other_batch = [
util.random_circuit_resolver_batch(qubits, inner_dim_size)[0]
for i in range(batch_size)
]
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names = tf.convert_to_tensor([], dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor([[] for _ in range(batch_size)])
out = fidelity_op.fidelity(programs, symbol_names, symbol_values,
other_programs)
out_arr = np.empty((batch_size, inner_dim_size), dtype=np.complex64)
for i in range(batch_size):
final_wf = cirq.final_state_vector(circuit_batch[i])
for j in range(inner_dim_size):
internal_wf = cirq.final_state_vector(other_batch[i][j])
out_arr[i][j] = np.abs(np.vdot(final_wf, internal_wf))**2
self.assertAllClose(out, out_arr, atol=1e-5)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
def test_correctness_empty(self):
"""Tests the fidelity with empty circuits."""
empty_circuit = util.convert_to_tensor([cirq.Circuit()])
empty_symbols = tf.convert_to_tensor([], dtype=tf.dtypes.string)
empty_values = tf.convert_to_tensor([[]])
other_program = util.convert_to_tensor([[cirq.Circuit()]])
out = fidelity_op.fidelity(empty_circuit, empty_symbols, empty_values,
other_program)
expected = np.array([[1.0]], dtype=np.complex64)
self.assertAllClose(out, expected)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
qubit = cirq.GridQubit(0, 0)
non_empty_circuit = util.convert_to_tensor(
[cirq.Circuit(cirq.X(qubit))])
empty_symbols = tf.convert_to_tensor([], dtype=tf.dtypes.string)
empty_values = tf.convert_to_tensor([[]])
other_program = util.convert_to_tensor([[cirq.Circuit()]])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'qubits not found'):
fidelity_op.fidelity(non_empty_circuit, empty_symbols, empty_values,
other_program)
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 1
},
{
'n_qubits': 5,
'batch_size': 3,
'inner_dim_size': 1
},
])
def test_tf_gradient_correctness_with_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that tf.gradient of inner_product works with symbols."""
symbol_names = ['alpha', 'beta', 'gamma']
n_params = len(symbol_names)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
other_batch = [0 for i in range(batch_size)]
for i in range(len(other_batch)):
other_batch[i] = copy.deepcopy(circuit_batch)
for j in range(len(other_batch[i])):
other_batch[i][j] = cirq.resolve_parameters(
circuit_batch[i], resolver_batch[i])
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names_tensor = tf.convert_to_tensor(symbol_names,
dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor(symbol_values_array)
with tf.GradientTape() as tape:
tape.watch(symbol_values)
ip = fidelity_op.fidelity(programs, symbol_names_tensor,
symbol_values, other_programs)
out = tape.gradient(ip, symbol_values)
out_arr = np.zeros((batch_size, n_params), dtype=np.complex64)
# dx came from _GRAD_EPS of core/src/adj_util.cc
dx = 5e-3
for i in range(batch_size):
for k, name in enumerate(symbol_names):
if name in resolver_batch[i].param_dict:
new_resolver = copy.deepcopy(resolver_batch[i])
new_resolver.param_dict[name] += dx
final_circuit_p = cirq.resolve_parameters(
circuit_batch[i], new_resolver)
new_resolver = copy.deepcopy(resolver_batch[i])
new_resolver.param_dict[name] -= dx
final_circuit_m = cirq.resolve_parameters(
circuit_batch[i], new_resolver)
final_wf_p = cirq.final_state_vector(final_circuit_p)
final_wf_m = cirq.final_state_vector(final_circuit_m)
# Performs central finite difference.
for j in range(inner_dim_size):
internal_wf = cirq.final_state_vector(other_batch[i][j])
fid_p = cirq.fidelity(final_wf_p, internal_wf)
fid_m = cirq.fidelity(final_wf_m, internal_wf)
grad_fid = 0.5 * (fid_p - fid_m) / dx
out_arr[i][k] += grad_fid
self.assertAllClose(out, out_arr, atol=1e-3)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 5
},
{
'n_qubits': 5,
'batch_size': 3,
'inner_dim_size': 2
},
])
def test_tf_gradient_correctness_without_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that tf.gradient of inner_product works without symbols."""
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, _ = \
util.random_circuit_resolver_batch(
qubits, batch_size)
other_batch = [
util.random_circuit_resolver_batch(qubits, inner_dim_size)[0]
for i in range(batch_size)
]
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names = tf.convert_to_tensor([], dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor([[] for _ in range(batch_size)])
with tf.GradientTape() as tape:
tape.watch(symbol_values)
ip = fidelity_op.fidelity(programs, symbol_names, symbol_values,
other_programs)
out = tape.gradient(ip, symbol_values)
self.assertAllClose(out, tf.zeros_like(symbol_values), atol=1e-3)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
def test_correctness_no_circuit(self):
"""Test the inner product between no circuits."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
other_program = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
out = fidelity_op.fidelity(empty_circuit, empty_symbols, empty_values,
other_program)
self.assertShapeEqual(np.zeros((0, 0)), out)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
def test_tf_gradient_correctness_no_circuit(self):
"""Test the inner product grad between no circuits."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
other_program = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
with tf.GradientTape() as tape:
tape.watch(empty_values)
out = fidelity_op.fidelity(empty_circuit, empty_symbols,
empty_values, other_program)
self.assertShapeEqual(np.zeros((0, 0)), out)
self.assertDTypeEqual(out, tf.float32.as_numpy_dtype)
if __name__ == "__main__":
tf.test.main()
| 40.277955
| 80
| 0.588165
| 1,495
| 12,607
| 4.658863
| 0.131104
| 0.04135
| 0.04135
| 0.032735
| 0.788945
| 0.776884
| 0.758507
| 0.742857
| 0.694042
| 0.664896
| 0
| 0.013573
| 0.310383
| 12,607
| 312
| 81
| 40.407051
| 0.787555
| 0.093123
| 0
| 0.638211
| 0
| 0
| 0.03834
| 0
| 0
| 0
| 0
| 0
| 0.060976
| 1
| 0.028455
| false
| 0
| 0.028455
| 0
| 0.060976
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
861c890f76b85d973e13f2698627a2ee03fe3e19
| 1,392
|
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/codecommit/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/codecommit/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/codecommit/paginator.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Dict
from botocore.paginate import Paginator
class DescribePullRequestEvents(Paginator):
def paginate(self, pullRequestId: str, pullRequestEventType: str = None, actorArn: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetCommentsForComparedCommit(Paginator):
def paginate(self, repositoryName: str, afterCommitId: str, beforeCommitId: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetCommentsForPullRequest(Paginator):
def paginate(self, pullRequestId: str, repositoryName: str = None, beforeCommitId: str = None, afterCommitId: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetDifferences(Paginator):
def paginate(self, repositoryName: str, afterCommitSpecifier: str, beforeCommitSpecifier: str = None, beforePath: str = None, afterPath: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class ListBranches(Paginator):
def paginate(self, repositoryName: str, PaginationConfig: Dict = None) -> Dict:
pass
class ListPullRequests(Paginator):
def paginate(self, repositoryName: str, authorArn: str = None, pullRequestStatus: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class ListRepositories(Paginator):
def paginate(self, sortBy: str = None, order: str = None, PaginationConfig: Dict = None) -> Dict:
pass
| 36.631579
| 192
| 0.724138
| 141
| 1,392
| 7.148936
| 0.248227
| 0.090278
| 0.138889
| 0.166667
| 0.535714
| 0.535714
| 0.256944
| 0.218254
| 0
| 0
| 0
| 0
| 0.180316
| 1,392
| 37
| 193
| 37.621622
| 0.883436
| 0
| 0
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.304348
| false
| 0.304348
| 0.086957
| 0
| 0.695652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
8627ffa730a235ff3849ef4a70d6058a330c7f48
| 1,872
|
py
|
Python
|
pymnn/pip_package/MNN/tools/mnn_fb/Reshape.py
|
xhuan28/MNN
|
81df3a48d79cbc0b75251d12934345948866f7be
|
[
"Apache-2.0"
] | 3
|
2019-12-27T01:10:32.000Z
|
2021-05-14T08:10:40.000Z
|
pymnn/pip_package/MNN/tools/mnn_fb/Reshape.py
|
xhuan28/MNN
|
81df3a48d79cbc0b75251d12934345948866f7be
|
[
"Apache-2.0"
] | 10
|
2019-07-04T01:40:13.000Z
|
2019-10-30T02:38:42.000Z
|
pymnn/pip_package/MNN/tools/mnn_fb/Reshape.py
|
xhuan28/MNN
|
81df3a48d79cbc0b75251d12934345948866f7be
|
[
"Apache-2.0"
] | 1
|
2020-03-10T02:17:47.000Z
|
2020-03-10T02:17:47.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: MNN
import flatbuffers
class Reshape(object):
    """FlatBuffers accessor for MNN's `Reshape` table (generated code).

    Vtable field 4 holds the `dims` int32 vector; field 6 holds the
    `dimType` int8 scalar.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsReshape(cls, buf, offset):
        # Dereference the root uoffset stored at `offset`, then position a
        # fresh accessor on the table it points to.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Reshape()
        x.Init(buf, n + offset)
        return x
    # Reshape
    def Init(self, buf, pos):
        # Bind this accessor to the underlying flatbuffer table.
        self._tab = flatbuffers.table.Table(buf, pos)
    # Reshape
    def Dims(self, j):
        # Return dims[j]; 0 when the vector is absent (no bounds check on j).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0
    # Reshape
    def DimsAsNumpy(self):
        # Return the dims vector as a numpy int32 view; 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0
    # Reshape
    def DimsLength(self):
        # Number of entries in dims; 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # Reshape
    def DimType(self):
        # Scalar int8 field; defaults to 0 when unset.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0
# Builder helpers for serializing a Reshape table (generated code).
def ReshapeStart(builder): builder.StartObject(2)  # the table declares 2 fields
def ReshapeAddDims(builder, dims): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(dims), 0)
def ReshapeStartDimsVector(builder, numElems): return builder.StartVector(4, numElems, 4)  # int32 elems, 4-byte aligned
def ReshapeAddDimType(builder, dimType): builder.PrependInt8Slot(1, dimType, 0)
def ReshapeEnd(builder): return builder.EndObject()
| 34.036364
| 130
| 0.675748
| 230
| 1,872
| 5.365217
| 0.3
| 0.062399
| 0.160454
| 0.170178
| 0.381686
| 0.352512
| 0.28282
| 0.221232
| 0.221232
| 0.221232
| 0
| 0.017735
| 0.21688
| 1,872
| 54
| 131
| 34.666667
| 0.824011
| 0.064637
| 0
| 0.297297
| 1
| 0
| 0.002295
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.297297
| false
| 0
| 0.027027
| 0.054054
| 0.621622
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
864175d43defdf0dd20e70af564cca44ebafb4b8
| 13,457
|
py
|
Python
|
chj/index/JType.py
|
aemcgraw/CodeHawk-Java
|
8e43877d0357579f6509d3fc52c69c2d4568d288
|
[
"MIT"
] | null | null | null |
chj/index/JType.py
|
aemcgraw/CodeHawk-Java
|
8e43877d0357579f6509d3fc52c69c2d4568d288
|
[
"MIT"
] | null | null | null |
chj/index/JType.py
|
aemcgraw/CodeHawk-Java
|
8e43877d0357579f6509d3fc52c69c2d4568d288
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Java Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import chj.index.JDictionaryRecord as JD
class JavaTypesBase(JD.JDictionaryRecord):
    """Common base for java-type dictionary records.

    Subclasses override the predicate methods; the defaults describe a
    non-scalar, non-array, non-object record.
    """

    def __init__(self, tpd, index, tags, args):
        JD.JDictionaryRecord.__init__(self, index, tags, args)
        # The type dictionary that owns this record.
        self.tpd = tpd

    def get_scalar_size(self):
        # Default word size in bytes.
        return 4

    def is_scalar(self):
        return False

    def is_array(self):
        return False

    def is_object(self):
        return False

    def __str__(self):
        return 'javatypesbase'
class StringConstant(JavaTypesBase):
    """Constant-pool string: tag[0] is the literal, arg[0] its length.

    A second tag marks strings that are stored in hex form.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_string(self):
        # Empty string when no tag was recorded.
        return self.tags[0] if len(self.tags) > 0 else ''

    def get_string_length(self):
        return int(self.args[0])

    def is_hex(self):
        return len(self.tags) > 1

    def __str__(self):
        if not self.is_hex():
            return self.get_string()
        return '(' + str(self.get_string_length()) + '-char-string' + ')'
class ClassObjectType(JavaTypesBase):
    """Object type referring to a class; arg[0] indexes the class name."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_class(self):
        return self.tpd.jd.get_cn(int(self.args[0]))

    def is_object(self):
        return True

    def __str__(self):
        return str(self.get_class())
class ArrayObjectType(JavaTypesBase):
    """Array object type; arg[0] indexes the element (value) type."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def is_object_array_type(self):
        return True

    def is_array(self):
        return True

    def get_value_type(self):
        return self.tpd.get_value_type(int(self.args[0]))

    def __str__(self):
        return str(self.get_value_type())
class ObjectValueType(JavaTypesBase):
    """Value type wrapping an object type; arg[0] indexes the object type."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def is_object_value_type(self):
        return True

    def is_object_type(self):
        return True

    def is_object(self):
        return True

    def is_array_type(self):
        # NOTE(review): delegates to the wrapped object type; presumably only
        # valid when that type defines is_object_array_type — confirm.
        return self.get_object_type().is_object_array_type()

    def get_object_type(self):
        return self.tpd.get_object_type(int(self.args[0]))

    def get_class(self):
        return self.get_object_type().get_class()

    def __str__(self):
        return str(self.get_object_type())
class BasicValueType(JavaTypesBase):
    """Primitive (scalar) value type; tag[1] holds the one-letter basic-type
    code (e.g. 'L' for long, 'D' for double)."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_scalar_size(self):
        """Size in bytes: 8 for long/double, 4 for everything else."""
        return 8 if self.is_long() or self.is_double() else 4

    def is_basic_type(self):
        return True

    def is_scalar(self):
        # Fix: this was misspelled `is_scalara`, so the base-class
        # is_scalar() (which returns False) was never actually overridden.
        return True

    def is_scalara(self):
        # Deprecated misspelling kept for backward compatibility with any
        # existing callers.
        return self.is_scalar()

    def is_long(self):
        return self.tags[1] == 'L'

    def is_double(self):
        return self.tags[1] == 'D'

    def get_basic_type(self):
        return self.tags[1]

    def __str__(self):
        return str(self.get_basic_type())
class MethodDescriptor(JavaTypesBase):
    """Method signature descriptor.

    arg[0] is a has-return flag (1 = returns a value); when set, arg[1]
    indexes the return type and the remaining args index the argument
    types, otherwise the argument types start at arg[1].
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def has_return_value(self):
        return int(self.args[0]) == 1

    def get_return_type(self):
        # Implicitly None for void methods.
        if self.has_return_value():
            return self.tpd.get_value_type(int(self.args[1]))

    def get_argument_types(self):
        start = 2 if self.has_return_value() else 1
        return [self.tpd.get_value_type(int(ix)) for ix in self.args[start:]]

    def __str__(self):
        rtype = self.get_return_type()
        sreturn = '' if rtype is None else str(rtype)
        arglist = ','.join(str(t) for t in self.get_argument_types())
        return '(' + arglist + ')' + sreturn
class ValueDescriptor(JavaTypesBase):
    """Field/value descriptor; arg[0] indexes the value type."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_value_type(self):
        return self.tpd.get_value_type(int(self.args[0]))

    def __str__(self):
        return 'descr:' + str(self.get_value_type())
class ConstString(JavaTypesBase):
    """String constant; arg[0] indexes the string in the type dictionary."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_string(self):
        return self.tpd.get_string(int(self.args[0]))

    def __str__(self):
        return self.get_string()
class ConstInt(JavaTypesBase):
    """Integer constant, stored directly in arg[0]."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_int(self):
        return int(self.args[0])

    def __str__(self):
        return str(self.get_int())
class ConstFloat(JavaTypesBase):
    """Float constant; tag[1] holds its textual representation."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_float(self):
        return float(self.tags[1])

    def __str__(self):
        # Print the original text rather than the parsed float.
        return self.tags[1]
class ConstLong(JavaTypesBase):
    """Long constant; tag[1] holds its textual representation.

    Fix: this was declared with `def` instead of `class`, which made
    ConstLong a plain function whose nested `def`s were never executed,
    so the record type could not be instantiated at all.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_long(self):
        return int(self.tags[1])

    def __str__(self):
        return self.tags[1]
class ConstDouble(JavaTypesBase):
    """Double constant; tag[1] holds its textual representation.

    Fix: both methods referenced the bare name `tags`, which is not in
    scope inside the methods (NameError at call time); they must read
    `self.tags`, matching the sibling ConstFloat.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_double(self):
        return float(self.tags[1])

    def __str__(self):
        return self.tags[1]
class ConstClass(JavaTypesBase):
    """Class constant; arg[0] indexes the object type."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_class(self):
        return self.tpd.get_object_type(int(self.args[0]))

    def __str__(self):
        return str(self.get_class())
class FieldHandle(JavaTypesBase):
    """Handle to a field: arg[0] indexes the class, arg[1] the field signature."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_class_name(self):
        return self.tpd.jd.get_class(int(self.args[0]))

    def get_field_signature(self):
        return self.tpd.jd.get_field_signature(int(self.args[1]))

    def __str__(self):
        return str(self.get_class_name()) + ':' + str(self.get_field_signature())
class MethodHandle(JavaTypesBase):
    """Handle to a method: arg[0] indexes the object type, arg[1] the
    method signature."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_object_type(self):
        return self.tpd.get_object_type(int(self.args[0]))

    def get_method_signature(self):
        return self.tpd.jd.get_method_signature(int(self.args[1]))

    def __str__(self):
        return str(self.get_object_type()) + ':' + str(self.get_method_signature())
class InterfaceHandle(JavaTypesBase):
    """Handle to an interface method: arg[0] indexes the class, arg[1] the
    method signature.

    Fix: get_method_signature was written as `self.tpd.jd.get_method+signature(...)`
    (a `+` where an underscore belongs), which is an AttributeError/NameError
    at call time; siblings such as ConstInterfaceMethod show the intended
    call is `get_method_signature`.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_class_name(self):
        return self.tpd.jd.get_class(int(self.args[0]))

    def get_method_signature(self):
        return self.tpd.jd.get_method_signature(int(self.args[1]))

    def __str__(self):
        return str(self.get_class_name()) + ':' + str(self.get_method_signature())
class ConstValue(JavaTypesBase):
    """Wrapper for a constant value; arg[0] indexes the value in the type
    dictionary.

    Fix: the getter read `self.jd` — JavaTypesBase stores the dictionary
    as `self.tpd` and never sets `jd` — and passed the raw arg without the
    `int()` conversion every sibling record applies (compare
    BootstrapArgConstantValue, which makes the same lookup via
    `self.tpd.get_constant_value(int(self.args[0]))`).
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_constant_value(self):
        return self.tpd.get_constant_value(int(self.args[0]))

    def __str__(self):
        return 'C:' + str(self.get_constant_value())
class ConstField(JavaTypesBase):
    """Field constant: arg[0] indexes the class name, arg[1] the field
    signature."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_class_name(self):
        return self.tpd.jd.get_cn(int(self.args[0]))

    def get_field_signature(self):
        return self.tpd.jd.get_field_signature(int(self.args[1]))

    def __str__(self):
        return 'C:' + str(self.get_class_name()) + '.' + str(self.get_field_signature())
class ConstMethod(JavaTypesBase):
    """Method constant: arg[0] indexes the object type, arg[1] the method
    signature.

    Fix: both getters referenced the bare name `args` (not in scope inside
    the methods — NameError at call time); they must read `self.args`, as
    every sibling record does.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_object_type(self):
        return self.tpd.get_object_type(int(self.args[0]))

    def get_method_signature(self):
        return self.tpd.jd.get_method_signature(int(self.args[1]))

    def __str__(self):
        return 'C:' + str(self.get_object_type()) + '.' + str(self.get_method_signature())
class ConstInterfaceMethod(JavaTypesBase):
    """Interface-method constant: arg[0] indexes the class name, arg[1]
    the method signature."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_class_name(self):
        return self.tpd.jd.get_cn(int(self.args[0]))

    def get_method_signature(self):
        return self.tpd.jd.get_method_signature(int(self.args[1]))

    def __str__(self):
        return 'C:' + str(self.get_class_name()) + '.' + str(self.get_method_signature())
class ConstDynamicMethod(JavaTypesBase):
    """invokedynamic constant: arg[0] is the bootstrap-method index, arg[1]
    indexes the method signature.

    Fix: __str__ called `get_bootstrap_mehtod_index` (typo) — an
    AttributeError at call time; the method defined above is
    `get_bootstrap_method_index`.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_bootstrap_method_index(self):
        return int(self.args[0])

    def get_method_signature(self):
        return self.tpd.jd.get_method_signature(int(self.args[1]))

    def __str__(self):
        return ('C:Dynamic(' + str(self.get_bootstrap_method_index()) + ').'
                + str(self.get_method_signature()))
class ConstNameAndType(JavaTypesBase):
    """Name-and-type constant: arg[0] indexes the name string, arg[1] the
    descriptor."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_name(self):
        return self.tpd.get_string(int(self.args[0]))

    def get_type(self):
        return self.tpd.get_descriptor(int(self.args[1]))

    def __str__(self):
        return 'CNT:' + self.get_name() + ':' + str(self.get_type())
class ConstStringUTF8(JavaTypesBase):
    """UTF-8 string constant; arg[0] indexes the string.

    Fix: the getter referenced the bare name `args` (NameError at call
    time); it must read `self.args`, matching the sibling ConstString.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_string(self):
        return self.tpd.get_string(int(self.args[0]))

    def __str__(self):
        return 'C:' + self.get_string()
class ConstMethodHandle(JavaTypesBase):
    """Method-handle constant: tag[1] is the reference kind, arg[0]
    indexes the handle type."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_reference_kind(self):
        return self.tags[1]

    def get_method_handle_type(self):
        return self.tpd.get_method_handle_type(int(self.args[0]))

    def __str__(self):
        return ('C:' + str(self.get_method_handle_type())
                + '(' + self.get_reference_kind() + ')')
class ConstMethodType(JavaTypesBase):
    """Method-type constant; arg[0] indexes the method descriptor."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_method_descriptor(self):
        return self.tpd.get_method_descriptor(int(self.args[0]))

    def __str__(self):
        return 'C:' + str(self.get_method_descriptor())
class ConstUnusable(JavaTypesBase):
    """Placeholder for an unusable constant-pool slot."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def __str__(self):
        return 'unusable'
class BootstrapArgConstantValue(JavaTypesBase):
    """Bootstrap-method argument holding a constant value (arg[0])."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_constant_value(self):
        return self.tpd.get_constant_value(int(self.args[0]))

    def __str__(self):
        return str(self.get_constant_value())
class BootstrapArgMethodHandle(JavaTypesBase):
    """Bootstrap-method argument holding a method handle: tag[1] is the
    reference kind, arg[0] indexes the handle type.

    Fix: the getter read `self.jd` — JavaTypesBase only stores `self.tpd`,
    and the sibling ConstMethodHandle makes the identical lookup via
    `self.tpd.get_method_handle_type(...)`.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_reference_kind(self):
        return self.tags[1]

    def get_method_handle_type(self):
        return self.tpd.get_method_handle_type(int(self.args[0]))

    def __str__(self):
        return (str(self.get_method_handle_type())
                + '(' + self.get_reference_kind() + ')')
class BootstrapArgMethodType(JavaTypesBase):
    """Bootstrap-method argument holding a method type (arg[0] indexes the
    method descriptor)."""

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_method_descriptor(self):
        return self.tpd.get_method_descriptor(int(self.args[0]))

    def __str__(self):
        return str(self.get_method_descriptor())
class BootstrapMethodData(JavaTypesBase):
    """Bootstrap method: tag[1] is the reference kind, arg[0] indexes the
    method-handle type, and the remaining args index the bootstrap
    arguments.

    Fix: __str__ called `get_method_handle_types()` (stray plural 's') —
    an AttributeError at call time; the accessor defined above is
    `get_method_handle_type`.
    """

    def __init__(self, tpd, index, tags, args):
        JavaTypesBase.__init__(self, tpd, index, tags, args)

    def get_reference_kind(self):
        return self.tags[1]

    def get_method_handle_type(self):
        return self.tpd.get_method_handle_type(int(self.args[0]))

    def get_arguments(self):
        return [ self.tpd.get_bootstrap_argument(int(x)) for x in self.args[1:] ]

    def __str__(self):
        return (str(self.get_method_handle_type()) + '('
                + ','.join([ str(x) for x in self.get_arguments() ]) + ')')
| 29.064795
| 90
| 0.678011
| 1,852
| 13,457
| 4.587473
| 0.109071
| 0.076624
| 0.094868
| 0.114878
| 0.735169
| 0.706332
| 0.664548
| 0.640772
| 0.627825
| 0.615819
| 0
| 0.005899
| 0.18117
| 13,457
| 462
| 91
| 29.127706
| 0.765133
| 0.101806
| 0
| 0.52
| 0
| 0
| 0.007463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0.004
| 0.348
| 0.812
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
864738a7a458e196f5ba5a7c6b4db76f5f8b1965
| 291
|
py
|
Python
|
ingenico/connect/sdk/param_request.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 12
|
2016-09-26T21:46:31.000Z
|
2020-12-23T18:44:54.000Z
|
ingenico/connect/sdk/param_request.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 3
|
2020-05-02T16:53:02.000Z
|
2020-06-02T12:49:51.000Z
|
ingenico/connect/sdk/param_request.py
|
festicket/connect-sdk-python3
|
c399c6443789dd978f319c89e1ebd387c812a77b
|
[
"MIT"
] | 11
|
2017-07-16T00:55:28.000Z
|
2021-09-24T17:00:49.000Z
|
class ParamRequest(object):
    """Abstract base for objects that can be rendered as a set of HTTP
    request parameters; subclasses must implement to_request_parameters.
    """

    def to_request_parameters(self):
        """
        :return: list[:class:`ingenico.connect.sdk.RequestParam`] representing the HTTP request parameters
        """
        raise NotImplementedError
| 26.454545
| 106
| 0.649485
| 28
| 291
| 6.678571
| 0.821429
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247423
| 291
| 10
| 107
| 29.1
| 0.853881
| 0.474227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
8654e681d99c130c92c48982cd46edf3405d0707
| 296
|
py
|
Python
|
5.Operators/2.membership_operator.py
|
Tazri/Python
|
f7ca625800229c8a7e20b64810d6e162ccb6b09f
|
[
"DOC"
] | null | null | null |
5.Operators/2.membership_operator.py
|
Tazri/Python
|
f7ca625800229c8a7e20b64810d6e162ccb6b09f
|
[
"DOC"
] | null | null | null |
5.Operators/2.membership_operator.py
|
Tazri/Python
|
f7ca625800229c8a7e20b64810d6e162ccb6b09f
|
[
"DOC"
] | null | null | null |
# Demonstrates Python's membership operators (`in` / `not in`).
# Fixes: the sample list shadowed the builtin name `list`, and every
# statement carried a needless trailing semicolon.
fruits = ['Apple', 'Orange', 'Benana', 'Mango']  # 'Benana' spelling kept from the original data
name = "Md Tazri"

print('"Apple" in list : ', "Apple" in fruits)
print('"Kiwi" in list : ', "Kiwi" in fruits)
print('"Water" not in list : ', "Water" not in fruits)
print("'Md' in name : ", 'Md' in name)
print("'Tazri' not in name : ", 'Tazri' not in name)
| 37
| 52
| 0.594595
| 47
| 296
| 3.744681
| 0.276596
| 0.204545
| 0.1875
| 0.159091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155405
| 296
| 8
| 53
| 37
| 0.704
| 0
| 0
| 0
| 0
| 0
| 0.488215
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.714286
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
868d2ac484eff3103da52edb3a5294d8b96a8b40
| 212
|
py
|
Python
|
ResueltosCOJenPython/1495.py
|
raulcr98/ProgrammingTeamBookScoobyDoo
|
0fcb98e012e0f2db2dda68cbf01b96f567a12578
|
[
"MIT"
] | 1
|
2020-03-17T01:44:09.000Z
|
2020-03-17T01:44:09.000Z
|
ResueltosCOJenPython/1495.py
|
raulcr98/teambook-scooby-doo
|
0fcb98e012e0f2db2dda68cbf01b96f567a12578
|
[
"MIT"
] | null | null | null |
ResueltosCOJenPython/1495.py
|
raulcr98/teambook-scooby-doo
|
0fcb98e012e0f2db2dda68cbf01b96f567a12578
|
[
"MIT"
] | null | null | null |
import sys


def main():
    """Read a count N, then N integers (one per line) from stdin, and
    print them in ascending order, one per line.

    Fixes over the original: `print i` is Python-2 syntax (a SyntaxError
    under Python 3); the builtin names `list` and `input` were shadowed /
    rebound; and the work now runs under a __main__ guard instead of at
    import time.
    """
    tokens = sys.stdin.read().split()
    count = int(tokens[0])
    # Whitespace-split is equivalent to the original line-by-line reads
    # for one-integer-per-line input.
    values = sorted(int(tok) for tok in tokens[1:1 + count])
    for value in values:
        print(value)


if __name__ == "__main__":
    main()
| 13.25
| 46
| 0.665094
| 37
| 212
| 3.702703
| 0.540541
| 0.233577
| 0.240876
| 0.175182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011173
| 0.15566
| 212
| 16
| 47
| 13.25
| 0.75419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.090909
| null | null | 0.090909
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
86a30b755773e53ed9fb1c457b6ab854fa4578f8
| 177
|
py
|
Python
|
resources/dot_PyCharm/system/python_stubs/cache/4d8444ac16281560bbebafd25ab3df7074617f7fea7b5b79ae4acce4f111640a/cython_runtime.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | 1
|
2020-04-20T02:27:20.000Z
|
2020-04-20T02:27:20.000Z
|
resources/dot_PyCharm/system/python_stubs/cache/4d8444ac16281560bbebafd25ab3df7074617f7fea7b5b79ae4acce4f111640a/cython_runtime.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
resources/dot_PyCharm/system/python_stubs/cache/4d8444ac16281560bbebafd25ab3df7074617f7fea7b5b79ae4acce4f111640a/cython_runtime.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module cython_runtime
# from C:\Python27\lib\site-packages\pandas\_libs\skiplist.pyd
# by generator 1.147
# no doc
# no imports
# no functions
# no classes
| 17.7
| 62
| 0.740113
| 28
| 177
| 4.607143
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046358
| 0.146893
| 177
| 9
| 63
| 19.666667
| 0.807947
| 0.898305
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
86a4361a23e8cce11270f98f6b2460012e62d3d7
| 102
|
py
|
Python
|
server/apps/orgtemplate/apps.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
server/apps/orgtemplate/apps.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
server/apps/orgtemplate/apps.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig


class OrgtemplateConfig(AppConfig):
    """Django application configuration for the orgtemplate app."""
    # Dotted module path Django uses to locate this application.
    name = 'apps.orgtemplate'
| 17
| 35
| 0.77451
| 11
| 102
| 7.181818
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 102
| 5
| 36
| 20.4
| 0.908046
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
86b5f4d595a15c4edef1e8fdfd1217400f718857
| 91
|
py
|
Python
|
src/bot.py
|
shirin1996/PyBot
|
4676ccca6b47fce4d3f20a7e158ea9278eb1b508
|
[
"MIT"
] | 1
|
2022-01-30T20:27:31.000Z
|
2022-01-30T20:27:31.000Z
|
src/bot.py
|
shirinyamani/MsgBot
|
0b95cea203ff97d631fba7cbf48c23c76f7d91e9
|
[
"MIT"
] | null | null | null |
src/bot.py
|
shirinyamani/MsgBot
|
0b95cea203ff97d631fba7cbf48c23c76f7d91e9
|
[
"MIT"
] | null | null | null |
import telebot
import os

# Telegram bot client. The token is read from the BOT_TOKEN environment
# variable (a missing variable raises KeyError here, at import time);
# outgoing messages are parsed as HTML.
bot = telebot.TeleBot(os.environ['BOT_TOKEN'], parse_mode='HTML')
| 22.75
| 65
| 0.769231
| 14
| 91
| 4.857143
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 91
| 4
| 65
| 22.75
| 0.819277
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
86c69658b971442f01c858593a8db80f8833c0af
| 447
|
py
|
Python
|
geometry.py
|
Gowpenful/watson_compute
|
7e15bd7b482bf342c66819ea09ae9832488e5e0e
|
[
"MIT"
] | 1
|
2020-06-03T11:47:11.000Z
|
2020-06-03T11:47:11.000Z
|
geometry.py
|
Gowpenful/watson_compute
|
7e15bd7b482bf342c66819ea09ae9832488e5e0e
|
[
"MIT"
] | null | null | null |
geometry.py
|
Gowpenful/watson_compute
|
7e15bd7b482bf342c66819ea09ae9832488e5e0e
|
[
"MIT"
] | null | null | null |
import sys
from termcolor import cprint
from colorama import init
from pyfiglet import figlet_format
import pyperclip

# Startup banner: large figlet "Geometry" title framed by colored rules.
# NOTE(review): `sys`, `init` (colorama) and `pyperclip` are imported but
# unused in this chunk — presumably needed by code that follows; confirm.
cprint(figlet_format('Geometry', font='small'), 'blue', attrs=['bold', 'blink'])
cprint('==============================================', 'white', attrs=['blink'])
cprint('Scientific Calculator v.0.0.0', 'blue', attrs=['bold'])
cprint('==============================================', 'white', attrs=['blink'])
print()
| 40.636364
| 83
| 0.550336
| 46
| 447
| 5.304348
| 0.521739
| 0.098361
| 0.106557
| 0.172131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007426
| 0.096197
| 447
| 11
| 84
| 40.636364
| 0.596535
| 0
| 0
| 0.2
| 0
| 0
| 0.399543
| 0.210046
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.6
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
86e753cdc1631885c966a27dc78ef60eaff0b0ea
| 79,943
|
py
|
Python
|
sdk/python/pulumi_aws/ecs/outputs.py
|
RafalSumislawski/pulumi-aws
|
7c8a335d327c173aa32c8b3d98816e760db329fa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ecs/outputs.py
|
RafalSumislawski/pulumi-aws
|
7c8a335d327c173aa32c8b3d98816e760db329fa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ecs/outputs.py
|
RafalSumislawski/pulumi-aws
|
7c8a335d327c173aa32c8b3d98816e760db329fa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'CapacityProviderAutoScalingGroupProvider',
'CapacityProviderAutoScalingGroupProviderManagedScaling',
'ClusterConfiguration',
'ClusterConfigurationExecuteCommandConfiguration',
'ClusterConfigurationExecuteCommandConfigurationLogConfiguration',
'ClusterDefaultCapacityProviderStrategy',
'ClusterSetting',
'ServiceCapacityProviderStrategy',
'ServiceDeploymentCircuitBreaker',
'ServiceDeploymentController',
'ServiceLoadBalancer',
'ServiceNetworkConfiguration',
'ServiceOrderedPlacementStrategy',
'ServicePlacementConstraint',
'ServiceServiceRegistries',
'TaskDefinitionEphemeralStorage',
'TaskDefinitionInferenceAccelerator',
'TaskDefinitionPlacementConstraint',
'TaskDefinitionProxyConfiguration',
'TaskDefinitionRuntimePlatform',
'TaskDefinitionVolume',
'TaskDefinitionVolumeDockerVolumeConfiguration',
'TaskDefinitionVolumeEfsVolumeConfiguration',
'TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig',
'TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration',
'TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfig',
'TaskSetCapacityProviderStrategy',
'TaskSetLoadBalancer',
'TaskSetNetworkConfiguration',
'TaskSetScale',
'TaskSetServiceRegistries',
'GetClusterSettingResult',
]
@pulumi.output_type
class CapacityProviderAutoScalingGroupProvider(dict):
    # Generated Pulumi output type: a dict whose camelCase wire keys are
    # exposed through snake_case property getters.
    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase key is accessed instead of the
        # corresponding snake_case property.
        suggest = None
        if key == "autoScalingGroupArn":
            suggest = "auto_scaling_group_arn"
        elif key == "managedScaling":
            suggest = "managed_scaling"
        elif key == "managedTerminationProtection":
            suggest = "managed_termination_protection"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CapacityProviderAutoScalingGroupProvider. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        CapacityProviderAutoScalingGroupProvider.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        CapacityProviderAutoScalingGroupProvider.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 auto_scaling_group_arn: str,
                 managed_scaling: Optional['outputs.CapacityProviderAutoScalingGroupProviderManagedScaling'] = None,
                 managed_termination_protection: Optional[str] = None):
        """
        :param str auto_scaling_group_arn: - ARN of the associated auto scaling group.
        :param 'CapacityProviderAutoScalingGroupProviderManagedScalingArgs' managed_scaling: - Configuration block defining the parameters of the auto scaling. Detailed below.
        :param str managed_termination_protection: - Enables or disables container-aware termination of instances in the auto scaling group when scale-in happens. Valid values are `ENABLED` and `DISABLED`.
        """
        pulumi.set(__self__, "auto_scaling_group_arn", auto_scaling_group_arn)
        # Optional fields are only stored when supplied.
        if managed_scaling is not None:
            pulumi.set(__self__, "managed_scaling", managed_scaling)
        if managed_termination_protection is not None:
            pulumi.set(__self__, "managed_termination_protection", managed_termination_protection)

    @property
    @pulumi.getter(name="autoScalingGroupArn")
    def auto_scaling_group_arn(self) -> str:
        """
        - ARN of the associated auto scaling group.
        """
        return pulumi.get(self, "auto_scaling_group_arn")

    @property
    @pulumi.getter(name="managedScaling")
    def managed_scaling(self) -> Optional['outputs.CapacityProviderAutoScalingGroupProviderManagedScaling']:
        """
        - Configuration block defining the parameters of the auto scaling. Detailed below.
        """
        return pulumi.get(self, "managed_scaling")

    @property
    @pulumi.getter(name="managedTerminationProtection")
    def managed_termination_protection(self) -> Optional[str]:
        """
        - Enables or disables container-aware termination of instances in the auto scaling group when scale-in happens. Valid values are `ENABLED` and `DISABLED`.
        """
        return pulumi.get(self, "managed_termination_protection")
@pulumi.output_type
class CapacityProviderAutoScalingGroupProviderManagedScaling(dict):
    # Generated Pulumi output type: a dict whose camelCase wire keys are
    # exposed through snake_case property getters.
    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase key is accessed instead of the
        # corresponding snake_case property.
        suggest = None
        if key == "instanceWarmupPeriod":
            suggest = "instance_warmup_period"
        elif key == "maximumScalingStepSize":
            suggest = "maximum_scaling_step_size"
        elif key == "minimumScalingStepSize":
            suggest = "minimum_scaling_step_size"
        elif key == "targetCapacity":
            suggest = "target_capacity"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CapacityProviderAutoScalingGroupProviderManagedScaling. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        CapacityProviderAutoScalingGroupProviderManagedScaling.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        CapacityProviderAutoScalingGroupProviderManagedScaling.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 instance_warmup_period: Optional[int] = None,
                 maximum_scaling_step_size: Optional[int] = None,
                 minimum_scaling_step_size: Optional[int] = None,
                 status: Optional[str] = None,
                 target_capacity: Optional[int] = None):
        """
        :param int instance_warmup_period: Period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is used.
        :param int maximum_scaling_step_size: Maximum step adjustment size. A number between 1 and 10,000.
        :param int minimum_scaling_step_size: Minimum step adjustment size. A number between 1 and 10,000.
        :param str status: Whether auto scaling is managed by ECS. Valid values are `ENABLED` and `DISABLED`.
        :param int target_capacity: Target utilization for the capacity provider. A number between 1 and 100.
        """
        # All fields are optional; only supplied values are stored.
        if instance_warmup_period is not None:
            pulumi.set(__self__, "instance_warmup_period", instance_warmup_period)
        if maximum_scaling_step_size is not None:
            pulumi.set(__self__, "maximum_scaling_step_size", maximum_scaling_step_size)
        if minimum_scaling_step_size is not None:
            pulumi.set(__self__, "minimum_scaling_step_size", minimum_scaling_step_size)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if target_capacity is not None:
            pulumi.set(__self__, "target_capacity", target_capacity)

    @property
    @pulumi.getter(name="instanceWarmupPeriod")
    def instance_warmup_period(self) -> Optional[int]:
        """
        Period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is used.
        """
        return pulumi.get(self, "instance_warmup_period")

    @property
    @pulumi.getter(name="maximumScalingStepSize")
    def maximum_scaling_step_size(self) -> Optional[int]:
        """
        Maximum step adjustment size. A number between 1 and 10,000.
        """
        return pulumi.get(self, "maximum_scaling_step_size")

    @property
    @pulumi.getter(name="minimumScalingStepSize")
    def minimum_scaling_step_size(self) -> Optional[int]:
        """
        Minimum step adjustment size. A number between 1 and 10,000.
        """
        return pulumi.get(self, "minimum_scaling_step_size")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Whether auto scaling is managed by ECS. Valid values are `ENABLED` and `DISABLED`.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="targetCapacity")
    def target_capacity(self) -> Optional[int]:
        """
        Target utilization for the capacity provider. A number between 1 and 100.
        """
        return pulumi.get(self, "target_capacity")
@pulumi.output_type
class ClusterConfiguration(dict):
    # Generated Pulumi output type: a dict whose camelCase wire keys are
    # exposed through snake_case property getters.
    @staticmethod
    def __key_warning(key: str):
        # Warn when a raw camelCase key is accessed instead of the
        # corresponding snake_case property.
        suggest = None
        if key == "executeCommandConfiguration":
            suggest = "execute_command_configuration"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClusterConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClusterConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 execute_command_configuration: Optional['outputs.ClusterConfigurationExecuteCommandConfiguration'] = None):
        """
        :param 'ClusterConfigurationExecuteCommandConfigurationArgs' execute_command_configuration: The details of the execute command configuration. Detailed below.
        """
        if execute_command_configuration is not None:
            pulumi.set(__self__, "execute_command_configuration", execute_command_configuration)

    @property
    @pulumi.getter(name="executeCommandConfiguration")
    def execute_command_configuration(self) -> Optional['outputs.ClusterConfigurationExecuteCommandConfiguration']:
        """
        The details of the execute command configuration. Detailed below.
        """
        return pulumi.get(self, "execute_command_configuration")
@pulumi.output_type
class ClusterConfigurationExecuteCommandConfiguration(dict):
    """Output type for the execute-command configuration of an ECS cluster."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "kmsKeyId": "kms_key_id",
            "logConfiguration": "log_configuration",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in ClusterConfigurationExecuteCommandConfiguration. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 kms_key_id: Optional[str] = None,
                 log_configuration: Optional['outputs.ClusterConfigurationExecuteCommandConfigurationLogConfiguration'] = None,
                 logging: Optional[str] = None):
        """
        :param str kms_key_id: AWS Key Management Service key ID used to encrypt the data between the local client and the container.
        :param 'ClusterConfigurationExecuteCommandConfigurationLogConfigurationArgs' log_configuration: Log configuration for the results of the execute command actions. Required when `logging` is `OVERRIDE`. Detailed below.
        :param str logging: Log setting to use for redirecting logs for your execute command results. Valid values are `NONE`, `DEFAULT`, and `OVERRIDE`.
        """
        # Only record properties that were actually provided.
        for attr, value in (
                ("kms_key_id", kms_key_id),
                ("log_configuration", log_configuration),
                ("logging", logging)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[str]:
        """
        AWS Key Management Service key ID used to encrypt the data between the local client and the container.
        """
        return pulumi.get(self, "kms_key_id")

    @property
    @pulumi.getter(name="logConfiguration")
    def log_configuration(self) -> Optional['outputs.ClusterConfigurationExecuteCommandConfigurationLogConfiguration']:
        """
        Log configuration for the results of the execute command actions. Required when `logging` is `OVERRIDE`. Detailed below.
        """
        return pulumi.get(self, "log_configuration")

    @property
    @pulumi.getter
    def logging(self) -> Optional[str]:
        """
        Log setting to use for redirecting logs for your execute command results. Valid values are `NONE`, `DEFAULT`, and `OVERRIDE`.
        """
        return pulumi.get(self, "logging")
@pulumi.output_type
class ClusterConfigurationExecuteCommandConfigurationLogConfiguration(dict):
    """Output type for the log configuration of an ECS cluster's execute-command feature."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "cloudWatchEncryptionEnabled": "cloud_watch_encryption_enabled",
            "cloudWatchLogGroupName": "cloud_watch_log_group_name",
            "s3BucketEncryptionEnabled": "s3_bucket_encryption_enabled",
            "s3BucketName": "s3_bucket_name",
            "s3KeyPrefix": "s3_key_prefix",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in ClusterConfigurationExecuteCommandConfigurationLogConfiguration. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cloud_watch_encryption_enabled: Optional[bool] = None,
                 cloud_watch_log_group_name: Optional[str] = None,
                 s3_bucket_encryption_enabled: Optional[bool] = None,
                 s3_bucket_name: Optional[str] = None,
                 s3_key_prefix: Optional[str] = None):
        """
        :param bool cloud_watch_encryption_enabled: Whether to enable encryption on the CloudWatch logs. Encryption is disabled when not specified.
        :param str cloud_watch_log_group_name: Name of the CloudWatch log group that receives the logs.
        :param bool s3_bucket_encryption_enabled: Whether to enable encryption on the logs sent to S3. Encryption is disabled when not specified.
        :param str s3_bucket_name: Name of the S3 bucket that receives the logs.
        :param str s3_key_prefix: Optional folder in the S3 bucket in which to place the logs.
        """
        # Only record properties that were actually provided.
        for attr, value in (
                ("cloud_watch_encryption_enabled", cloud_watch_encryption_enabled),
                ("cloud_watch_log_group_name", cloud_watch_log_group_name),
                ("s3_bucket_encryption_enabled", s3_bucket_encryption_enabled),
                ("s3_bucket_name", s3_bucket_name),
                ("s3_key_prefix", s3_key_prefix)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="cloudWatchEncryptionEnabled")
    def cloud_watch_encryption_enabled(self) -> Optional[bool]:
        """
        Whether to enable encryption on the CloudWatch logs. Encryption is disabled when not specified.
        """
        return pulumi.get(self, "cloud_watch_encryption_enabled")

    @property
    @pulumi.getter(name="cloudWatchLogGroupName")
    def cloud_watch_log_group_name(self) -> Optional[str]:
        """
        Name of the CloudWatch log group that receives the logs.
        """
        return pulumi.get(self, "cloud_watch_log_group_name")

    @property
    @pulumi.getter(name="s3BucketEncryptionEnabled")
    def s3_bucket_encryption_enabled(self) -> Optional[bool]:
        """
        Whether to enable encryption on the logs sent to S3. Encryption is disabled when not specified.
        """
        return pulumi.get(self, "s3_bucket_encryption_enabled")

    @property
    @pulumi.getter(name="s3BucketName")
    def s3_bucket_name(self) -> Optional[str]:
        """
        Name of the S3 bucket that receives the logs.
        """
        return pulumi.get(self, "s3_bucket_name")

    @property
    @pulumi.getter(name="s3KeyPrefix")
    def s3_key_prefix(self) -> Optional[str]:
        """
        Optional folder in the S3 bucket in which to place the logs.
        """
        return pulumi.get(self, "s3_key_prefix")
@pulumi.output_type
class ClusterDefaultCapacityProviderStrategy(dict):
    """Output type for one entry of an ECS cluster's default capacity provider strategy."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "capacityProvider": "capacity_provider",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in ClusterDefaultCapacityProviderStrategy. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 capacity_provider: str,
                 base: Optional[int] = None,
                 weight: Optional[int] = None):
        """
        :param str capacity_provider: Short name of the capacity provider.
        :param int base: Minimum number of tasks to run on the specified capacity provider. Only one capacity provider in a strategy may define a base.
        :param int weight: Relative percentage of the total number of launched tasks that should use the specified capacity provider.
        """
        pulumi.set(__self__, "capacity_provider", capacity_provider)
        # Only record the optional properties that were actually provided.
        for attr, value in (("base", base), ("weight", weight)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="capacityProvider")
    def capacity_provider(self) -> str:
        """
        Short name of the capacity provider.
        """
        return pulumi.get(self, "capacity_provider")

    @property
    @pulumi.getter
    def base(self) -> Optional[int]:
        """
        Minimum number of tasks to run on the specified capacity provider. Only one capacity provider in a strategy may define a base.
        """
        return pulumi.get(self, "base")

    @property
    @pulumi.getter
    def weight(self) -> Optional[int]:
        """
        Relative percentage of the total number of launched tasks that should use the specified capacity provider.
        """
        return pulumi.get(self, "weight")
@pulumi.output_type
class ClusterSetting(dict):
    """Output type for one `setting` entry of an ECS cluster."""

    def __init__(__self__, *,
                 name: str,
                 value: str):
        """
        :param str name: Name of the setting to manage. Valid values: `containerInsights`.
        :param str value: The value to assign to the setting. Valid values are `enabled` and `disabled`.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the setting to manage. Valid values: `containerInsights`.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value to assign to the setting. Valid values are `enabled` and `disabled`.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class ServiceCapacityProviderStrategy(dict):
    """Output type for one entry of an ECS service's capacity provider strategy."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "capacityProvider": "capacity_provider",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in ServiceCapacityProviderStrategy. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 capacity_provider: str,
                 base: Optional[int] = None,
                 weight: Optional[int] = None):
        """
        :param str capacity_provider: Short name of the capacity provider.
        :param int base: Minimum number of tasks to run on the specified capacity provider. Only one capacity provider in a strategy may define a base.
        :param int weight: Relative percentage of the total number of launched tasks that should use the specified capacity provider.
        """
        pulumi.set(__self__, "capacity_provider", capacity_provider)
        # Only record the optional properties that were actually provided.
        for attr, value in (("base", base), ("weight", weight)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="capacityProvider")
    def capacity_provider(self) -> str:
        """
        Short name of the capacity provider.
        """
        return pulumi.get(self, "capacity_provider")

    @property
    @pulumi.getter
    def base(self) -> Optional[int]:
        """
        Minimum number of tasks to run on the specified capacity provider. Only one capacity provider in a strategy may define a base.
        """
        return pulumi.get(self, "base")

    @property
    @pulumi.getter
    def weight(self) -> Optional[int]:
        """
        Relative percentage of the total number of launched tasks that should use the specified capacity provider.
        """
        return pulumi.get(self, "weight")
@pulumi.output_type
class ServiceDeploymentCircuitBreaker(dict):
    """Output type for an ECS service's deployment circuit breaker settings."""

    def __init__(__self__, *,
                 enable: bool,
                 rollback: bool):
        """
        :param bool enable: Whether the deployment circuit breaker logic is enabled for the service.
        :param bool rollback: Whether Amazon ECS rolls back the service on deployment failure. When enabled, a failed deployment rolls the service back to the last deployment that completed successfully.
        """
        # Both fields are required, so record them unconditionally.
        for attr, value in (("enable", enable), ("rollback", rollback)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def enable(self) -> bool:
        """
        Whether the deployment circuit breaker logic is enabled for the service.
        """
        return pulumi.get(self, "enable")

    @property
    @pulumi.getter
    def rollback(self) -> bool:
        """
        Whether Amazon ECS rolls back the service on deployment failure. When enabled, a failed deployment rolls the service back to the last deployment that completed successfully.
        """
        return pulumi.get(self, "rollback")
@pulumi.output_type
class ServiceDeploymentController(dict):
    """Output type for an ECS service's deployment controller."""

    def __init__(__self__, *,
                 type: Optional[str] = None):
        """
        :param str type: Type of deployment controller. Valid values: `CODE_DEPLOY`, `ECS`, `EXTERNAL`. Default: `ECS`.
        """
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Type of deployment controller. Valid values: `CODE_DEPLOY`, `ECS`, `EXTERNAL`. Default: `ECS`.
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class ServiceLoadBalancer(dict):
    """Output type for one `load_balancer` entry of an ECS service."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "containerName": "container_name",
            "containerPort": "container_port",
            "elbName": "elb_name",
            "targetGroupArn": "target_group_arn",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in ServiceLoadBalancer. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 container_name: str,
                 container_port: int,
                 elb_name: Optional[str] = None,
                 target_group_arn: Optional[str] = None):
        """
        :param str container_name: Name of the container (as it appears in a container definition) to associate with the load balancer.
        :param int container_port: Port on the container to associate with the load balancer.
        :param str elb_name: Name of the Classic ELB to associate with the service.
        :param str target_group_arn: ARN of the Load Balancer target group to associate with the service.
        """
        pulumi.set(__self__, "container_name", container_name)
        pulumi.set(__self__, "container_port", container_port)
        # Only record the optional properties that were actually provided.
        for attr, value in (("elb_name", elb_name), ("target_group_arn", target_group_arn)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> str:
        """
        Name of the container (as it appears in a container definition) to associate with the load balancer.
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="containerPort")
    def container_port(self) -> int:
        """
        Port on the container to associate with the load balancer.
        """
        return pulumi.get(self, "container_port")

    @property
    @pulumi.getter(name="elbName")
    def elb_name(self) -> Optional[str]:
        """
        Name of the Classic ELB to associate with the service.
        """
        return pulumi.get(self, "elb_name")

    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[str]:
        """
        ARN of the Load Balancer target group to associate with the service.
        """
        return pulumi.get(self, "target_group_arn")
@pulumi.output_type
class ServiceNetworkConfiguration(dict):
    """Output type for an ECS service's network configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "assignPublicIp": "assign_public_ip",
            "securityGroups": "security_groups",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in ServiceNetworkConfiguration. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 subnets: Sequence[str],
                 assign_public_ip: Optional[bool] = None,
                 security_groups: Optional[Sequence[str]] = None):
        """
        :param Sequence[str] subnets: Subnets associated with the task or service.
        :param bool assign_public_ip: Whether to assign a public IP address to the ENI (Fargate launch type only). Valid values are `true` or `false`. Default `false`.
        :param Sequence[str] security_groups: Security groups associated with the task or service. When no security group is given, the VPC's default security group is used.
        """
        pulumi.set(__self__, "subnets", subnets)
        # Only record the optional properties that were actually provided.
        for attr, value in (("assign_public_ip", assign_public_ip), ("security_groups", security_groups)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def subnets(self) -> Sequence[str]:
        """
        Subnets associated with the task or service.
        """
        return pulumi.get(self, "subnets")

    @property
    @pulumi.getter(name="assignPublicIp")
    def assign_public_ip(self) -> Optional[bool]:
        """
        Whether to assign a public IP address to the ENI (Fargate launch type only). Valid values are `true` or `false`. Default `false`.
        """
        return pulumi.get(self, "assign_public_ip")

    @property
    @pulumi.getter(name="securityGroups")
    def security_groups(self) -> Optional[Sequence[str]]:
        """
        Security groups associated with the task or service. When no security group is given, the VPC's default security group is used.
        """
        return pulumi.get(self, "security_groups")
@pulumi.output_type
class ServiceOrderedPlacementStrategy(dict):
    """Output type for one ordered placement strategy entry of an ECS service."""

    def __init__(__self__, *,
                 type: str,
                 field: Optional[str] = None):
        """
        :param str type: Type of placement strategy. Must be one of: `binpack`, `random`, or `spread`
        :param str field: For the `spread` strategy, valid values are `instanceId` (or the equivalent `host`),
               or any platform or custom attribute applied to a container instance.
               For the `binpack` type, valid values are `memory` and `cpu`. The `random` type does not
               use this attribute. See [Placement Strategy](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementStrategy.html) for details.
        """
        pulumi.set(__self__, "type", type)
        if field is not None:
            pulumi.set(__self__, "field", field)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of placement strategy. Must be one of: `binpack`, `random`, or `spread`
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def field(self) -> Optional[str]:
        """
        For the `spread` strategy, valid values are `instanceId` (or the equivalent `host`),
        or any platform or custom attribute applied to a container instance.
        For the `binpack` type, valid values are `memory` and `cpu`. The `random` type does not
        use this attribute. See [Placement Strategy](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementStrategy.html) for details.
        """
        return pulumi.get(self, "field")
@pulumi.output_type
class ServicePlacementConstraint(dict):
    """Output type for one placement constraint of an ECS service."""

    def __init__(__self__, *,
                 type: str,
                 expression: Optional[str] = None):
        """
        :param str type: Type of constraint. The only valid values at this time are `memberOf` and `distinctInstance`.
        :param str expression: Cluster Query Language expression to apply to the constraint. Not required for the `distinctInstance` type. See [Cluster Query Language in the Amazon EC2 Container Service Developer Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) for details.
        """
        pulumi.set(__self__, "type", type)
        if expression is not None:
            pulumi.set(__self__, "expression", expression)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of constraint. The only valid values at this time are `memberOf` and `distinctInstance`.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def expression(self) -> Optional[str]:
        """
        Cluster Query Language expression to apply to the constraint. Not required for the `distinctInstance` type. See [Cluster Query Language in the Amazon EC2 Container Service Developer Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) for details.
        """
        return pulumi.get(self, "expression")
@pulumi.output_type
class ServiceServiceRegistries(dict):
    """Output type for an ECS service's service registry configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "registryArn": "registry_arn",
            "containerName": "container_name",
            "containerPort": "container_port",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in ServiceServiceRegistries. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 registry_arn: str,
                 container_name: Optional[str] = None,
                 container_port: Optional[int] = None,
                 port: Optional[int] = None):
        """
        :param str registry_arn: ARN of the Service Registry. The currently supported service registry is Amazon Route 53 Auto Naming Service (`servicediscovery.Service`). See [Service](https://docs.aws.amazon.com/Route53/latest/APIReference/API_autonaming_Service.html) for details.
        :param str container_name: Container name, already specified in the task definition, to use for your service discovery service.
        :param int container_port: Port value, already specified in the task definition, to use for your service discovery service.
        :param int port: Port value to use when your Service Discovery service specified an SRV record.
        """
        pulumi.set(__self__, "registry_arn", registry_arn)
        # Only record the optional properties that were actually provided.
        for attr, value in (
                ("container_name", container_name),
                ("container_port", container_port),
                ("port", port)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="registryArn")
    def registry_arn(self) -> str:
        """
        ARN of the Service Registry. The currently supported service registry is Amazon Route 53 Auto Naming Service (`servicediscovery.Service`). See [Service](https://docs.aws.amazon.com/Route53/latest/APIReference/API_autonaming_Service.html) for details.
        """
        return pulumi.get(self, "registry_arn")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """
        Container name, already specified in the task definition, to use for your service discovery service.
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="containerPort")
    def container_port(self) -> Optional[int]:
        """
        Port value, already specified in the task definition, to use for your service discovery service.
        """
        return pulumi.get(self, "container_port")

    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """
        Port value to use when your Service Discovery service specified an SRV record.
        """
        return pulumi.get(self, "port")
@pulumi.output_type
class TaskDefinitionEphemeralStorage(dict):
    """Output type for an ECS task definition's ephemeral storage configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "sizeInGib": "size_in_gib",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionEphemeralStorage. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 size_in_gib: int):
        """
        :param int size_in_gib: Total amount, in GiB, of ephemeral storage to set for the task. Supported values range from `21` GiB up to `200` GiB.
        """
        pulumi.set(__self__, "size_in_gib", size_in_gib)

    @property
    @pulumi.getter(name="sizeInGib")
    def size_in_gib(self) -> int:
        """
        Total amount, in GiB, of ephemeral storage to set for the task. Supported values range from `21` GiB up to `200` GiB.
        """
        return pulumi.get(self, "size_in_gib")
@pulumi.output_type
class TaskDefinitionInferenceAccelerator(dict):
    """Output type for one inference accelerator of an ECS task definition."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "deviceName": "device_name",
            "deviceType": "device_type",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionInferenceAccelerator. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 device_name: str,
                 device_type: str):
        """
        :param str device_name: Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement.
        :param str device_type: Elastic Inference accelerator type to use.
        """
        # Both fields are required, so record them unconditionally.
        for attr, value in (("device_name", device_name), ("device_type", device_type)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> str:
        """
        Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement.
        """
        return pulumi.get(self, "device_name")

    @property
    @pulumi.getter(name="deviceType")
    def device_type(self) -> str:
        """
        Elastic Inference accelerator type to use.
        """
        return pulumi.get(self, "device_type")
@pulumi.output_type
class TaskDefinitionPlacementConstraint(dict):
    """Output type for one placement constraint of an ECS task definition."""

    def __init__(__self__, *,
                 type: str,
                 expression: Optional[str] = None):
        """
        :param str type: Type of constraint. Use `memberOf` to restrict selection to a group of valid candidates.
        :param str expression: Cluster Query Language expression to apply to the constraint. For more information, see [Cluster Query Language in the Amazon EC2 Container Service Developer Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html).
        """
        pulumi.set(__self__, "type", type)
        if expression is not None:
            pulumi.set(__self__, "expression", expression)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of constraint. Use `memberOf` to restrict selection to a group of valid candidates.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def expression(self) -> Optional[str]:
        """
        Cluster Query Language expression to apply to the constraint. For more information, see [Cluster Query Language in the Amazon EC2 Container Service Developer Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html).
        """
        return pulumi.get(self, "expression")
@pulumi.output_type
class TaskDefinitionProxyConfiguration(dict):
    """Output type for an ECS task definition's proxy configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "containerName": "container_name",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionProxyConfiguration. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 container_name: str,
                 properties: Optional[Mapping[str, str]] = None,
                 type: Optional[str] = None):
        """
        :param str container_name: Name of the container that serves as the App Mesh proxy.
        :param Mapping[str, str] properties: Network configuration parameters for the Container Network Interface (CNI) plugin, given as a key-value mapping.
        :param str type: Proxy type. The default value is `APPMESH`. The only supported value is `APPMESH`.
        """
        pulumi.set(__self__, "container_name", container_name)
        # Only record the optional properties that were actually provided.
        for attr, value in (("properties", properties), ("type", type)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> str:
        """
        Name of the container that serves as the App Mesh proxy.
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter
    def properties(self) -> Optional[Mapping[str, str]]:
        """
        Network configuration parameters for the Container Network Interface (CNI) plugin, given as a key-value mapping.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Proxy type. The default value is `APPMESH`. The only supported value is `APPMESH`.
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class TaskDefinitionRuntimePlatform(dict):
    """Output type for an ECS task definition's runtime platform."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used instead of the snake_case property.
        renamed = {
            "cpuArchitecture": "cpu_architecture",
            "operatingSystemFamily": "operating_system_family",
        }.get(key)
        if renamed:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionRuntimePlatform. Access the value via the '{renamed}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cpu_architecture: Optional[str] = None,
                 operating_system_family: Optional[str] = None):
        """
        :param str cpu_architecture: Must be set to either `X86_64` or `ARM64`; see [cpu architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform)
        :param str operating_system_family: Required when `requires_compatibilities` is `FARGATE`; must be a valid option from the [operating system family in the runtime platform](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform) setting
        """
        # Only record properties that were actually provided.
        for attr, value in (
                ("cpu_architecture", cpu_architecture),
                ("operating_system_family", operating_system_family)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="cpuArchitecture")
    def cpu_architecture(self) -> Optional[str]:
        """
        Must be set to either `X86_64` or `ARM64`; see [cpu architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform)
        """
        return pulumi.get(self, "cpu_architecture")

    @property
    @pulumi.getter(name="operatingSystemFamily")
    def operating_system_family(self) -> Optional[str]:
        """
        Required when `requires_compatibilities` is `FARGATE`; must be a valid option from the [operating system family in the runtime platform](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform) setting
        """
        return pulumi.get(self, "operating_system_family")
@pulumi.output_type
class TaskDefinitionVolume(dict):
@staticmethod
def __key_warning(key: str):
    # Warn when a camelCase wire key is used instead of the snake_case property.
    renamed = {
        "dockerVolumeConfiguration": "docker_volume_configuration",
        "efsVolumeConfiguration": "efs_volume_configuration",
        "fsxWindowsFileServerVolumeConfiguration": "fsx_windows_file_server_volume_configuration",
        "hostPath": "host_path",
    }.get(key)
    if renamed:
        pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolume. Access the value via the '{renamed}' property getter instead.")
def __getitem__(self, key: str) -> Any:
    # Emit the camelCase-key warning before delegating to dict lookup.
    self.__key_warning(key)
    return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
    # Emit the camelCase-key warning before delegating to dict.get.
    self.__key_warning(key)
    return super().get(key, default)
def __init__(__self__, *,
name: str,
docker_volume_configuration: Optional['outputs.TaskDefinitionVolumeDockerVolumeConfiguration'] = None,
efs_volume_configuration: Optional['outputs.TaskDefinitionVolumeEfsVolumeConfiguration'] = None,
fsx_windows_file_server_volume_configuration: Optional['outputs.TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration'] = None,
host_path: Optional[str] = None):
"""
:param str name: Name of the volume. This name is referenced in the `sourceVolume`
parameter of container definition in the `mountPoints` section.
:param 'TaskDefinitionVolumeDockerVolumeConfigurationArgs' docker_volume_configuration: Configuration block to configure a docker volume. Detailed below.
:param 'TaskDefinitionVolumeEfsVolumeConfigurationArgs' efs_volume_configuration: Configuration block for an EFS volume. Detailed below.
:param 'TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationArgs' fsx_windows_file_server_volume_configuration: Configuration block for an FSX Windows File Server volume. Detailed below.
:param str host_path: Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished.
"""
pulumi.set(__self__, "name", name)
if docker_volume_configuration is not None:
pulumi.set(__self__, "docker_volume_configuration", docker_volume_configuration)
if efs_volume_configuration is not None:
pulumi.set(__self__, "efs_volume_configuration", efs_volume_configuration)
if fsx_windows_file_server_volume_configuration is not None:
pulumi.set(__self__, "fsx_windows_file_server_volume_configuration", fsx_windows_file_server_volume_configuration)
if host_path is not None:
pulumi.set(__self__, "host_path", host_path)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the volume. This name is referenced in the `sourceVolume`
parameter of container definition in the `mountPoints` section.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="dockerVolumeConfiguration")
def docker_volume_configuration(self) -> Optional['outputs.TaskDefinitionVolumeDockerVolumeConfiguration']:
"""
Configuration block to configure a docker volume. Detailed below.
"""
return pulumi.get(self, "docker_volume_configuration")
@property
@pulumi.getter(name="efsVolumeConfiguration")
def efs_volume_configuration(self) -> Optional['outputs.TaskDefinitionVolumeEfsVolumeConfiguration']:
"""
Configuration block for an EFS volume. Detailed below.
"""
return pulumi.get(self, "efs_volume_configuration")
@property
@pulumi.getter(name="fsxWindowsFileServerVolumeConfiguration")
def fsx_windows_file_server_volume_configuration(self) -> Optional['outputs.TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration']:
"""
Configuration block for an FSX Windows File Server volume. Detailed below.
"""
return pulumi.get(self, "fsx_windows_file_server_volume_configuration")
@property
@pulumi.getter(name="hostPath")
def host_path(self) -> Optional[str]:
"""
Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished.
"""
return pulumi.get(self, "host_path")
@pulumi.output_type
class TaskDefinitionVolumeDockerVolumeConfiguration(dict):
    @staticmethod
    def __key_warning(key: str):
        # Single camelCase wire key with a snake_case property equivalent.
        suggest = {"driverOpts": "driver_opts"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolumeDockerVolumeConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskDefinitionVolumeDockerVolumeConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskDefinitionVolumeDockerVolumeConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 autoprovision: Optional[bool] = None,
                 driver: Optional[str] = None,
                 driver_opts: Optional[Mapping[str, str]] = None,
                 labels: Optional[Mapping[str, str]] = None,
                 scope: Optional[str] = None):
        """
        :param bool autoprovision: When `true`, the Docker volume is created if
               it does not already exist. Only honored for `shared` scope.
        :param str driver: Docker volume driver to use; must match the driver
               name Docker reports, since it is used for task placement.
        :param Mapping[str, str] driver_opts: Map of Docker driver specific options.
        :param Mapping[str, str] labels: Map of custom metadata to add to your Docker volume.
        :param str scope: Volume lifecycle scope, either `task` (provisioned on
               task start, destroyed on task stop) or `shared` (persists after
               the task stops).
        """
        # Persist only the members that were actually supplied.
        for attr, value in (
                ("autoprovision", autoprovision),
                ("driver", driver),
                ("driver_opts", driver_opts),
                ("labels", labels),
                ("scope", scope)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def autoprovision(self) -> Optional[bool]:
        """
        When `true`, the Docker volume is created if it does not already exist.
        Only honored for `shared` scope.
        """
        return pulumi.get(self, "autoprovision")

    @property
    @pulumi.getter
    def driver(self) -> Optional[str]:
        """
        Docker volume driver to use; must match the driver name Docker reports,
        since it is used for task placement.
        """
        return pulumi.get(self, "driver")

    @property
    @pulumi.getter(name="driverOpts")
    def driver_opts(self) -> Optional[Mapping[str, str]]:
        """
        Map of Docker driver specific options.
        """
        return pulumi.get(self, "driver_opts")

    @property
    @pulumi.getter
    def labels(self) -> Optional[Mapping[str, str]]:
        """
        Map of custom metadata to add to your Docker volume.
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """
        Volume lifecycle scope, either `task` (provisioned on task start,
        destroyed on task stop) or `shared` (persists after the task stops).
        """
        return pulumi.get(self, "scope")
# NOTE(review): the generated docstrings below previously said "Amazon FSx for
# Windows File Server" — a copy-paste from the FSx block. This type configures
# an Amazon EFS volume, so the docs now say EFS; code is unchanged.
@pulumi.output_type
class TaskDefinitionVolumeEfsVolumeConfiguration(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "fileSystemId":
            suggest = "file_system_id"
        elif key == "authorizationConfig":
            suggest = "authorization_config"
        elif key == "rootDirectory":
            suggest = "root_directory"
        elif key == "transitEncryption":
            suggest = "transit_encryption"
        elif key == "transitEncryptionPort":
            suggest = "transit_encryption_port"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolumeEfsVolumeConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        TaskDefinitionVolumeEfsVolumeConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        TaskDefinitionVolumeEfsVolumeConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 file_system_id: str,
                 authorization_config: Optional['outputs.TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig'] = None,
                 root_directory: Optional[str] = None,
                 transit_encryption: Optional[str] = None,
                 transit_encryption_port: Optional[int] = None):
        """
        :param str file_system_id: The Amazon EFS file system ID to use.
        :param 'TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs' authorization_config: Configuration block for authorization for the Amazon EFS file system detailed below.
        :param str root_directory: The directory within the Amazon EFS file system to mount as the root directory inside the host.
        :param str transit_encryption: Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. Valid values: `ENABLED`, `DISABLED`. If this parameter is omitted, the default value of `DISABLED` is used.
        :param int transit_encryption_port: Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses.
        """
        pulumi.set(__self__, "file_system_id", file_system_id)
        if authorization_config is not None:
            pulumi.set(__self__, "authorization_config", authorization_config)
        if root_directory is not None:
            pulumi.set(__self__, "root_directory", root_directory)
        if transit_encryption is not None:
            pulumi.set(__self__, "transit_encryption", transit_encryption)
        if transit_encryption_port is not None:
            pulumi.set(__self__, "transit_encryption_port", transit_encryption_port)
    @property
    @pulumi.getter(name="fileSystemId")
    def file_system_id(self) -> str:
        """
        The Amazon EFS file system ID to use.
        """
        return pulumi.get(self, "file_system_id")
    @property
    @pulumi.getter(name="authorizationConfig")
    def authorization_config(self) -> Optional['outputs.TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig']:
        """
        Configuration block for authorization for the Amazon EFS file system detailed below.
        """
        return pulumi.get(self, "authorization_config")
    @property
    @pulumi.getter(name="rootDirectory")
    def root_directory(self) -> Optional[str]:
        """
        The directory within the Amazon EFS file system to mount as the root directory inside the host.
        """
        return pulumi.get(self, "root_directory")
    @property
    @pulumi.getter(name="transitEncryption")
    def transit_encryption(self) -> Optional[str]:
        """
        Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. Valid values: `ENABLED`, `DISABLED`. If this parameter is omitted, the default value of `DISABLED` is used.
        """
        return pulumi.get(self, "transit_encryption")
    @property
    @pulumi.getter(name="transitEncryptionPort")
    def transit_encryption_port(self) -> Optional[int]:
        """
        Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses.
        """
        return pulumi.get(self, "transit_encryption_port")
@pulumi.output_type
class TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig(dict):
    @staticmethod
    def __key_warning(key: str):
        # Single camelCase wire key with a snake_case property equivalent.
        suggest = {"accessPointId": "access_point_id"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 access_point_id: Optional[str] = None,
                 iam: Optional[str] = None):
        """
        :param str access_point_id: Access point ID to use. When specified, the
               root directory value is relative to the access point's directory
               and transit encryption must be enabled in the
               EFSVolumeConfiguration.
        :param str iam: Whether to use the Amazon ECS task IAM role from the
               task definition when mounting the Amazon EFS file system; if
               enabled, transit encryption must be enabled in the
               EFSVolumeConfiguration. Valid values: `ENABLED`, `DISABLED`
               (the default when omitted).
        """
        # Persist only the members that were actually supplied.
        for attr, value in (("access_point_id", access_point_id), ("iam", iam)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="accessPointId")
    def access_point_id(self) -> Optional[str]:
        """
        Access point ID to use. When specified, the root directory value is
        relative to the access point's directory and transit encryption must be
        enabled in the EFSVolumeConfiguration.
        """
        return pulumi.get(self, "access_point_id")

    @property
    @pulumi.getter
    def iam(self) -> Optional[str]:
        """
        Whether to use the Amazon ECS task IAM role from the task definition
        when mounting the Amazon EFS file system (`ENABLED` or `DISABLED`,
        defaulting to `DISABLED`); if enabled, transit encryption must be
        enabled in the EFSVolumeConfiguration.
        """
        return pulumi.get(self, "iam")
@pulumi.output_type
class TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property getters.
        suggest = {
            "authorizationConfig": "authorization_config",
            "fileSystemId": "file_system_id",
            "rootDirectory": "root_directory",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 authorization_config: 'outputs.TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfig',
                 file_system_id: str,
                 root_directory: str):
        """
        :param 'TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfigArgs' authorization_config: Authorization configuration block for the Amazon FSx for Windows File Server file system. Detailed below.
        :param str file_system_id: The Amazon FSx for Windows File Server file system ID to use.
        :param str root_directory: The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host.
        """
        # All three members are required, so set them unconditionally.
        for attr, value in (
                ("authorization_config", authorization_config),
                ("file_system_id", file_system_id),
                ("root_directory", root_directory)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="authorizationConfig")
    def authorization_config(self) -> 'outputs.TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfig':
        """
        Authorization configuration block for the Amazon FSx for Windows File
        Server file system. Detailed below.
        """
        return pulumi.get(self, "authorization_config")

    @property
    @pulumi.getter(name="fileSystemId")
    def file_system_id(self) -> str:
        """
        The Amazon FSx for Windows File Server file system ID to use.
        """
        return pulumi.get(self, "file_system_id")

    @property
    @pulumi.getter(name="rootDirectory")
    def root_directory(self) -> str:
        """
        The directory within the Amazon FSx for Windows File Server file system
        to mount as the root directory inside the host.
        """
        return pulumi.get(self, "root_directory")
@pulumi.output_type
class TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfig(dict):
    @staticmethod
    def __key_warning(key: str):
        # Single camelCase wire key with a snake_case property equivalent.
        suggest = {"credentialsParameter": "credentials_parameter"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationAuthorizationConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 credentials_parameter: str,
                 domain: str):
        """
        :param str credentials_parameter: Authorization credential option: the
               ARN of either an AWS Secrets Manager secret or an AWS Systems
               Manager Parameter Store parameter holding the stored credentials.
        :param str domain: Fully qualified domain name hosted by an AWS
               Directory Service Managed Microsoft AD (Active Directory) or
               self-hosted AD on Amazon EC2.
        """
        # Both members are required, so set them unconditionally.
        for attr, value in (("credentials_parameter", credentials_parameter), ("domain", domain)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="credentialsParameter")
    def credentials_parameter(self) -> str:
        """
        Authorization credential option: the ARN of either an AWS Secrets
        Manager secret or an AWS Systems Manager Parameter Store parameter
        holding the stored credentials.
        """
        return pulumi.get(self, "credentials_parameter")

    @property
    @pulumi.getter
    def domain(self) -> str:
        """
        Fully qualified domain name hosted by an AWS Directory Service Managed
        Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.
        """
        return pulumi.get(self, "domain")
@pulumi.output_type
class TaskSetCapacityProviderStrategy(dict):
    @staticmethod
    def __key_warning(key: str):
        # Single camelCase wire key with a snake_case property equivalent.
        suggest = {"capacityProvider": "capacity_provider"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskSetCapacityProviderStrategy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskSetCapacityProviderStrategy.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskSetCapacityProviderStrategy.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 capacity_provider: str,
                 weight: int,
                 base: Optional[int] = None):
        """
        :param str capacity_provider: Short name or full ARN of the capacity provider.
        :param int weight: Relative percentage of the total number of launched
               tasks that should use this capacity provider.
        :param int base: Minimum number of tasks to run on this capacity
               provider; at most one provider in a strategy may define a base.
        """
        for attr, value in (("capacity_provider", capacity_provider), ("weight", weight)):
            pulumi.set(__self__, attr, value)
        if base is not None:
            pulumi.set(__self__, "base", base)

    @property
    @pulumi.getter(name="capacityProvider")
    def capacity_provider(self) -> str:
        """
        Short name or full ARN of the capacity provider.
        """
        return pulumi.get(self, "capacity_provider")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Relative percentage of the total number of launched tasks that should
        use this capacity provider.
        """
        return pulumi.get(self, "weight")

    @property
    @pulumi.getter
    def base(self) -> Optional[int]:
        """
        Minimum number of tasks to run on this capacity provider; at most one
        provider in a strategy may define a base.
        """
        return pulumi.get(self, "base")
@pulumi.output_type
class TaskSetLoadBalancer(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property getters.
        suggest = {
            "containerName": "container_name",
            "containerPort": "container_port",
            "loadBalancerName": "load_balancer_name",
            "targetGroupArn": "target_group_arn",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskSetLoadBalancer. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskSetLoadBalancer.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskSetLoadBalancer.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 container_name: str,
                 container_port: Optional[int] = None,
                 load_balancer_name: Optional[str] = None,
                 target_group_arn: Optional[str] = None):
        """
        :param str container_name: Name of the container (as it appears in a
               container definition) to associate with the load balancer.
        :param int container_port: Container port to associate with the load
               balancer; defaults to `0` when not specified.
        :param str load_balancer_name: Name of the ELB (Classic) to associate
               with the service.
        :param str target_group_arn: ARN of the Load Balancer target group to
               associate with the service.
        """
        pulumi.set(__self__, "container_name", container_name)
        # Persist only the optional members that were actually supplied.
        for attr, value in (
                ("container_port", container_port),
                ("load_balancer_name", load_balancer_name),
                ("target_group_arn", target_group_arn)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> str:
        """
        Name of the container (as it appears in a container definition) to
        associate with the load balancer.
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="containerPort")
    def container_port(self) -> Optional[int]:
        """
        Container port to associate with the load balancer; defaults to `0`
        when not specified.
        """
        return pulumi.get(self, "container_port")

    @property
    @pulumi.getter(name="loadBalancerName")
    def load_balancer_name(self) -> Optional[str]:
        """
        Name of the ELB (Classic) to associate with the service.
        """
        return pulumi.get(self, "load_balancer_name")

    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[str]:
        """
        ARN of the Load Balancer target group to associate with the service.
        """
        return pulumi.get(self, "target_group_arn")
@pulumi.output_type
class TaskSetNetworkConfiguration(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property getters.
        suggest = {
            "assignPublicIp": "assign_public_ip",
            "securityGroups": "security_groups",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskSetNetworkConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskSetNetworkConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskSetNetworkConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 subnets: Sequence[str],
                 assign_public_ip: Optional[bool] = None,
                 security_groups: Optional[Sequence[str]] = None):
        """
        :param Sequence[str] subnets: Subnets associated with the task or
               service. Maximum of 16.
        :param bool assign_public_ip: Whether to assign a public IP address to
               the ENI (`FARGATE` launch type only). Valid values are `true`
               or `false`; default `false`.
        :param Sequence[str] security_groups: Security groups associated with
               the task or service; the VPC's default security group is used
               when none are given. Maximum of 5.
        """
        pulumi.set(__self__, "subnets", subnets)
        # Persist only the optional members that were actually supplied.
        for attr, value in (("assign_public_ip", assign_public_ip), ("security_groups", security_groups)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def subnets(self) -> Sequence[str]:
        """
        Subnets associated with the task or service. Maximum of 16.
        """
        return pulumi.get(self, "subnets")

    @property
    @pulumi.getter(name="assignPublicIp")
    def assign_public_ip(self) -> Optional[bool]:
        """
        Whether to assign a public IP address to the ENI (`FARGATE` launch type
        only). Valid values are `true` or `false`; default `false`.
        """
        return pulumi.get(self, "assign_public_ip")

    @property
    @pulumi.getter(name="securityGroups")
    def security_groups(self) -> Optional[Sequence[str]]:
        """
        Security groups associated with the task or service; the VPC's default
        security group is used when none are given. Maximum of 5.
        """
        return pulumi.get(self, "security_groups")
@pulumi.output_type
class TaskSetScale(dict):
    def __init__(__self__, *,
                 unit: Optional[str] = None,
                 value: Optional[float] = None):
        """
        :param str unit: Unit of measure for the scale value. Default: `PERCENT`.
        :param float value: Percentage of the service's `desiredCount` to scale
               the task set to; accepted values lie between 0.0 and 100.0 and
               the default is `0` when not specified.
        """
        # Persist only the members that were actually supplied.
        for attr, supplied in (("unit", unit), ("value", value)):
            if supplied is not None:
                pulumi.set(__self__, attr, supplied)

    @property
    @pulumi.getter
    def unit(self) -> Optional[str]:
        """
        Unit of measure for the scale value. Default: `PERCENT`.
        """
        return pulumi.get(self, "unit")

    @property
    @pulumi.getter
    def value(self) -> Optional[float]:
        """
        Percentage of the service's `desiredCount` to scale the task set to;
        accepted values lie between 0.0 and 100.0 and the default is `0` when
        not specified.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class TaskSetServiceRegistries(dict):
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to their snake_case property getters.
        suggest = {
            "registryArn": "registry_arn",
            "containerName": "container_name",
            "containerPort": "container_port",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskSetServiceRegistries. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskSetServiceRegistries.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        TaskSetServiceRegistries.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 registry_arn: str,
                 container_name: Optional[str] = None,
                 container_port: Optional[int] = None,
                 port: Optional[int] = None):
        """
        :param str registry_arn: ARN of the Service Registry. The currently
               supported service registry is Amazon Route 53 Auto Naming
               Service (`servicediscovery.Service` resource); see
               [Service](https://docs.aws.amazon.com/Route53/latest/APIReference/API_autonaming_Service.html).
        :param str container_name: Container name value, already present in the
               task definition, used for the service discovery service.
        :param int container_port: Port value, already present in the task
               definition, used for the service discovery service.
        :param int port: Port value used when the Service Discovery service
               specified an SRV record.
        """
        pulumi.set(__self__, "registry_arn", registry_arn)
        # Persist only the optional members that were actually supplied.
        for attr, value in (
                ("container_name", container_name),
                ("container_port", container_port),
                ("port", port)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="registryArn")
    def registry_arn(self) -> str:
        """
        ARN of the Service Registry. The currently supported service registry
        is Amazon Route 53 Auto Naming Service (`servicediscovery.Service`
        resource); see [Service](https://docs.aws.amazon.com/Route53/latest/APIReference/API_autonaming_Service.html).
        """
        return pulumi.get(self, "registry_arn")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """
        Container name value, already present in the task definition, used for
        the service discovery service.
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="containerPort")
    def container_port(self) -> Optional[int]:
        """
        Port value, already present in the task definition, used for the
        service discovery service.
        """
        return pulumi.get(self, "container_port")

    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """
        Port value used when the Service Discovery service specified an SRV
        record.
        """
        return pulumi.get(self, "port")
@pulumi.output_type
class GetClusterSettingResult(dict):
    def __init__(__self__, *,
                 name: str,
                 value: str):
        """
        :param str name: Name of the cluster setting.
        :param str value: Value assigned to the cluster setting.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the cluster setting.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Value assigned to the cluster setting.
        """
        return pulumi.get(self, "value")
| 43.756431
| 348
| 0.669615
| 9,091
| 79,943
| 5.688373
| 0.055989
| 0.015837
| 0.023379
| 0.034169
| 0.766616
| 0.73761
| 0.706824
| 0.65177
| 0.63367
| 0.613579
| 0
| 0.002359
| 0.241697
| 79,943
| 1,826
| 349
| 43.780394
| 0.850695
| 0.318677
| 0
| 0.626655
| 1
| 0.021183
| 0.202594
| 0.091759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173875
| false
| 0
| 0.005296
| 0.001765
| 0.331862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
86ea66e0e4ef92c7f105de913c5a3cee04059e3f
| 244
|
py
|
Python
|
src/matrix/views.py
|
SarahLightBourne/matrix-text
|
b02e1581daea8cb057ce85caa0c1befc9d1c9f18
|
[
"MIT"
] | 3
|
2021-11-24T11:22:18.000Z
|
2021-11-24T11:22:25.000Z
|
src/matrix/views.py
|
SarahLightBourne/matrix-text
|
b02e1581daea8cb057ce85caa0c1befc9d1c9f18
|
[
"MIT"
] | null | null | null |
src/matrix/views.py
|
SarahLightBourne/matrix-text
|
b02e1581daea8cb057ce85caa0c1befc9d1c9f18
|
[
"MIT"
] | null | null | null |
from django.views import generic
from django.shortcuts import redirect
def index_view(request):
return redirect('matrix:collection', name='hello')
class CollectionView(generic.TemplateView):
template_name = 'matrix/collection.html'
| 22.181818
| 54
| 0.782787
| 29
| 244
| 6.517241
| 0.724138
| 0.10582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122951
| 244
| 10
| 55
| 24.4
| 0.883178
| 0
| 0
| 0
| 0
| 0
| 0.180328
| 0.090164
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
86ec6920991ba94158c928627556a00fb41aebaf
| 67
|
py
|
Python
|
Thesis@3.9.1/Lib/site-packages/django/contrib/humanize/__init__.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/django/contrib/humanize/__init__.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/django/contrib/humanize/__init__.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
default_app_config = "django.contrib.humanize.apps.HumanizeConfig"
| 33.5
| 66
| 0.850746
| 8
| 67
| 6.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044776
| 67
| 1
| 67
| 67
| 0.859375
| 0
| 0
| 0
| 0
| 0
| 0.641791
| 0.641791
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8105257ee5e98da5c1b7cca25ff487f8b3db74b3
| 112
|
py
|
Python
|
swap_start/tf_train/special_train/begin3.py
|
yudongqiu/gomoku
|
4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06
|
[
"MIT"
] | 3
|
2018-06-12T09:03:41.000Z
|
2019-01-14T05:34:57.000Z
|
swap_start/tf_train/special_train/begin3.py
|
yudongqiu/gomoku
|
4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06
|
[
"MIT"
] | null | null | null |
swap_start/tf_train/special_train/begin3.py
|
yudongqiu/gomoku
|
4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06
|
[
"MIT"
] | null | null | null |
# black white black
begin_lib = [[ ( 8,8), ( 4, 12), ( 9, 3)]]#, (6,12), (6,6), (7,11)]]
| 37.333333
| 69
| 0.348214
| 17
| 112
| 2.235294
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211268
| 0.366071
| 112
| 2
| 70
| 56
| 0.323944
| 0.446429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8128c4b73c6024161078f01ea30e68fb0de15baf
| 212
|
py
|
Python
|
aiocloudpayments/endpoints/test.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
aiocloudpayments/endpoints/test.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
aiocloudpayments/endpoints/test.py
|
drforse/aiocloudpayments
|
25b8827250279335d037754dca6978bc79c9b18d
|
[
"MIT"
] | null | null | null |
from .base import CpEndpoint, Request
class CpTestEndpoint(CpEndpoint):
__returning__ = None
def build_request(self) -> Request:
return Request(endpoint="test", x_request_id=self.x_request_id)
| 23.555556
| 71
| 0.740566
| 26
| 212
| 5.692308
| 0.653846
| 0.108108
| 0.135135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169811
| 212
| 8
| 72
| 26.5
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0.018868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d4e60a32cb0c7b23ef82c4f2bffda54bb007f408
| 5,532
|
py
|
Python
|
src/mango/test/18-json-sort.py
|
mtenrero/couchdb-vetcontrol
|
b7ede3ededdf0072c73f08d8f1217cb723b03f7a
|
[
"Apache-2.0"
] | 1
|
2022-01-14T20:52:55.000Z
|
2022-01-14T20:52:55.000Z
|
src/mango/test/18-json-sort.py
|
mtenrero/couchdb-vetcontrol
|
b7ede3ededdf0072c73f08d8f1217cb723b03f7a
|
[
"Apache-2.0"
] | 1
|
2017-09-05T15:46:20.000Z
|
2017-09-05T15:46:20.000Z
|
src/mango/test/18-json-sort.py
|
garrensmith/couchdb
|
25838d078b1cf8ef5554f41c0b51d8628ca712ba
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mango
import copy
import unittest
DOCS = [
{"_id": "1", "name": "Jimi", "age": 10, "cars": 1},
{"_id": "2", "name": "Eddie", "age": 20, "cars": 1},
{"_id": "3", "name": "Jane", "age": 30, "cars": 2},
{"_id": "4", "name": "Mary", "age": 40, "cars": 2},
{"_id": "5", "name": "Sam", "age": 50, "cars": 3},
]
class JSONIndexSortOptimisations(mango.DbPerClass):
def setUp(self):
self.db.recreate()
self.db.save_docs(copy.deepcopy(DOCS))
def test_works_for_basic_case(self):
self.db.create_index(["cars", "age"], name="cars-age")
selector = {"cars": "2", "age": {"$gt": 10}}
explain = self.db.find(selector, sort=["age"], explain=True)
self.assertEqual(explain["index"]["name"], "cars-age")
self.assertEqual(explain["mrargs"]["direction"], "fwd")
def test_works_for_all_fields_specified(self):
self.db.create_index(["cars", "age"], name="cars-age")
selector = {"cars": "2", "age": {"$gt": 10}}
explain = self.db.find(selector, sort=["cars", "age"], explain=True)
self.assertEqual(explain["index"]["name"], "cars-age")
def test_works_for_no_sort_fields_specified(self):
self.db.create_index(["cars", "age"], name="cars-age")
selector = {"cars": {"$gt": 10}, "age": {"$gt": 10}}
explain = self.db.find(selector, explain=True)
self.assertEqual(explain["index"]["name"], "cars-age")
def test_works_for_opp_dir_sort(self):
self.db.create_index(["cars", "age"], name="cars-age")
selector = {"cars": "2", "age": {"$gt": 10}}
explain = self.db.find(selector, sort=[{"age": "desc"}], explain=True)
self.assertEqual(explain["index"]["name"], "cars-age")
self.assertEqual(explain["mrargs"]["direction"], "rev")
def test_not_work_for_non_constant_field(self):
self.db.create_index(["cars", "age"], name="cars-age")
selector = {"cars": {"$gt": 10}, "age": {"$gt": 10}}
try:
self.db.find(selector, explain=True, sort=["age"])
raise Exception("Should not get here")
except Exception as e:
resp = e.response.json()
self.assertEqual(resp["error"], "no_usable_index")
def test_three_index_one(self):
self.db.create_index(["cars", "age", "name"], name="cars-age-name")
selector = {"cars": "2", "age": 10, "name": {"$gt": "AA"}}
explain = self.db.find(selector, sort=["name"], explain=True)
self.assertEqual(explain["index"]["name"], "cars-age-name")
def test_three_index_two(self):
self.db.create_index(["cars", "age", "name"], name="cars-age-name")
selector = {"cars": "2", "name": "Eddie", "age": {"$gt": 10}}
explain = self.db.find(selector, sort=["age"], explain=True)
self.assertEqual(explain["index"]["name"], "cars-age-name")
def test_three_index_fails(self):
self.db.create_index(["cars", "age", "name"], name="cars-age-name")
selector = {"name": "Eddie", "age": {"$gt": 1}, "cars": {"$gt": "1"}}
try:
self.db.find(selector, explain=True, sort=["name"])
raise Exception("Should not get here")
except Exception as e:
resp = e.response.json()
self.assertEqual(resp["error"], "no_usable_index")
def test_empty_sort(self):
self.db.create_index(["cars", "age", "name"], name="cars-age-name")
selector = {"name": {"$gt": "Eddie"}, "age": 10, "cars": {"$gt": "1"}}
explain = self.db.find(selector, explain=True)
self.assertEqual(explain["index"]["name"], "cars-age-name")
def test_in_between(self):
self.db.create_index(["cars", "age", "name"], name="cars-age-name")
selector = {"name": "Eddie", "age": 10, "cars": {"$gt": "1"}}
explain = self.db.find(selector, explain=True)
self.assertEqual(explain["index"]["name"], "cars-age-name")
try:
self.db.find(selector, sort=["cars", "name"], explain=True)
raise Exception("Should not get here")
except Exception as e:
resp = e.response.json()
self.assertEqual(resp["error"], "no_usable_index")
def test_ignore_after_set_sort_value(self):
self.db.create_index(["cars", "age", "name"], name="cars-age-name")
selector = {"age": {"$gt": 10}, "cars": 2, "name": {"$gt": "A"}}
explain = self.db.find(selector, sort=["age"], explain=True)
self.assertEqual(explain["index"]["name"], "cars-age-name")
def test_not_use_index_if_other_fields_in_sort(self):
self.db.create_index(["cars", "age"], name="cars-age")
selector = {"age": 10, "cars": {"$gt": "1"}}
try:
self.db.find(selector, sort=["cars", "name"], explain=True)
raise Exception("Should not get here")
except Exception as e:
resp = e.response.json()
self.assertEqual(resp["error"], "no_usable_index")
| 44.97561
| 79
| 0.588937
| 726
| 5,532
| 4.380165
| 0.198347
| 0.074843
| 0.07956
| 0.073585
| 0.716981
| 0.715409
| 0.705031
| 0.705031
| 0.677987
| 0.67673
| 0
| 0.013975
| 0.210954
| 5,532
| 122
| 80
| 45.344262
| 0.714548
| 0.093818
| 0
| 0.574468
| 0
| 0
| 0.187962
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 1
| 0.138298
| false
| 0
| 0.031915
| 0
| 0.180851
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
be0f2b2dbb35fd337f10ad06c01908cd3c83341f
| 14,888
|
py
|
Python
|
gs_quant/test/api/test_risk_models.py
|
TopherD1992/gs-quant
|
253ed75519abbbe407e17e39ca5ed7340fa010dc
|
[
"Apache-2.0"
] | 1
|
2021-01-06T06:25:40.000Z
|
2021-01-06T06:25:40.000Z
|
gs_quant/test/api/test_risk_models.py
|
TopherD1992/gs-quant
|
253ed75519abbbe407e17e39ca5ed7340fa010dc
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/test/api/test_risk_models.py
|
TopherD1992/gs-quant
|
253ed75519abbbe407e17e39ca5ed7340fa010dc
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2018 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
from gs_quant.api.gs.risk_models import GsRiskModelApi
from gs_quant.session import *
from gs_quant.target.risk_models import RiskModel, RiskModelFactor, RiskModelCalendar
def test_get_risk_models(mocker):
mock_response = {
'results': [
RiskModel.from_dict({
"coverage": "Global",
"id": "WW_TEST_MODEL",
"name": "World Wide Medium Term Test Model",
"term": "Medium",
"vendor": "Goldman Sachs",
"universeIdentifier": "gsid",
"version": 4
}),
RiskModel.from_dict({
"coverage": "Global",
"id": "WW_TEST_MODEL_2",
"name": "World Wide Medium Term Test Model 2",
"term": "Medium",
"vendor": "Goldman Sachs",
"universeIdentifier": "gsid",
"version": 2
})
],
'totalResults': 2
}
expected_response = [
RiskModel(coverage='Global', id='WW_TEST_MODEL', name='World Wide Medium Term Test Model', term='Medium',
vendor='Goldman Sachs', universe_identifier='gsid', version=4),
RiskModel(coverage='Global', id='WW_TEST_MODEL_2', name='World Wide Medium Term Test Model 2', term='Medium',
vendor='Goldman Sachs', universe_identifier='gsid', version=2)
]
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_get', return_value=mock_response)
# run test
response = GsRiskModelApi.get_risk_models()
GsSession.current._get.assert_called_with('/risk/models', cls=RiskModel)
assert response == expected_response
def test_get_risk_model(mocker):
model_id = 'WW_TEST_MODEL'
model = RiskModel.from_dict({
"coverage": "Global",
"id": "WW_TEST_MODEL",
"name": "World Wide Medium Term Test Model",
"term": "Medium",
"vendor": "Goldman Sachs",
"universeIdentifier": "gsid",
"version": 4
})
expected_response = RiskModel(coverage='Global', id='WW_TEST_MODEL', name='World Wide Medium Term Test Model',
term='Medium', vendor='Goldman Sachs', version=4, universe_identifier='gsid')
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_get', return_value=model)
# run test
response = GsRiskModelApi.get_risk_model(model_id)
GsSession.current._get.assert_called_with('/risk/models/{id}'.format(id=model_id), cls=RiskModel)
assert response == expected_response
def test_create_risk_model(mocker):
model = RiskModel.from_dict({
"coverage": "Global",
"id": "WW_TEST_MODEL",
"name": "World Wide Medium Term Test Model",
"term": "Medium",
"vendor": "Goldman Sachs",
"universeIdentifier": "gsid",
"version": 4
})
expected_response = RiskModel(coverage='Global', id='WW_TEST_MODEL', name='World Wide Medium Term Test Model',
term='Medium',
vendor='Goldman Sachs', version=4, universe_identifier='gsid')
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_post', return_value=model)
# run test
response = GsRiskModelApi.create_risk_model(model)
GsSession.current._post.assert_called_with('/risk/models', model, cls=RiskModel)
assert response == expected_response
def test_update_risk_model(mocker):
model = RiskModel.from_dict({
"coverage": "Global",
"id": "WW_TEST_MODEL",
"name": "World Wide Medium Term Test Model",
"term": "Medium",
"vendor": "Goldman Sachs",
"universeIdentifier": "gsid",
"version": 4
})
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_put', return_value=model)
# run test
response = GsRiskModelApi.update_risk_model(model)
GsSession.current._put.assert_called_with('/risk/models/{id}'.format(id='WW_TEST_MODEL'), model, cls=RiskModel)
assert response == model
def test_delete_risk_model(mocker):
mock_response = "Deleted Risk Model"
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_delete', return_value=mock_response)
# run test
response = GsRiskModelApi.delete_risk_model('model id')
GsSession.current._delete.assert_called_with('/risk/models/{id}'.format(id='model id'))
assert response == mock_response
def test_get_risk_model_calendar(mocker):
calendar = RiskModelCalendar.from_dict({
"businessDates": ["2020-01-01", "2020-11-01"]
})
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_get', return_value=calendar)
# run test
response = GsRiskModelApi.get_risk_model_calendar('id')
GsSession.current._get.assert_called_with('/risk/models/{id}/calendar'.format(id='id'), cls=RiskModelCalendar)
assert response == calendar
def test_upload_risk_model_calendar(mocker):
calendar = RiskModelCalendar.from_dict({
"businessDates": [
"2020-01-01",
"2020-11-01"
]
})
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_put', return_value=calendar)
# run test
response = GsRiskModelApi.upload_risk_model_calendar('WW_TEST_MODEL', calendar)
GsSession.current._put.assert_called_with('/risk/models/{id}/calendar'.format(id='WW_TEST_MODEL'),
calendar, cls=RiskModelCalendar)
assert response == calendar
def test_get_risk_model_factors(mocker):
factors = {'results': [
RiskModelFactor.from_dict({
"type": "Factor",
"identifier": "Factor1"
}),
RiskModelFactor.from_dict({
"type": "Category",
"identifier": "Factor2"
})
],
'totalResults': 2
}
expected_response = [
RiskModelFactor(identifier='Factor1', type='Factor'),
RiskModelFactor(identifier='Factor2', type='Category')
]
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_get', return_value=factors)
# run test
response = GsRiskModelApi.get_risk_model_factors(model_id='id')
GsSession.current._get.assert_called_with('/risk/models/id/factors', cls=RiskModelFactor)
assert response == expected_response
def test_create_risk_model_factor(mocker):
factor = RiskModelFactor.from_dict({
"identifier": "Factor1",
"type": "Factor"
})
expected_response = RiskModelFactor(identifier='Factor1', type='Factor')
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_post', return_value=factor)
# run test
response = GsRiskModelApi.create_risk_model_factor(model_id='id', factor=factor)
GsSession.current._post.assert_called_with('/risk/models/id/factors', factor, cls=RiskModelFactor)
assert response == expected_response
def test_get_risk_model_factor(mocker):
factor = RiskModelFactor.from_dict({
"identifier": "Factor1",
"type": "Factor"
})
expected_response = RiskModelFactor(identifier='Factor1', type='Factor')
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_get', return_value=factor)
# run test
response = GsRiskModelApi.get_risk_model_factor(model_id='id', factor_id='factor')
GsSession.current._get.assert_called_with('/risk/models/{id}/factors/{identifier}'
.format(id='id', identifier='factor'))
assert response == expected_response
def test_update_risk_model_factor(mocker):
factor = RiskModelFactor.from_dict({
"identifier": "factor",
"type": "Factor"
})
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_put', return_value=factor)
# run test
response = GsRiskModelApi.update_risk_model_factor(model_id='id', factor_id='factor', factor=factor)
GsSession.current._put.assert_called_with('/risk/models/{id}/factors/{identifier}'
.format(id='id', identifier='factor'), factor, cls=RiskModelFactor)
assert response == factor
def test_get_risk_model_coverage(mocker):
results = {
"results": [
RiskModelFactor.from_dict({
"model": "AXUS4S",
"businessDate": "2020-11-02"
}),
RiskModelFactor.from_dict({
"model": "AXAU4M",
"businessDate": "2020-11-03"
})
]
}
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_post', return_value=results)
# run test
response = GsRiskModelApi.get_risk_model_coverage()
GsSession.current._post.assert_called_with('/risk/models/coverage', {})
assert response == results['results']
def test_upload_risk_model_data(mocker):
risk_model_data = {
'date': '2020-02-05',
'assetData': {
'universe': ['2407966', '2046251', 'USD'],
'specificRisk': [12.09, 45.12, 3.09],
'factorExposure': [{'1': 0.23, '2': 0.023}],
'historicalBeta': [0.12, 0.45, 1.2]
},
'factorData': [{
'factorId': '1',
'factorName': 'USD',
'factorCategory': 'Currency',
'factorCategoryId': 'CUR'
}],
'covarianceMatrix': [[0.089, 0.0123, 0.345]],
'issuerSpecificCovariance': {
'universeId1': ['2407966'],
'universeId2': ['2046251'],
'covariance': [0.03754]
},
'factorPortfolios': {
'universe': ['2407966', '2046251'],
'portfolio': [{'factorId': 2, 'weights': [0.25, 0.75]}]
}
}
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_post', return_value='Successfully uploaded')
# run test
response = GsRiskModelApi.upload_risk_model_data(model_id='id', model_data=risk_model_data)
GsSession.current._post.assert_called_with('/risk/models/data/{id}'.format(id='id'), risk_model_data)
assert response == 'Successfully uploaded'
def test_get_risk_model_data(mocker):
query = {
'startDate': '2020-01-01',
'endDate': '2020-03-03'
}
results = {
'results': [{
'date': '2020-02-05',
'assetData': {
'universe': ['2407966', '2046251', 'USD'],
'specificRisk': [12.09, 45.12, 3.09],
'factorExposure': [{'1': 0.23, '2': 0.023}],
'historicalBeta': [0.12, 0.45, 1.2]
},
'factorData': [{
'factorId': '1',
'factorName': 'USD',
'factorCategory': 'Currency',
'factorCategoryId': 'CUR'
}],
'covarianceMatrix': [[0.089, 0.0123, 0.345]],
'issuerSpecificCovariance': {
'universeId1': ['2407966'],
'universeId2': ['2046251'],
'covariance': [0.03754]
},
'factorPortfolios': {
'universe': ['2407966', '2046251'],
'portfolio':[{'factorId': 2, 'weights': [0.25, 0.75]}]
}
}],
'totalResults': 1,
'missingDates': []
}
# mock GsSession
mocker.patch.object(
GsSession.__class__,
'default_value',
return_value=GsSession.get(
Environment.QA,
'client_id',
'secret'))
mocker.patch.object(GsSession.current, '_post', return_value=results)
# run test
response = GsRiskModelApi.get_risk_model_data(model_id='id', start_date=dt.date(2020, 1, 1),
end_date=dt.date(2020, 3, 3))
GsSession.current._post.assert_called_with('/risk/models/data/{id}/query'.format(id='id'), query)
assert response == results
| 32.577681
| 117
| 0.59464
| 1,515
| 14,888
| 5.616502
| 0.125413
| 0.031731
| 0.055941
| 0.085556
| 0.820308
| 0.796451
| 0.786109
| 0.709249
| 0.664003
| 0.591491
| 0
| 0.030131
| 0.27774
| 14,888
| 456
| 118
| 32.649123
| 0.761183
| 0.059713
| 0
| 0.649425
| 0
| 0
| 0.210756
| 0.020983
| 0
| 0
| 0
| 0
| 0.08046
| 1
| 0.04023
| false
| 0
| 0.011494
| 0
| 0.051724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
077c3f033dcd03f41efd0dd18cd8e6fc68737f70
| 17,480
|
py
|
Python
|
tests/CLI/modules/dedicatedhost_tests.py
|
rtpg/softlayer-python
|
c925e581ca7ba3fcf7d5cd495f171c88be9cb78b
|
[
"MIT"
] | 2
|
2016-07-06T15:31:48.000Z
|
2016-07-06T15:40:25.000Z
|
tests/CLI/modules/dedicatedhost_tests.py
|
rtpg/softlayer-python
|
c925e581ca7ba3fcf7d5cd495f171c88be9cb78b
|
[
"MIT"
] | 73
|
2016-07-05T15:17:51.000Z
|
2016-08-18T18:16:29.000Z
|
tests/CLI/modules/dedicatedhost_tests.py
|
kyubifire/softlayer-python
|
bee36eec73474a8b6a1813fbbcc0512f81bf1779
|
[
"MIT"
] | null | null | null |
"""
SoftLayer.tests.CLI.modules.dedicatedhosts_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import json
import mock
import SoftLayer
from SoftLayer.CLI import exceptions
from SoftLayer.fixtures import SoftLayer_Product_Package
from SoftLayer.fixtures import SoftLayer_Virtual_DedicatedHost
from SoftLayer import testing
class DedicatedHostsTests(testing.TestCase):
def set_up(self):
self.dedicated_host = SoftLayer.DedicatedHostManager(self.client)
def test_list_dedicated_hosts(self):
result = self.run_command(['dedicatedhost', 'list'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output),
[{
'cpuCount': 56,
'datacenter': 'dal05',
'diskCapacity': 1200,
'guestCount': 1,
'id': 12345,
'memoryCapacity': 242,
'name': 'test-dedicated'
}]
)
def test_details(self):
mock = self.set_mock('SoftLayer_Virtual_DedicatedHost', 'getObject')
mock.return_value = SoftLayer_Virtual_DedicatedHost.getObjectById
result = self.run_command(['dedicatedhost', 'detail', '12345', '--price', '--guests'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output),
{
'cpu count': 56,
'create date': '2017-11-02T11:40:56-07:00',
'datacenter': 'dal05',
'disk capacity': 1200,
'guest count': 1,
'guests': [{
'domain': 'test.com',
'hostname': 'test-dedicated',
'id': 12345,
'uuid': 'F9329795-4220-4B0A-B970-C86B950667FA'
}],
'id': 12345,
'memory capacity': 242,
'modify date': '2017-11-06T11:38:20-06:00',
'name': 'test-dedicated',
'owner': 'test-dedicated',
'price_rate': 1515.556,
'router hostname': 'bcr01a.dal05',
'router id': 12345}
)
def test_details_no_owner(self):
mock = self.set_mock('SoftLayer_Virtual_DedicatedHost', 'getObject')
retVal = SoftLayer_Virtual_DedicatedHost.getObjectById
retVal['billingItem'] = {}
mock.return_value = retVal
result = self.run_command(
['dedicatedhost', 'detail', '44701', '--price', '--guests'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output), {'cpu count': 56,
'create date': '2017-11-02T11:40:56-07:00',
'datacenter': 'dal05',
'disk capacity': 1200,
'guest count': 1,
'guests': [{
'domain': 'test.com',
'hostname': 'test-dedicated',
'id': 12345,
'uuid': 'F9329795-4220-4B0A-B970-C86B950667FA'}],
'id': 12345,
'memory capacity': 242,
'modify date': '2017-11-06T11:38:20-06:00',
'name': 'test-dedicated',
'owner': None,
'price_rate': 0,
'router hostname': 'bcr01a.dal05',
'router id': 12345}
)
def test_create_options(self):
mock = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock.return_value = SoftLayer_Product_Package.getAllObjectsDH
result = self.run_command(['dh', 'create-options'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output), [[
{
'datacenter': 'Dallas 5',
'value': 'dal05'
}],
[{
'Dedicated Virtual Host Flavor(s)':
'56 Cores X 242 RAM X 1.2 TB',
'value': '56_CORES_X_242_RAM_X_1_4_TB'
}
]]
)
def test_create_options_with_only_datacenter(self):
mock = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock.return_value = SoftLayer_Product_Package.getAllObjectsDH
result = self.run_command(['dh', 'create-options', '-d=dal05'])
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_create_options_get_routers(self):
mock = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock.return_value = SoftLayer_Product_Package.getAllObjectsDH
result = self.run_command(['dh',
'create-options',
'--datacenter=dal05',
'--flavor=56_CORES_X_242_RAM_X_1_4_TB'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output), [[
{
'Available Backend Routers': 'bcr01a.dal05'
},
{
'Available Backend Routers': 'bcr02a.dal05'
},
{
'Available Backend Routers': 'bcr03a.dal05'
},
{
'Available Backend Routers': 'bcr04a.dal05'
}
]]
)
def test_create(self):
SoftLayer.CLI.formatting.confirm = mock.Mock()
SoftLayer.CLI.formatting.confirm.return_value = True
mock_package_obj = self.set_mock('SoftLayer_Product_Package',
'getAllObjects')
mock_package_obj.return_value = SoftLayer_Product_Package.getAllObjectsDH
result = self.run_command(['dedicatedhost', 'create',
'--hostname=test-dedicated',
'--domain=test.com',
'--datacenter=dal05',
'--flavor=56_CORES_X_242_RAM_X_1_4_TB',
'--billing=hourly'])
self.assert_no_fail(result)
args = ({
'hardware': [{
'domain': 'test.com',
'primaryBackendNetworkComponent': {
'router': {
'id': 12345
}
},
'hostname': 'test-dedicated'
}],
'useHourlyPricing': True,
'location': 'DALLAS05',
'packageId': 813,
'complexType': 'SoftLayer_Container_Product_Order_Virtual_DedicatedHost',
'prices': [{
'id': 200269
}],
'quantity': 1},)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
args=args)
def test_create_with_gpu(self):
SoftLayer.CLI.formatting.confirm = mock.Mock()
SoftLayer.CLI.formatting.confirm.return_value = True
mock_package_obj = self.set_mock('SoftLayer_Product_Package',
'getAllObjects')
mock_package_obj.return_value = SoftLayer_Product_Package.getAllObjectsDHGpu
result = self.run_command(['dedicatedhost', 'create',
'--hostname=test-dedicated',
'--domain=test.com',
'--datacenter=dal05',
'--flavor=56_CORES_X_484_RAM_X_1_5_TB_X_2_GPU_P100',
'--billing=hourly'])
self.assert_no_fail(result)
args = ({
'hardware': [{
'domain': 'test.com',
'primaryBackendNetworkComponent': {
'router': {
'id': 12345
}
},
'hostname': 'test-dedicated'
}],
'prices': [{
'id': 200269
}],
'location': 'DALLAS05',
'packageId': 813,
'complexType': 'SoftLayer_Container_Product_Order_Virtual_DedicatedHost',
'useHourlyPricing': True,
'quantity': 1},)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
args=args)
def test_create_verify(self):
SoftLayer.CLI.formatting.confirm = mock.Mock()
SoftLayer.CLI.formatting.confirm.return_value = True
mock_package_obj = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock_package_obj.return_value = SoftLayer_Product_Package.getAllObjectsDH
mock_package = self.set_mock('SoftLayer_Product_Order', 'verifyOrder')
mock_package.return_value = SoftLayer_Product_Package.verifyOrderDH
result = self.run_command(['dedicatedhost', 'create',
'--verify',
'--hostname=test-dedicated',
'--domain=test.com',
'--datacenter=dal05',
'--flavor=56_CORES_X_242_RAM_X_1_4_TB',
'--billing=hourly'])
self.assert_no_fail(result)
args = ({
'useHourlyPricing': True,
'hardware': [{
'hostname': 'test-dedicated',
'domain': 'test.com',
'primaryBackendNetworkComponent': {
'router': {
'id': 12345
}
}
}],
'packageId': 813, 'prices': [{'id': 200269}],
'location': 'DALLAS05',
'complexType': 'SoftLayer_Container_Product_Order_Virtual_DedicatedHost',
'quantity': 1},)
self.assert_called_with('SoftLayer_Product_Order', 'verifyOrder',
args=args)
result = self.run_command(['dh', 'create',
'--verify',
'--hostname=test-dedicated',
'--domain=test.com',
'--datacenter=dal05',
'--flavor=56_CORES_X_242_RAM_X_1_4_TB',
'--billing=monthly'])
self.assert_no_fail(result)
args = ({
'useHourlyPricing': True,
'hardware': [{
'hostname': 'test-dedicated',
'domain': 'test.com',
'primaryBackendNetworkComponent': {
'router': {
'id': 12345
}
}
}],
'packageId': 813, 'prices': [{'id': 200269}],
'location': 'DALLAS05',
'complexType': 'SoftLayer_Container_Product_Order_Virtual_DedicatedHost',
'quantity': 1},)
self.assert_called_with('SoftLayer_Product_Order', 'verifyOrder',
args=args)
def test_create_aborted(self):
SoftLayer.CLI.formatting.confirm = mock.Mock()
SoftLayer.CLI.formatting.confirm.return_value = False
mock_package_obj = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock_package_obj.return_value = SoftLayer_Product_Package.getAllObjectsDH
result = self.run_command(['dh', 'create',
'--hostname=test-dedicated',
'--domain=test.com',
'--datacenter=dal05',
'--flavor=56_CORES_X_242_RAM_X_1_4_TB',
'--billing=monthly'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
def test_create_verify_no_price_or_more_than_one(self):
mock_package_obj = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock_package_obj.return_value = SoftLayer_Product_Package.getAllObjectsDH
mock_package = self.set_mock('SoftLayer_Product_Order', 'verifyOrder')
ret_val = SoftLayer_Product_Package.verifyOrderDH
ret_val['prices'] = []
mock_package.return_value = ret_val
result = self.run_command(['dedicatedhost', 'create',
'--verify',
'--hostname=test-dedicated',
'--domain=test.com',
'--datacenter=dal05',
'--flavor=56_CORES_X_242_RAM_X_1_4_TB',
'--billing=hourly'])
self.assertIsInstance(result.exception, exceptions.ArgumentError)
args = ({
'hardware': [{
'domain': 'test.com',
'primaryBackendNetworkComponent': {
'router': {
'id': 12345
}
},
'hostname': 'test-dedicated'
}],
'prices': [{
'id': 200269
}],
'location': 'DALLAS05',
'packageId': 813,
'complexType': 'SoftLayer_Container_Product_Order_Virtual_DedicatedHost',
'useHourlyPricing': True,
'quantity': 1},)
self.assert_called_with('SoftLayer_Product_Order', 'verifyOrder', args=args)
@mock.patch('SoftLayer.DedicatedHostManager.cancel_host')
def test_cancel_host(self, cancel_mock):
result = self.run_command(['--really', 'dedicatedhost', 'cancel', '12345'])
self.assert_no_fail(result)
cancel_mock.assert_called_with(12345)
self.assertEqual(str(result.output), 'Dedicated Host 12345 was cancelled\n')
def test_cancel_host_abort(self):
result = self.run_command(['dedicatedhost', 'cancel', '12345'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
def test_cancel_guests(self):
vs1 = {'id': 987, 'fullyQualifiedDomainName': 'foobar.example.com'}
vs2 = {'id': 654, 'fullyQualifiedDomainName': 'wombat.example.com'}
guests = self.set_mock('SoftLayer_Virtual_DedicatedHost', 'getGuests')
guests.return_value = [vs1, vs2]
vs_status1 = {'id': 987, 'server name': 'foobar.example.com', 'status': 'Cancelled'}
vs_status2 = {'id': 654, 'server name': 'wombat.example.com', 'status': 'Cancelled'}
expected_result = [vs_status1, vs_status2]
result = self.run_command(['--really', 'dedicatedhost', 'cancel-guests', '12345'])
self.assert_no_fail(result)
self.assertEqual(expected_result, json.loads(result.output))
def test_cancel_guests_empty_list(self):
guests = self.set_mock('SoftLayer_Virtual_DedicatedHost', 'getGuests')
guests.return_value = []
result = self.run_command(['--really', 'dedicatedhost', 'cancel-guests', '12345'])
self.assert_no_fail(result)
self.assertEqual(str(result.output), 'There is not any guest into the dedicated host 12345\n')
def test_cancel_guests_abort(self):
result = self.run_command(['dedicatedhost', 'cancel-guests', '12345'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
def test_list_guests(self):
result = self.run_command(['dh', 'list-guests', '123', '--tag=tag'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output),
[{'hostname': 'vs-test1',
'domain': 'test.sftlyr.ws',
'primary_ip': '172.16.240.2',
'id': 200,
'power_state': 'Running',
'backend_ip': '10.45.19.37'},
{'hostname': 'vs-test2',
'domain': 'test.sftlyr.ws',
'primary_ip': '172.16.240.7',
'id': 202,
'power_state': 'Running',
'backend_ip': '10.45.19.35'}])
def _get_cancel_guests_return(self):
vs_status1 = {'id': 123, 'fqdn': 'foobar.example.com', 'status': 'Cancelled'}
vs_status2 = {'id': 456, 'fqdn': 'wombat.example.com', 'status': 'Cancelled'}
return [vs_status1, vs_status2]
| 42.948403
| 106
| 0.485812
| 1,435
| 17,480
| 5.670383
| 0.156794
| 0.051124
| 0.053705
| 0.044242
| 0.776207
| 0.7445
| 0.710581
| 0.703085
| 0.673098
| 0.63709
| 0
| 0.049795
| 0.400286
| 17,480
| 406
| 107
| 43.054187
| 0.726414
| 0.007494
| 0
| 0.636628
| 0
| 0
| 0.257834
| 0.093139
| 0
| 0
| 0
| 0
| 0.104651
| 1
| 0.055233
| false
| 0
| 0.020349
| 0
| 0.081395
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
077cd528140bb01e92e5c146dde49a484c3afcff
| 75
|
py
|
Python
|
utilities/__init__.py
|
imri0t/ri0t-bot
|
35e1f9e67d3c35a2cf06a0db3afc544d853b2c32
|
[
"MIT"
] | 2
|
2019-04-08T04:49:31.000Z
|
2019-04-17T05:13:36.000Z
|
utilities/__init__.py
|
imri0t/ri0t-bot
|
35e1f9e67d3c35a2cf06a0db3afc544d853b2c32
|
[
"MIT"
] | null | null | null |
utilities/__init__.py
|
imri0t/ri0t-bot
|
35e1f9e67d3c35a2cf06a0db3afc544d853b2c32
|
[
"MIT"
] | null | null | null |
'''package for ri0t-bot utilities'''
#bot source code created by im.ri0t
| 25
| 37
| 0.72
| 12
| 75
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0.16
| 75
| 3
| 38
| 25
| 0.825397
| 0.866667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0780bfa58932ffe38571e37966ce23b6092936fe
| 44
|
py
|
Python
|
git2nd/__init__.py
|
miyagaw61/git2nd
|
ff159540264dab5cc4afae56227d3063cc2903e0
|
[
"MIT"
] | null | null | null |
git2nd/__init__.py
|
miyagaw61/git2nd
|
ff159540264dab5cc4afae56227d3063cc2903e0
|
[
"MIT"
] | null | null | null |
git2nd/__init__.py
|
miyagaw61/git2nd
|
ff159540264dab5cc4afae56227d3063cc2903e0
|
[
"MIT"
] | null | null | null |
from .git2nd import *
__version__ = '0.0.1'
| 14.666667
| 21
| 0.681818
| 7
| 44
| 3.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 0.159091
| 44
| 2
| 22
| 22
| 0.594595
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
0782acbc58f49684754f2373e5a7a5b3b7f7a8d3
| 93
|
py
|
Python
|
unitypack/exceptions.py
|
isombyt/UnityPack
|
b850a19bbc841d9a191db027040fd66b6e3b3997
|
[
"MIT"
] | 633
|
2016-07-25T08:11:44.000Z
|
2022-03-03T17:15:46.000Z
|
unitypack/exceptions.py
|
isombyt/UnityPack
|
b850a19bbc841d9a191db027040fd66b6e3b3997
|
[
"MIT"
] | 84
|
2016-08-15T16:23:33.000Z
|
2022-01-15T15:12:28.000Z
|
unitypack/exceptions.py
|
isombyt/UnityPack
|
b850a19bbc841d9a191db027040fd66b6e3b3997
|
[
"MIT"
] | 147
|
2016-07-27T07:50:27.000Z
|
2022-03-25T15:16:45.000Z
|
class UnityPackException(Exception):
    """Base class for all errors raised by unitypack."""


class ArchiveNotFound(UnityPackException):
    """Raised when a requested archive cannot be located."""
| 15.5
| 42
| 0.83871
| 8
| 93
| 9.75
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 93
| 5
| 43
| 18.6
| 0.928571
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
078493fa40490745d8789101fe58afb0a05415bf
| 1,724
|
py
|
Python
|
DailyProgrammer/DP20140908A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/DP20140908A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/DP20140908A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
[9/08/2014] Challenge #179 [Easy] You make me happy when clouds are gray...scale
https://www.reddit.com/r/dailyprogrammer/comments/2ftcb8/9082014_challenge_179_easy_you_make_me_happy_when/
#Description
The 'Daily Business' newspaper are a distributor of the most recent news concerning business. They have a problem
though, there is a new newspaper brought out every single day and up to this point, all of the images and
advertisements featured have been in full colour and this is costing the company.
If you can convert these images before they reach the publisher, then you will surely get a promotion, or at least a
raise!
#Formal Inputs & Outputs
##Input description
On console input you should enter a filepath to the image you wish to convert to grayscale.
##Output description
The program should save an image in the current directory of the image passed as input, the only difference being that
it is now in black and white.
#Notes/Hints
There are several methods to convert an image to grayscale, the easiest is to sum up all of the RGB values and divide
it by 3 (The length of the array) and fill each R,G and B value with that number.
For example
RED = (255,0,0)
Would turn to
(85,85,85) //Because 255/3 == 85.
There is a problem with this method though,
GREEN = (0,255,0)
brings back the exact same value!
There is a formula to solve this, see if you can find it.
Share any interesting methods for grayscale conversion that you come across.
#Finally
We have an IRC channel over at
irc.freenode.net in #reddit-dailyprogrammer
Stop on by :D
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
    """Entry point for the challenge solution (not yet implemented)."""
    pass


if __name__ == "__main__":
    main()
| 38.311111
| 118
| 0.766241
| 299
| 1,724
| 4.361204
| 0.568562
| 0.019172
| 0.018405
| 0.029141
| 0.052147
| 0.052147
| 0.052147
| 0.052147
| 0
| 0
| 0
| 0.031735
| 0.177494
| 1,724
| 44
| 119
| 39.181818
| 0.88787
| 0.958237
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
07afe2b36d8e6fcb7bc4fe2d806e8acd0d901e36
| 90
|
py
|
Python
|
src/bio2bel_sider/__init__.py
|
AldisiRana/sider
|
81b0cb4a40de44922bd69fa407e696d2a0f59922
|
[
"MIT"
] | null | null | null |
src/bio2bel_sider/__init__.py
|
AldisiRana/sider
|
81b0cb4a40de44922bd69fa407e696d2a0f59922
|
[
"MIT"
] | 1
|
2019-10-24T11:52:15.000Z
|
2019-10-24T11:52:15.000Z
|
src/bio2bel_sider/__init__.py
|
AldisiRana/sider
|
81b0cb4a40de44922bd69fa407e696d2a0f59922
|
[
"MIT"
] | 1
|
2019-10-24T10:12:57.000Z
|
2019-10-24T10:12:57.000Z
|
# -*- coding: utf-8 -*-
"""Bio2BEL SIDER."""
from .manager import Manager # noqa: F401
| 15
| 42
| 0.6
| 11
| 90
| 4.909091
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068493
| 0.188889
| 90
| 5
| 43
| 18
| 0.671233
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
07bc12e2308fc2caaa0f6b8928d7acd49efe4398
| 1,880
|
py
|
Python
|
build/scripts/check_config_h.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
build/scripts/check_config_h.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
build/scripts/check_config_h.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
# Generates a C++ translation unit that cross-checks the SIZEOF_* macros of a
# generated config header against the sizes the compiler actually produces.
import sys

# C++ snippet appended after the header include: each static_assert fires at
# compile time if the header's SIZEOF_* value disagrees with sizeof(...).
data = """
#if defined(SIZEOF_LONG)
static_assert(sizeof(long) == SIZEOF_LONG, "fixme 1");
#endif
#if defined(SIZEOF_PTHREAD_T)
#include <pthread.h>
static_assert(sizeof(pthread_t) == SIZEOF_PTHREAD_T, "fixme 2");
#endif
#if defined(SIZEOF_SIZE_T)
#include <stddef.h>
static_assert(sizeof(size_t) == SIZEOF_SIZE_T, "fixme 3");
#endif
#if defined(SIZEOF_TIME_T)
#include <time.h>
static_assert(sizeof(time_t) == SIZEOF_TIME_T, "fixme 4");
#endif
#if defined(SIZEOF_UINTPTR_T)
#include <stdint.h>
static_assert(sizeof(uintptr_t) == SIZEOF_UINTPTR_T, "fixme 5");
#endif
#if defined(SIZEOF_VOID_P)
static_assert(sizeof(void*) == SIZEOF_VOID_P, "fixme 6");
#endif
#if defined(SIZEOF_FPOS_T)
#include <stdio.h>
static_assert(sizeof(fpos_t) == SIZEOF_FPOS_T, "fixme 7");
#endif
#if defined(SIZEOF_DOUBLE)
static_assert(sizeof(double) == SIZEOF_DOUBLE, "fixme 8");
#endif
#if defined(SIZEOF_LONG_DOUBLE)
static_assert(sizeof(long double) == SIZEOF_LONG_DOUBLE, "fixme 9");
#endif
#if defined(SIZEOF_FLOAT)
static_assert(sizeof(float) == SIZEOF_FLOAT, "fixme 10");
#endif
#if defined(SIZEOF_INT)
static_assert(sizeof(int) == SIZEOF_INT, "fixme 11");
#endif
#if defined(SIZEOF_LONG_LONG)
static_assert(sizeof(long long) == SIZEOF_LONG_LONG, "fixme 12");
#endif
#if defined(SIZEOF_OFF_T)
#include <stdio.h>
static_assert(sizeof(off_t) == SIZEOF_OFF_T, "fixme 13");
#endif
#if defined(SIZEOF_PID_T)
#include <unistd.h>
static_assert(sizeof(pid_t) == SIZEOF_PID_T, "fixme 14");
#endif
#if defined(SIZEOF_SHORT)
static_assert(sizeof(short) == SIZEOF_SHORT, "fixme 15");
#endif
#if defined(SIZEOF_WCHAR_T)
static_assert(sizeof(wchar_t) == SIZEOF_WCHAR_T, "fixme 16");
#endif
#if defined(SIZEOF__BOOL)
//TODO
#endif
"""

if __name__ == '__main__':
    # argv[1]: name of the config header to include; argv[2]: output file path.
    with open(sys.argv[2], 'w') as f:
        f.write('#include <' + sys.argv[1] + '>\n\n')
        f.write(data)
| 20.888889
| 68
| 0.726596
| 292
| 1,880
| 4.376712
| 0.205479
| 0.119718
| 0.199531
| 0.250391
| 0.128326
| 0.050078
| 0.050078
| 0
| 0
| 0
| 0
| 0.015006
| 0.11383
| 1,880
| 89
| 69
| 21.123596
| 0.752101
| 0
| 0
| 0.292308
| 0
| 0
| 0.923936
| 0.414362
| 0
| 0
| 0
| 0
| 0.246154
| 1
| 0
| false
| 0
| 0.015385
| 0
| 0.015385
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
07ceb6e21ca9ae72fde6ab3787d1b2554ba0769a
| 364
|
py
|
Python
|
lib/python/src/cat/__init__.py
|
l1905/cat
|
31389f98befa8ba7a80599596616ca3aa026f599
|
[
"Apache-2.0"
] | null | null | null |
lib/python/src/cat/__init__.py
|
l1905/cat
|
31389f98befa8ba7a80599596616ca3aa026f599
|
[
"Apache-2.0"
] | null | null | null |
lib/python/src/cat/__init__.py
|
l1905/cat
|
31389f98befa8ba7a80599596616ca3aa026f599
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: stdrickforce (Tengyuan Fan)
# Email: <stdrickforce@gmail.com> <fantengyuan@meituan.com>
from .container import * # noqa
from .const import * # noqa
from .transaction import * # noqa
from .event import * # noqa
from .metric import * # noqa
from .cat import * # noqa
from .messageid import * # noqa
| 21.411765
| 59
| 0.675824
| 46
| 364
| 5.347826
| 0.565217
| 0.284553
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00339
| 0.18956
| 364
| 16
| 60
| 22.75
| 0.830508
| 0.46978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
07d70141a468b0688774bbbe5d50cd2fcc8b41ca
| 126
|
py
|
Python
|
api/config_template.py
|
ArVID220u/catapedia
|
0c6d783db6116619c281cbc2b7e81ff2e44f3cb4
|
[
"MIT"
] | null | null | null |
api/config_template.py
|
ArVID220u/catapedia
|
0c6d783db6116619c281cbc2b7e81ff2e44f3cb4
|
[
"MIT"
] | null | null | null |
api/config_template.py
|
ArVID220u/catapedia
|
0c6d783db6116619c281cbc2b7e81ff2e44f3cb4
|
[
"MIT"
] | null | null | null |
# Configuration file
# Should be copied into `config.py` where real values should be entered
# `config.py` is ignored by git
| 25.2
| 71
| 0.753968
| 20
| 126
| 4.75
| 0.8
| 0.168421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174603
| 126
| 4
| 72
| 31.5
| 0.913462
| 0.936508
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
07f3fa382492570e57b12b2153c0a80c1d7283a1
| 291
|
py
|
Python
|
conans/__init__.py
|
TyRoXx/conan
|
644516b5126f78f46275a9f6a01148183c9d149f
|
[
"MIT"
] | null | null | null |
conans/__init__.py
|
TyRoXx/conan
|
644516b5126f78f46275a9f6a01148183c9d149f
|
[
"MIT"
] | null | null | null |
conans/__init__.py
|
TyRoXx/conan
|
644516b5126f78f46275a9f6a01148183c9d149f
|
[
"MIT"
] | null | null | null |
# Allow conans to import ConanFile from here
# to allow refactors
from conans.model.conan_file import ConanFile
from conans.model.options import Options
from conans.model.settings import Settings
from conans.client.cmake import CMake
from conans.client.gcc import GCC
__version__ = '0.4.0'
| 29.1
| 45
| 0.817869
| 45
| 291
| 5.177778
| 0.422222
| 0.214592
| 0.193133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011765
| 0.123711
| 291
| 9
| 46
| 32.333333
| 0.901961
| 0.209622
| 0
| 0
| 0
| 0
| 0.022026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
580717d64f6f41da7091e21a5bf3092554c61aab
| 3,934
|
py
|
Python
|
dbaas/dbaas_services/analyzing/migrations/0011_auto__chg_field_analyzerepository_analyzed_at.py
|
didindinn/database-as-a-service
|
747de31ff8546f7874ddd654af860e130afd17a0
|
[
"BSD-3-Clause"
] | 303
|
2015-01-08T10:35:54.000Z
|
2022-02-28T08:54:06.000Z
|
dbaas/dbaas_services/analyzing/migrations/0011_auto__chg_field_analyzerepository_analyzed_at.py
|
nouraellm/database-as-a-service
|
5e655c9347bea991b7218a01549f5e44f161d7be
|
[
"BSD-3-Clause"
] | 124
|
2015-01-14T12:56:15.000Z
|
2022-03-22T20:45:11.000Z
|
dbaas/dbaas_services/analyzing/migrations/0011_auto__chg_field_analyzerepository_analyzed_at.py
|
nouraellm/database-as-a-service
|
5e655c9347bea991b7218a01549f5e44f161d7be
|
[
"BSD-3-Clause"
] | 110
|
2015-01-02T11:59:48.000Z
|
2022-02-28T08:54:06.000Z
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop ``auto_now_add`` from
    ``AnalyzeRepository.analyzed_at`` (forwards) and restore it (backwards).

    The ``models`` dict below is South's frozen ORM snapshot — it is
    auto-generated and must not be hand-edited.
    """

    def forwards(self, orm):
        # Changing field 'AnalyzeRepository.analyzed_at'
        db.alter_column(u'analyzing_analyzerepository', 'analyzed_at', self.gf('django.db.models.fields.DateTimeField')())

    def backwards(self, orm):
        # Changing field 'AnalyzeRepository.analyzed_at'
        db.alter_column(u'analyzing_analyzerepository', 'analyzed_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))

    # Frozen ORM state used by South to reconstruct the models at this point
    # in migration history.
    models = {
        u'analyzing.analyzerepository': {
            'Meta': {'unique_together': "(('analyzed_at', 'instance_name'),)", 'object_name': 'AnalyzeRepository'},
            'analyzed_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'cpu_alarm': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cpu_threshold': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'database_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'db_index': 'True'}),
            'databaseinfra_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'db_index': 'True'}),
            'email_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'engine_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
            'environment_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'memory_alarm': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'memory_threshold': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'volume_alarm': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'volume_threshold': ('django.db.models.fields.IntegerField', [], {'default': '50'})
        },
        u'analyzing.executionplan': {
            'Meta': {'object_name': 'ExecutionPlan'},
            'adapter': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'alarm_repository_attr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'field_to_check_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'metrics': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
            'minimum_value': ('django.db.models.fields.IntegerField', [], {}),
            'plan_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'}),
            'proccess_function': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'threshold': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
            'threshold_repository_attr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['analyzing']
| 69.017544
| 139
| 0.602949
| 419
| 3,934
| 5.486874
| 0.214797
| 0.107873
| 0.182688
| 0.260983
| 0.744672
| 0.726403
| 0.703784
| 0.684646
| 0.443236
| 0.395389
| 0
| 0.012643
| 0.175648
| 3,934
| 57
| 140
| 69.017544
| 0.696269
| 0.029232
| 0
| 0.086957
| 0
| 0
| 0.562631
| 0.313155
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.195652
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6af55e4553c5c08a0fea8bdecbe3be8cc62d1168
| 4,981
|
py
|
Python
|
osgar/drivers/test_sicklidar.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 12
|
2017-02-16T10:22:59.000Z
|
2022-03-20T05:48:06.000Z
|
osgar/drivers/test_sicklidar.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 618
|
2016-08-30T04:46:12.000Z
|
2022-03-25T16:03:10.000Z
|
osgar/drivers/test_sicklidar.py
|
robotika/osgar
|
6f4f584d5553ab62c08a1c7bb493fefdc9033173
|
[
"MIT"
] | 11
|
2016-08-27T20:02:55.000Z
|
2022-03-07T08:53:53.000Z
|
import unittest
from unittest.mock import MagicMock
from osgar.drivers.sicklidar import SICKLidar
from osgar.bus import Bus
class SICKLidarTest(unittest.TestCase):
    """Unit tests for the SICKLidar driver (lifecycle, parsing, masking)."""

    def test_start_stop(self):
        # The driver thread must start and shut down cleanly.
        config = {}
        logger = MagicMock()
        bus = Bus(logger)
        lidar = SICKLidar(config, bus=bus.handle('lidar'))
        lidar.start()
        lidar.request_stop()
        lidar.join()

    def test_parse_raw_data(self):
        # Captured LMDscandata telegram (STX ... ETX framed); content is
        # opaque device output and must be kept byte-for-byte.
        raw_data = b"""\x02sRA LMDscandata 1 1 10A719E 0 0 20F9 20FB 26C0C24A 26C0DDF1 0 0 1 0 0 5DC
A2 0 1 DIST1 3F800000 00000000 FFF92230 D05 32B 87B 88D 8A8 8C3 8DE 8F5 912 936
95F 986 9B8 9D4 9EA 9FA A07 A16 A29 A38 A48 A58 A69 A7A A8B A9A AB1 0 0 0 0 0 0
0 0 0 1E 635 66A 6A6 6DB D7B DC5 E0E E60 EB4 EFC F55 FAE 1008 106E 10D1 113C 11C
4 1255 12C0 12CA 12BF 12B8 12A9 129C 1294 128A 1279 1271 1267 125E 1253 124B 124
3 607 61B 60D 604 5FB 5F5 5EC 5EC 5EF 5F0 5F1 5F4 5F8 603 60D 61F 625 62A 653 1F
31 1F84 202E 2060 2082 210B 21DA 228D 2345 23E1 249E 253A 261E 26ED 27BE 28B4 29
49 2957 294D 294D 2946 2944 2936 2937 2935 292E 11D8 B1B B10 B00 AE8 AD0 AC8 ABD
AA4 A9B A93 A8C A88 A88 A83 A7D A6F A5F A4D A44 A3C A37 A31 A2C A32 A33 A33 0 6
D4 6C9 6C3 6B9 6B0 6AC 6AD 6AE 6A7 6A2 69C 69D 69C 67 88 D6 147 1A8 1C4 1C9 1C9
1C7 1C8 1C9 1CC 1CA 1CB 1CC 1C6 1CD 1CB 1CF 1D1 1CF 1D0 1CE 1D5 1CF 1D1 1D5 1D4
1D9 1D8 1D8 1D7 1D9 1D9 1D6 1C9 18D 156 126 111 10A 104 100 F9 EA E8 E5 E1 E4 E3
EF 123 17F 1D0 1EC 1F4 1F7 1FA 1F9 1FC 200 1FF 206 205 204 20D 20C 20C 20A 20F
210 212 210 216 213 219 219 21C 222 221 222 21E 21F 21C 21A 216 219 211 213 210
211 211 212 216 257 282 28B 291 292 296 2A6 2B2 2C2 2BD 2B5 2A4 2A9 2A6 299 299
291 292 28E 290 280 27C 27A 270 272 26A 26D 263 262 255 24F 24B 245 240 234 234
227 22B 21E 220 21F 217 216 21D 211 20D 207 200 1F9 1FA 1EF 1EC 1E5 1E3 1E4 1DE
1DA 1D7 1CD 1CF 1D0 1C6 1D0 1C3 1BE 1C0 1B7 1B7 1BB 1B8 1B7 1B2 1B3 1AD 1AF 1B3
1B4 1AE 1B0 1B0 1AF 1A8 1AF 1AD 1AB 1AD 1AC 1AB 1AB 1A5 1A2 1B2 1AE 1B4 1B8 1B0
1B0 1A9 1B3 1B1 1B2 1AE 1B8 1BC 1B7 1B4 1B8 1C3 1C4 1CD 1CC 1CD 1CC 1D7 20E AFA
0 0 0 0 4A6 4A6 4A5 4A4 49C 49A 498 49C 498 499 496 493 48F 48D 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 482 484 482 0 0 0 49A 4A0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2
2 14 1F 24 28 34 35 3C 40 45 49 4B 4D 50 50 50 50 51 55 55 54 51 54 51 50 51 54
54 52 52 54 55 54 52 56 57 55 55 58 58 58 59 59 58 56 59 57 58 57 59 5C 5A 59 5B
5B 58 5C 5C 5C 5A 5E 5B 5E 60 5F 5C 5C 5B 5B 5A 5A 5D 5B 5B 5D 5D 5D 5C 5B 5E 5
F 5D 63 5F 62 5E 63 61 63 64 61 62 68 68 67 62 66 65 6B 6A 6A 66 67 6C 6C 6D 6D
6C 6F 6D 6D 6E 6C 72 70 71 71 73 75 75 77 77 75 76 79 76 78 77 79 7A 7D 7E 7D 7D
80 81 82 82 83 84 82 83 83 86 88 8A 8B 87 8C 90 8E 8D 92 93 95 97 97 95 9A 9E 9
C A1 A2 A5 A6 A3 A8 AC B0 AE AE B2 B4 B6 B7 BA BF C1 C4 C0 CA CB C7 CA CC CB CE
D2 CE D6 D3 DA D7 D3 DF DC E3 E6 ED EC EF F3 FB FE FD 105 10D 112 117 11F 125 12
A 130 135 134 13D 138 134 131 131 12C 12A 128 128 126 11B 11C 113 11B 10E 2 2 2
2 2 2 184 18E 2 21E 211 213 229 247 244 26A 288 293 295 28C 290 28A 281 28D 28B
289 286 28C 290 28C 28B 28B 28B 287 28A 282 284 286 283 283 286 277 285 288 282
28A 282 281 277 271 256 2 10C F5 FA F0 F3 F4 EB F2 F2 F4 EC EB 2 2 2 2 2 2 0 2 2
2 2 2 2 2 2 0 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 11A 2 133 135 132 13E 147 14E 2 14F 5BB 562 555 55A 54C 549
545 536 535 535 52D 522 51B 51A 519 50D 509 504 506 500 4FD 4FF 4E4 0 0 1 B not
defined 0 0 0\x03"""
        data = SICKLidar.parse_raw_data(raw_data)
        self.assertIsNotNone(data)

    def test_sleep(self):
        # 'sleep' config key is optional: absent -> None, present -> its value.
        config = {}
        logger = MagicMock()
        bus = Bus(logger)
        lidar = SICKLidar(config, bus=bus.handle('lidar'))
        self.assertIsNone(lidar.sleep)
        config = {"sleep": 0.1}
        lidar = SICKLidar(config, bus=bus.handle('lidar'))
        self.assertAlmostEqual(lidar.sleep, 0.1)

    def test_empty_scan(self):
        tcp_buf = b'\x02sRA LMDscandata 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\x03'
        data = SICKLidar.parse_raw_data(tcp_buf)
        self.assertEqual(data, ([], None))  # empty scan and no RSSI
        # TODO test that empty scan is not published!

    def test_mask(self):
        # mask [1, -1] zeroes the first and last ray, leaving the rest intact.
        config = {
            'mask': [1, -1]
        }
        logger = MagicMock()
        bus = Bus(logger)
        lidar = SICKLidar(config, bus=bus.handle('lidar'))
        scan = [123] * 270
        masked_scan = lidar.apply_mask(scan)
        self.assertEqual(masked_scan[0], 0)
        self.assertEqual(scan[1:-1], masked_scan[1:-1])
        self.assertEqual(masked_scan[-1], 0)

    def test_blind_zone(self):
        # Readings below the blind_zone threshold are zeroed out.
        config = {
            'blind_zone': 10
        }
        logger = MagicMock()
        bus = Bus(logger)
        lidar = SICKLidar(config, bus=bus.handle('lidar'))
        scan = [123] * 269 + [2]
        masked_scan = lidar.apply_mask(scan)
        self.assertEqual(masked_scan[-1], 0)
# vim: expandtab sw=4 ts=4
| 46.12037
| 100
| 0.659105
| 1,111
| 4,981
| 2.928893
| 0.49595
| 0.046097
| 0.058082
| 0.068838
| 0.203135
| 0.203135
| 0.192379
| 0.192379
| 0.176706
| 0.155808
| 0
| 0.450468
| 0.292712
| 4,981
| 107
| 101
| 46.551402
| 0.473176
| 0.018269
| 0
| 0.235955
| 0
| 0.258427
| 0.635898
| 0
| 0
| 0
| 0
| 0.009346
| 0.089888
| 1
| 0.067416
| false
| 0
| 0.044944
| 0
| 0.123596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed03d9bd6f0b8de7317769246027bd314e8747ca
| 199
|
py
|
Python
|
pyhafas/profile/__init__.py
|
anjomro/pyhafas
|
3cf519bc37f98293c8567b3b3cb52bd705436d47
|
[
"MIT"
] | null | null | null |
pyhafas/profile/__init__.py
|
anjomro/pyhafas
|
3cf519bc37f98293c8567b3b3cb52bd705436d47
|
[
"MIT"
] | null | null | null |
pyhafas/profile/__init__.py
|
anjomro/pyhafas
|
3cf519bc37f98293c8567b3b3cb52bd705436d47
|
[
"MIT"
] | null | null | null |
from .interfaces import ProfileInterface # isort:skip
from .base import BaseProfile
from .db import DBProfile
from .vsn import VSNProfile
from .rkrp import RKRPProfile
from .nasa import NASAProfile
| 28.428571
| 54
| 0.824121
| 26
| 199
| 6.307692
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135678
| 199
| 6
| 55
| 33.166667
| 0.953488
| 0.050251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ed11aabb3fab60d386c533b6aeacbf1508743eaa
| 143
|
py
|
Python
|
ncov/apps/news/apps.py
|
ExchangeAnn/2019ncov
|
dbb2c87a6ae4eb50bece9f5b6e2431e89d66f02e
|
[
"MIT"
] | null | null | null |
ncov/apps/news/apps.py
|
ExchangeAnn/2019ncov
|
dbb2c87a6ae4eb50bece9f5b6e2431e89d66f02e
|
[
"MIT"
] | 274
|
2020-02-22T07:54:37.000Z
|
2021-06-23T12:48:05.000Z
|
ncov/apps/news/apps.py
|
ExchangeAnn/2019ncov
|
dbb2c87a6ae4eb50bece9f5b6e2431e89d66f02e
|
[
"MIT"
] | 4
|
2020-02-20T11:19:33.000Z
|
2020-09-30T12:40:34.000Z
|
from django.apps import AppConfig
class NewsConfig(AppConfig):
    """Django application configuration for the news app."""

    name = "apps.news"

    def ready(self):
        # Import the signals module for its side effects so that its
        # receivers are registered when Django finishes app loading.
        import apps.news.signals
| 15.888889
| 33
| 0.685315
| 18
| 143
| 5.444444
| 0.722222
| 0.163265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223776
| 143
| 8
| 34
| 17.875
| 0.882883
| 0
| 0
| 0
| 0
| 0
| 0.062937
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ed2f30dd104b0cb0b9bf2b041aa6afdc7f25d560
| 87
|
py
|
Python
|
eclapp/apps.py
|
BlackBoxSQL/ecl-intern
|
17e82a7d563f1a55f7a3a70bac66cb8582256fd7
|
[
"MIT"
] | null | null | null |
eclapp/apps.py
|
BlackBoxSQL/ecl-intern
|
17e82a7d563f1a55f7a3a70bac66cb8582256fd7
|
[
"MIT"
] | null | null | null |
eclapp/apps.py
|
BlackBoxSQL/ecl-intern
|
17e82a7d563f1a55f7a3a70bac66cb8582256fd7
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class EclappConfig(AppConfig):
    """Django application configuration for the eclapp app."""

    name = 'eclapp'
| 14.5
| 33
| 0.747126
| 10
| 87
| 6.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 87
| 5
| 34
| 17.4
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ed3e3d29b5f1548f3a99d0b915c55a7d7689d4de
| 497
|
py
|
Python
|
release/stubs.min/System/Diagnostics/__init___parts/DebuggerStepperBoundaryAttribute.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/System/Diagnostics/__init___parts/DebuggerStepperBoundaryAttribute.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/System/Diagnostics/__init___parts/DebuggerStepperBoundaryAttribute.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
# Auto-generated IronPython stub for System.Diagnostics — signatures only,
# no runtime behavior; do not hand-edit beyond documentation.
class DebuggerStepperBoundaryAttribute(Attribute,_Attribute):
    """
    Indicates the code following the attribute is to be executed in run,not step,mode.

    DebuggerStepperBoundaryAttribute()
    """

    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass

    def __reduce_ex__(self,*args):
        pass
| 35.5
| 215
| 0.734406
| 59
| 497
| 5.40678
| 0.508475
| 0.141066
| 0.15047
| 0.178683
| 0.354232
| 0.354232
| 0.354232
| 0.354232
| 0.354232
| 0.354232
| 0
| 0
| 0.148893
| 497
| 13
| 216
| 38.230769
| 0.754137
| 0.653924
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
ed49e3b34eca5785fdc8dd0e97568b32ea107fe7
| 1,286
|
py
|
Python
|
api/responses.py
|
dv-bv/serverless-python-boilerplate
|
bd3d436b166b59f5b9c8c017b3a8e976ead6aefc
|
[
"MIT"
] | 10
|
2019-08-16T06:41:53.000Z
|
2021-04-06T14:53:11.000Z
|
api/responses.py
|
dv-bv/serverless-python-boilerplate
|
bd3d436b166b59f5b9c8c017b3a8e976ead6aefc
|
[
"MIT"
] | 1
|
2021-05-08T12:22:54.000Z
|
2021-05-08T12:22:54.000Z
|
api/responses.py
|
dv-bv/serverless-python-boilerplate
|
bd3d436b166b59f5b9c8c017b3a8e976ead6aefc
|
[
"MIT"
] | 7
|
2019-07-29T04:41:25.000Z
|
2021-03-24T17:25:57.000Z
|
import json
from http import HTTPStatus
# Headers attached to every response: permissive CORS plus JSON content type.
cors_headers = {
    'Access-Control-Allow-Origin': '*',
    'Content-Type': 'application/json',
}


def generate_empty_response(status_code):
    """Build a body-less API response dict with CORS headers.

    :param status_code: numeric HTTP status to report.
    :returns: dict with 'headers' and 'statusCode' keys only.
    """
    return {'headers': cors_headers, 'statusCode': status_code}
def generate_response(body, status_code):
    """Build a response carrying *body* serialized as a JSON string."""
    resp = generate_empty_response(status_code)
    resp['body'] = json.dumps(body)
    return resp
def generate_message_response(message, status_code):
    """Build a response whose JSON body is ``{"message": message}``."""
    payload = {'message': message}
    return generate_response(payload, status_code)
def generate_error_response(err, status_code):
    """Build a response whose JSON body is ``{"error": str(err)}``."""
    detail = str(err)
    return generate_response({'error': detail}, status_code)
def invalid_request_response(message='Invalid Request'):
    """Build a 400 Bad Request error response with *message* as detail."""
    status = HTTPStatus.BAD_REQUEST.value
    return generate_error_response(message, status)
def ok_response(body=None):
    """Build a 200 OK response; body-less when *body* is None."""
    status = HTTPStatus.OK.value
    if body is not None:
        return generate_response(body, status)
    return generate_empty_response(status)
def internal_error_response(err):
    """Build a 500 Internal Server Error response from *err*."""
    status = HTTPStatus.INTERNAL_SERVER_ERROR.value
    return generate_error_response(err, status)
def unauthorized_response():
    """Build a 401 response with the fixed message 'Unauthorized'."""
    status = HTTPStatus.UNAUTHORIZED.value
    return generate_message_response('Unauthorized', status)
def not_found_response():
    """Build a 404 response with the fixed message 'Not Found'."""
    status = HTTPStatus.NOT_FOUND.value
    return generate_message_response('Not Found', status)
| 31.365854
| 83
| 0.764386
| 156
| 1,286
| 6.019231
| 0.262821
| 0.085197
| 0.067093
| 0.057508
| 0.212993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136858
| 1,286
| 41
| 84
| 31.365854
| 0.845946
| 0
| 0
| 0
| 1
| 0
| 0.097125
| 0.020979
| 0
| 0
| 0
| 0
| 0
| 1
| 0.290323
| false
| 0
| 0.064516
| 0.225806
| 0.677419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
ed516e721748fb0f43660ab05a444732d1a71447
| 2,307
|
py
|
Python
|
tests/test_gmemcache.py
|
msabramo/gmemcache
|
fcfee82672f732c119364bb45015df7b02ff11cb
|
[
"Apache-2.0"
] | 1
|
2016-03-13T18:37:56.000Z
|
2016-03-13T18:37:56.000Z
|
tests/test_gmemcache.py
|
msabramo/gmemcache
|
fcfee82672f732c119364bb45015df7b02ff11cb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_gmemcache.py
|
msabramo/gmemcache
|
fcfee82672f732c119364bb45015df7b02ff11cb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import uuid
from gmemcache import MemcacheConnection
from nose.tools import *
MEMCACHED_SERVER = '127.0.0.1:11211'
_conn = None
def _setup_connection():
    """nose setup hook: open the shared module-level memcached connection."""
    global _conn
    _conn = MemcacheConnection([MEMCACHED_SERVER])
def _drop_connection():
    """nose teardown hook: close and clear the shared connection."""
    global _conn
    _conn.close()
    _conn = None
def test_open_lazy():
    """lazy=True must defer connecting until open() is explicitly called."""
    conn = MemcacheConnection([MEMCACHED_SERVER], lazy=True)
    ok_(not conn.is_connected())
    conn.open()
    ok_(conn.is_connected())
    conn.close()
def test_close():
    """close() must drop the connection so is_connected() reports False."""
    conn = MemcacheConnection([MEMCACHED_SERVER])
    conn.open()
    ok_(conn.is_connected())
    conn.close()
    ok_(not conn.is_connected())
@with_setup(setup=_setup_connection, teardown=_drop_connection)
def test_get():
    """get() returns the value previously stored under the same key."""
    key = uuid.uuid1().hex  # unique key avoids collisions across runs
    _conn.set(key, 'value')
    eq_('value', _conn.get(key))
@with_setup(setup=_setup_connection, teardown=_drop_connection)
def test_get_multi():
    """get_multi() returns only existing keys; missing keys are omitted."""
    key1 = uuid.uuid1().hex
    key2 = uuid.uuid1().hex
    key3 = uuid.uuid1().hex
    key4 = uuid.uuid1().hex
    _conn.set_multi({key1: 'value1', key2: 'value2', key3: 'value3'})
    eq_({key1: 'value1', key2: 'value2', key3: 'value3'},
        _conn.get_multi([key1, key2, key3]))
    # key4 was never stored, so it must not appear in the result.
    eq_({key1: 'value1'},
        _conn.get_multi([key1, key4]))
@with_setup(setup=_setup_connection, teardown=_drop_connection)
def test_set():
    """set() stores a value retrievable via get()."""
    key = uuid.uuid1().hex
    _conn.set(key, 'value')
    eq_('value', _conn.get(key))
@with_setup(setup=_setup_connection, teardown=_drop_connection)
def test_set_with_lifetime():
    """A value stored with lifetime=1 must expire after the TTL elapses."""
    key = uuid.uuid1().hex
    _conn.set(key, 'value', lifetime=1)
    eq_('value', _conn.get(key))
    time.sleep(2)  # wait past the 1-second TTL
    eq_(None, _conn.get(key))
@with_setup(setup=_setup_connection, teardown=_drop_connection)
def test_set_multi():
    """set_multi stores every pair of the mapping in one call."""
    key1, key2, key3 = (uuid.uuid1().hex for _ in range(3))
    payload = {key1: 'value1', key2: 'value2', key3: 'value3'}
    _conn.set_multi(payload)
    eq_(payload, _conn.get_multi([key1, key2, key3]))
@with_setup(setup=_setup_connection, teardown=_drop_connection)
def test_set_multi_with_lifetime():
    """Entries stored through set_multi honour the lifetime argument."""
    expiring_key = uuid.uuid1().hex
    _conn.set_multi({expiring_key: 'value'}, lifetime=1)
    eq_('value', _conn.get(expiring_key))
    # Sleep past the one-second TTL so memcached evicts the entry.
    time.sleep(2)
    eq_(None, _conn.get(expiring_key))
| 23.783505
| 94
| 0.672735
| 311
| 2,307
| 4.675241
| 0.170418
| 0.082531
| 0.090784
| 0.078404
| 0.742779
| 0.715268
| 0.711829
| 0.711829
| 0.618294
| 0.618294
| 0
| 0.035472
| 0.169051
| 2,307
| 96
| 95
| 24.03125
| 0.723005
| 0.009103
| 0
| 0.597015
| 0
| 0
| 0.058231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.149254
| false
| 0
| 0.059701
| 0
| 0.208955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed5689fd004cdce446e1c147f6cbe11a539d7d05
| 169
|
py
|
Python
|
continents/urls.py
|
brapastor/pygeographic
|
3b1522b62bf06430dca007d64a5b71243fdb71f0
|
[
"MIT"
] | null | null | null |
continents/urls.py
|
brapastor/pygeographic
|
3b1522b62bf06430dca007d64a5b71243fdb71f0
|
[
"MIT"
] | null | null | null |
continents/urls.py
|
brapastor/pygeographic
|
3b1522b62bf06430dca007d64a5b71243fdb71f0
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import ContinentsView
# URL namespace for this app, enabling reverse('continents:continents_home').
app_name = 'continents'
# Route table: the app root ("") is served by the continents landing view.
urlpatterns = [
    path("", ContinentsView.as_view(), name="continents_home"),
]
| 24.142857
| 63
| 0.745562
| 20
| 169
| 6.15
| 0.7
| 0.227642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130178
| 169
| 6
| 64
| 28.166667
| 0.836735
| 0
| 0
| 0
| 0
| 0
| 0.147929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ed669f9c6e7b942ee66d8967c41352e03b7eaf31
| 118
|
py
|
Python
|
django/contrib/auth/signals.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
virtual/lib/python3.6/site-packages/django/contrib/auth/signals.py
|
kahenya-anita/Insta-Clone
|
4894e959c17170505e73aee6dc497aeb29d55a71
|
[
"MIT"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
virtual/lib/python3.6/site-packages/django/contrib/auth/signals.py
|
kahenya-anita/Insta-Clone
|
4894e959c17170505e73aee6dc497aeb29d55a71
|
[
"MIT"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
from django.dispatch import Signal
# Sent when a user authenticates successfully (see django.contrib.auth.login).
user_logged_in = Signal()
# Sent when an authentication attempt fails.
user_login_failed = Signal()
# Sent when a user is logged out (see django.contrib.auth.logout).
user_logged_out = Signal()
| 19.666667
| 34
| 0.79661
| 17
| 118
| 5.176471
| 0.647059
| 0.340909
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 118
| 5
| 35
| 23.6
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ed6a636306de178b1954b82072c2d8c118996523
| 235
|
py
|
Python
|
classes/conversion.py
|
HisenZhang/IED-tranport-sim
|
93c61d92a628ed6f9b5c035a232867e9c5e5de5d
|
[
"MIT"
] | null | null | null |
classes/conversion.py
|
HisenZhang/IED-tranport-sim
|
93c61d92a628ed6f9b5c035a232867e9c5e5de5d
|
[
"MIT"
] | 7
|
2020-06-07T00:43:26.000Z
|
2020-06-19T19:25:45.000Z
|
classes/conversion.py
|
HisenZhang/IED-tranport-sim
|
93c61d92a628ed6f9b5c035a232867e9c5e5de5d
|
[
"MIT"
] | null | null | null |
from classes.exceptions import ConversionFailure
def MPHtoMPG_Gas(speed):
    """Fuel economy (miles per gallon) of a gas vehicle at `speed` mph.

    Currently a flat estimate; TODO: model MPG as a function of speed in mph.
    """
    flat_estimate_mpg = 18
    return flat_estimate_mpg
def MPHtoMPG_Electric(speed):
    """MPG-equivalent of an electric vehicle at `speed` mph.

    Flat estimate for now, independent of speed.
    """
    flat_estimate_mpge = 65
    return flat_estimate_mpge
def MPGtoMPKWh(mpg):
    """Convert miles-per-gallon to miles-per-kilowatt-hour.

    Based on ~33.705 kWh of energy per gallon of gasoline, so the
    conversion factor is 1/33.705.
    """
    gallons_per_kwh = 0.029669  # 1/33.705
    return gallons_per_kwh * mpg
| 21.363636
| 55
| 0.73617
| 36
| 235
| 4.75
| 0.75
| 0.128655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089947
| 0.195745
| 235
| 11
| 56
| 21.363636
| 0.814815
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
ed7ecaa5916dc439e3f13a11f6920980ae82a28e
| 867
|
py
|
Python
|
scanners/zap-advanced/scanner/zapclient/configuration/zap_configuration_api.py
|
watchmen-coder/secureCodeBox
|
ac3482d4ffa6ced7e8251bc7b72144d11ec2d62d
|
[
"Apache-2.0"
] | 1
|
2021-05-24T08:17:48.000Z
|
2021-05-24T08:17:48.000Z
|
scanners/zap-advanced/scanner/zapclient/configuration/zap_configuration_api.py
|
watchmen-coder/secureCodeBox
|
ac3482d4ffa6ced7e8251bc7b72144d11ec2d62d
|
[
"Apache-2.0"
] | null | null | null |
scanners/zap-advanced/scanner/zapclient/configuration/zap_configuration_api.py
|
watchmen-coder/secureCodeBox
|
ac3482d4ffa6ced7e8251bc7b72144d11ec2d62d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2021 iteratec GmbH
#
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
import collections
import logging
from .zap_configuration_list import ZapConfigurationList
class ZapConfigurationApi(ZapConfigurationList):
    """ZAP API configurations loaded from a YAML file ("api"/"apis" entries)."""
    def __init__(self, api_configurations: collections.OrderedDict):
        """Initialize the API configuration list.

        Parameters
        ----------
        api_configurations : collections.OrderedDict
            The relative path to the config dir containing all relevant config YAML files.
        """
        super().__init__(api_configurations, "api", "apis")
    def __str__(self):
        # NOTE(review): get_configurations is referenced without calling it;
        # presumably a property on ZapConfigurationList — confirm.
        configurations = self.get_configurations
        return " ZapConfigurationApi( " + str(configurations) + " )"
| 29.896552
| 100
| 0.688581
| 93
| 867
| 6.225806
| 0.666667
| 0.117444
| 0.096718
| 0.134715
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010264
| 0.213379
| 867
| 28
| 101
| 30.964286
| 0.83871
| 0.464821
| 0
| 0
| 0
| 0
| 0.0775
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.375
| 0.125
| 0.875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
71ec6ce55e082ab50e64afbe24aa942246e2273c
| 14,111
|
py
|
Python
|
amazon_ads_sponsored_products_client/api/snapshots_api.py
|
wangjoshuah/Amazon-Ads-Sponsored-Products-API-Python-Client
|
98a511a0544d28aac06529c13f4921c19ae8ec66
|
[
"MIT"
] | null | null | null |
amazon_ads_sponsored_products_client/api/snapshots_api.py
|
wangjoshuah/Amazon-Ads-Sponsored-Products-API-Python-Client
|
98a511a0544d28aac06529c13f4921c19ae8ec66
|
[
"MIT"
] | null | null | null |
amazon_ads_sponsored_products_client/api/snapshots_api.py
|
wangjoshuah/Amazon-Ads-Sponsored-Products-API-Python-Client
|
98a511a0544d28aac06529c13f4921c19ae8ec66
|
[
"MIT"
] | null | null | null |
"""
Amazon Ads API - Sponsored Products
Use the Amazon Ads API for Sponsored Products for campaign, ad group, keyword, negative keyword, and product ad management operations. For more information about Sponsored Products, see the [Sponsored Products Support Center](https://advertising.amazon.com/help?entityId=ENTITY3CWETCZD9HEG2#GWGFKPEWVWG2CLUJ). For onboarding information, see the [account setup](setting-up/account-setup) topic.<br/><br/> # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from amazon_ads_sponsored_products_client.api_client import ApiClient, Endpoint as _Endpoint
from amazon_ads_sponsored_products_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from amazon_ads_sponsored_products_client.model.error import Error
from amazon_ads_sponsored_products_client.model.snapshot_request import SnapshotRequest
from amazon_ads_sponsored_products_client.model.snapshot_response import SnapshotResponse
class SnapshotsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        # Default to a fresh ApiClient when the caller does not supply one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # Endpoint descriptor for GET /v2/sp/snapshots/{snapshotId}.
        self.get_snapshot_status_endpoint = _Endpoint(
            settings={
                'response_type': (SnapshotResponse,),
                'auth': [
                    'bearer'
                ],
                'endpoint_path': '/v2/sp/snapshots/{snapshotId}',
                'operation_id': 'get_snapshot_status',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'amazon_advertising_api_client_id',
                    'amazon_advertising_api_scope',
                    'snapshot_id',
                ],
                'required': [
                    'amazon_advertising_api_client_id',
                    'amazon_advertising_api_scope',
                    'snapshot_id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'amazon_advertising_api_client_id':
                        (str,),
                    'amazon_advertising_api_scope':
                        (str,),
                    'snapshot_id':
                        (float,),
                },
                'attribute_map': {
                    'amazon_advertising_api_client_id': 'Amazon-Advertising-API-ClientId',
                    'amazon_advertising_api_scope': 'Amazon-Advertising-API-Scope',
                    'snapshot_id': 'snapshotId',
                },
                'location_map': {
                    'amazon_advertising_api_client_id': 'header',
                    'amazon_advertising_api_scope': 'header',
                    'snapshot_id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # Endpoint descriptor for POST /v2/sp/{recordType}/snapshot.
        self.request_snapshot_endpoint = _Endpoint(
            settings={
                'response_type': (SnapshotResponse,),
                'auth': [
                    'bearer'
                ],
                'endpoint_path': '/v2/sp/{recordType}/snapshot',
                'operation_id': 'request_snapshot',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'amazon_advertising_api_client_id',
                    'amazon_advertising_api_scope',
                    'record_type',
                    'snapshot_request',
                ],
                'required': [
                    'amazon_advertising_api_client_id',
                    'amazon_advertising_api_scope',
                    'record_type',
                    'snapshot_request',
                ],
                'nullable': [
                ],
                'enum': [
                    'record_type',
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                # record_type is an enum; these are the only accepted values
                # and their wire (camelCase) spellings.
                'allowed_values': {
                    ('record_type',): {
                        "CAMPAIGNS": "campaigns",
                        "ADGROUPS": "adGroups",
                        "KEYWORDS": "keywords",
                        "NEGATIVEKEYWORDS": "negativeKeywords",
                        "CAMPAIGNNEGATIVEKEYWORDS": "campaignNegativeKeywords",
                        "PRODUCTADS": "productAds",
                        "TARGETS": "targets",
                        "NEGATIVETARGETS": "negativeTargets"
                    },
                },
                'openapi_types': {
                    'amazon_advertising_api_client_id':
                        (str,),
                    'amazon_advertising_api_scope':
                        (str,),
                    'record_type':
                        (str,),
                    'snapshot_request':
                        (SnapshotRequest,),
                },
                'attribute_map': {
                    'amazon_advertising_api_client_id': 'Amazon-Advertising-API-ClientId',
                    'amazon_advertising_api_scope': 'Amazon-Advertising-API-Scope',
                    'record_type': 'recordType',
                },
                'location_map': {
                    'amazon_advertising_api_client_id': 'header',
                    'amazon_advertising_api_scope': 'header',
                    'record_type': 'path',
                    'snapshot_request': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
    def get_snapshot_status(
        self,
        amazon_advertising_api_client_id,
        amazon_advertising_api_scope,
        snapshot_id,
        **kwargs
    ):
        """Gets the status of a requested snapshot. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_snapshot_status(amazon_advertising_api_client_id, amazon_advertising_api_scope, snapshot_id, async_req=True)
        >>> result = thread.get()
        Args:
            amazon_advertising_api_client_id (str): The identifier of a client associated with a \"Login with Amazon\" developer account.
            amazon_advertising_api_scope (str): The identifier of a profile associated with the advertiser account. Use `GET` method on Profiles resource to list profiles associated with the access token passed in the HTTP Authorization header.
            snapshot_id (float): The snapshot identifier.
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _content_type (str/None): force body content-type.
                Default is None and content-type will be predicted by allowed
                content-types and body.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            SnapshotResponse
            If the method is called asynchronously, returns the request
            thread.
        """
        # Fill in client-level defaults for any option the caller omitted.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_content_type'] = kwargs.get(
            '_content_type')
        kwargs['_host_index'] = kwargs.get('_host_index')
        # Required positional parameters are forwarded through kwargs so the
        # endpoint machinery can validate and place them (header/path).
        kwargs['amazon_advertising_api_client_id'] = \
            amazon_advertising_api_client_id
        kwargs['amazon_advertising_api_scope'] = \
            amazon_advertising_api_scope
        kwargs['snapshot_id'] = \
            snapshot_id
        return self.get_snapshot_status_endpoint.call_with_http_info(**kwargs)
    def request_snapshot(
        self,
        amazon_advertising_api_client_id,
        amazon_advertising_api_scope,
        record_type,
        snapshot_request,
        **kwargs
    ):
        """Request a file-based snapshot of all entities of the specified type. # noqa: E501
        Request a file-based snapshot of all entities of the specified type in the account satisfying the filtering criteria. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.request_snapshot(amazon_advertising_api_client_id, amazon_advertising_api_scope, record_type, snapshot_request, async_req=True)
        >>> result = thread.get()
        Args:
            amazon_advertising_api_client_id (str): The identifier of a client associated with a \"Login with Amazon\" developer account.
            amazon_advertising_api_scope (str): The identifier of a profile associated with the advertiser account. Use `GET` method on Profiles resource to list profiles associated with the access token passed in the HTTP Authorization header.
            record_type (str): The type of entity for which the snapshot is generated.
            snapshot_request (SnapshotRequest):
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _content_type (str/None): force body content-type.
                Default is None and content-type will be predicted by allowed
                content-types and body.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            SnapshotResponse
            If the method is called asynchronously, returns the request
            thread.
        """
        # Fill in client-level defaults for any option the caller omitted.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_content_type'] = kwargs.get(
            '_content_type')
        kwargs['_host_index'] = kwargs.get('_host_index')
        # Required positional parameters are forwarded through kwargs so the
        # endpoint machinery can validate and place them (header/path/body).
        kwargs['amazon_advertising_api_client_id'] = \
            amazon_advertising_api_client_id
        kwargs['amazon_advertising_api_scope'] = \
            amazon_advertising_api_scope
        kwargs['record_type'] = \
            record_type
        kwargs['snapshot_request'] = \
            snapshot_request
        return self.request_snapshot_endpoint.call_with_http_info(**kwargs)
| 40.665706
| 422
| 0.550493
| 1,350
| 14,111
| 5.465185
| 0.176296
| 0.101382
| 0.119274
| 0.074546
| 0.756303
| 0.745188
| 0.718352
| 0.707238
| 0.693955
| 0.693955
| 0
| 0.003494
| 0.3712
| 14,111
| 346
| 423
| 40.783237
| 0.828018
| 0.354263
| 0
| 0.578059
| 0
| 0
| 0.278875
| 0.126847
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012658
| false
| 0
| 0.029536
| 0
| 0.054852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
71eef99019beb06fa77ad8f9995fa6d2fe9a6390
| 41
|
py
|
Python
|
tuples/03.py
|
mallimuondu/python-homworks
|
352721a8e77d0b3bdb7a8a54197b6a04e1aec3c0
|
[
"MIT"
] | null | null | null |
tuples/03.py
|
mallimuondu/python-homworks
|
352721a8e77d0b3bdb7a8a54197b6a04e1aec3c0
|
[
"MIT"
] | null | null | null |
tuples/03.py
|
mallimuondu/python-homworks
|
352721a8e77d0b3bdb7a8a54197b6a04e1aec3c0
|
[
"MIT"
] | null | null | null |
# Fixed tuple of names; show its final element.
a = ("malli", "nesh", "mum")
# Star-unpacking binds the last element, equivalent to a[-1].
*_, last_name = a
print(last_name)
| 20.5
| 28
| 0.487805
| 7
| 41
| 2.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.121951
| 41
| 2
| 29
| 20.5
| 0.527778
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
9c148bb91a0e98bcb4be64d7ba5dada8e1256274
| 1,633
|
py
|
Python
|
finat/sympy2gem.py
|
connorjward/FInAT
|
c979533fb2361e488ae633079bb140b265e91db2
|
[
"MIT"
] | 14
|
2015-04-21T07:47:38.000Z
|
2022-02-05T15:33:24.000Z
|
finat/sympy2gem.py
|
connorjward/FInAT
|
c979533fb2361e488ae633079bb140b265e91db2
|
[
"MIT"
] | 55
|
2015-03-03T12:59:37.000Z
|
2021-10-07T15:18:23.000Z
|
finat/sympy2gem.py
|
connorjward/FInAT
|
c979533fb2361e488ae633079bb140b265e91db2
|
[
"MIT"
] | 7
|
2016-12-10T16:32:35.000Z
|
2021-11-04T17:55:10.000Z
|
from functools import singledispatch, reduce
import sympy
try:
    import symengine
except ImportError:
    # Fallback shim used when symengine is not installed: any attribute
    # access on a Mock instance (e.g. symengine.Expr) returns the Mock
    # class itself, so the @sympy2gem.register(symengine.X) decorators
    # below all register against the single Mock type and stay harmless.
    class Mock:
        def __getattribute__(self, name):
            return Mock
    symengine = Mock()
import gem
@singledispatch
def sympy2gem(node, self):
    # Base case of the single-dispatch translator: reached only when `node`
    # is not a sympy/symengine expression (or registered builtin) at all.
    raise AssertionError("sympy/symengine node expected, got %s" % type(node))
@sympy2gem.register(sympy.Expr)
@sympy2gem.register(symengine.Expr)
def sympy2gem_expr(node, self):
    # Catch-all for expression subtypes that lack a dedicated handler below.
    raise NotImplementedError("no handler for sympy/symengine node type %s" % type(node))
@sympy2gem.register(sympy.Add)
@sympy2gem.register(symengine.Add)
def sympy2gem_add(node, self):
    """Fold an n-ary addition into nested binary gem.Sum nodes."""
    operands = [self(arg) for arg in node.args]
    return reduce(gem.Sum, operands)
@sympy2gem.register(sympy.Mul)
@sympy2gem.register(symengine.Mul)
def sympy2gem_mul(node, self):
    """Fold an n-ary multiplication into nested binary gem.Product nodes."""
    operands = [self(arg) for arg in node.args]
    return reduce(gem.Product, operands)
@sympy2gem.register(sympy.Pow)
@sympy2gem.register(symengine.Pow)
def sympy2gem_pow(node, self):
    """Lower exponentiation to gem.Power over the translated operands."""
    operands = [self(arg) for arg in node.args]
    return gem.Power(*operands)
@sympy2gem.register(sympy.Integer)
@sympy2gem.register(symengine.Integer)
@sympy2gem.register(int)
def sympy2gem_integer(node, self):
    """Lower an integer constant to a gem.Literal."""
    value = int(node)
    return gem.Literal(value)
@sympy2gem.register(sympy.Float)
@sympy2gem.register(symengine.Float)
@sympy2gem.register(float)
def sympy2gem_float(node, self):
    """Lower a floating-point constant to a gem.Literal."""
    value = float(node)
    return gem.Literal(value)
@sympy2gem.register(sympy.Symbol)
@sympy2gem.register(symengine.Symbol)
def sympy2gem_symbol(node, self):
    """Resolve a free symbol via the translator's bindings table."""
    bindings = self.bindings
    return bindings[node]
@sympy2gem.register(sympy.Rational)
@sympy2gem.register(symengine.Rational)
def sympy2gem_rational(node, self):
    """Lower an exact rational p/q to gem.Division of translated p and q."""
    numerator, denominator = node.as_numer_denom()
    return gem.Division(self(numerator), self(denominator))
| 24.014706
| 89
| 0.755052
| 207
| 1,633
| 5.888889
| 0.246377
| 0.251025
| 0.144381
| 0.106645
| 0.219032
| 0.14192
| 0.091058
| 0
| 0
| 0
| 0
| 0.018789
| 0.120024
| 1,633
| 67
| 90
| 24.373134
| 0.829506
| 0
| 0
| 0
| 0
| 0
| 0.04899
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 1
| 0.212766
| false
| 0
| 0.106383
| 0.170213
| 0.510638
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
9c237b9f609d4298570edc2ecb5835b90ac6de6b
| 639
|
py
|
Python
|
Messaging/Packets/Client/Alliance/UnknownAskMessage.py
|
Kuler2006/BSDS-V40
|
9e9a6e5b36cd5082fe428ebb0279df23d5d9c7b7
|
[
"Apache-2.0"
] | 4
|
2021-11-27T16:49:30.000Z
|
2021-12-21T13:50:00.000Z
|
Messaging/Packets/Client/Alliance/UnknownAskMessage.py
|
Kuler2006/BSDS-V40
|
9e9a6e5b36cd5082fe428ebb0279df23d5d9c7b7
|
[
"Apache-2.0"
] | null | null | null |
Messaging/Packets/Client/Alliance/UnknownAskMessage.py
|
Kuler2006/BSDS-V40
|
9e9a6e5b36cd5082fe428ebb0279df23d5d9c7b7
|
[
"Apache-2.0"
] | 1
|
2021-12-21T13:38:20.000Z
|
2021-12-21T13:38:20.000Z
|
from Logic.Data.DataManager import Writer
from Logic.Data.DataManager import Reader
from Messaging.Packets.Server.Alliance.AllianceDataMessage import AllianceDataMessage
from Messaging.Packets.Server.Alliance.UnknownLeaderboardAllianceMessage import UnknownLeaderboardAllianceMessage
class UnknownAskMessage(Reader):
    """Unidentified alliance-related client packet; decoding and handling are stubs."""
    def __init__(self, client, player, header_bytes):
        super().__init__(header_bytes)
        self.client = client
        self.player = player
    def decode(self):
        """Nothing is read from the payload yet."""
        pass
    def process(self):
        """No response is sent for this packet at the moment."""
        pass
        # NOTE: candidate reply kept for reference:
        # UnknownLeaderboardAllianceMessage(self.client, self.player).send(self.player.LowID)
| 31.95
| 113
| 0.762128
| 65
| 639
| 7.338462
| 0.430769
| 0.062893
| 0.054507
| 0.100629
| 0.268344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164319
| 639
| 19
| 114
| 33.631579
| 0.893258
| 0.12989
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0.153846
| 0.307692
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
9c3e4a4649f08477e7f87226f4a120e1562dc11c
| 85
|
py
|
Python
|
ghtools/exceptions.py
|
alphagov/ghtools
|
be10c9251197c4c170e617f8328c1f94f5f45dca
|
[
"MIT"
] | 3
|
2015-02-09T12:19:40.000Z
|
2016-07-20T18:19:11.000Z
|
ghtools/exceptions.py
|
alphagov/ghtools
|
be10c9251197c4c170e617f8328c1f94f5f45dca
|
[
"MIT"
] | 3
|
2015-02-06T13:39:31.000Z
|
2016-10-03T09:33:33.000Z
|
ghtools/exceptions.py
|
alphagov/ghtools
|
be10c9251197c4c170e617f8328c1f94f5f45dca
|
[
"MIT"
] | 3
|
2017-10-12T10:33:20.000Z
|
2021-04-10T19:55:50.000Z
|
class GithubError(Exception):
    """Base class for all GitHub-related errors raised by ghtools."""
class GithubAPIError(GithubError):
    """Raised when the GitHub API returns an error response."""
| 12.142857
| 34
| 0.741176
| 8
| 85
| 7.875
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188235
| 85
| 6
| 35
| 14.166667
| 0.913043
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
9c782c7bf428d457edf67db105b075491d48af1e
| 11,555
|
py
|
Python
|
lib/networks/vgg16_gan.py
|
asbe/PoseCNN
|
0dc7f4f1d63908a43d5afc1ac4cf327ae88c658c
|
[
"MIT"
] | null | null | null |
lib/networks/vgg16_gan.py
|
asbe/PoseCNN
|
0dc7f4f1d63908a43d5afc1ac4cf327ae88c658c
|
[
"MIT"
] | null | null | null |
lib/networks/vgg16_gan.py
|
asbe/PoseCNN
|
0dc7f4f1d63908a43d5afc1ac4cf327ae88c658c
|
[
"MIT"
] | 1
|
2018-06-24T14:48:59.000Z
|
2018-06-24T14:48:59.000Z
|
import tensorflow as tf
from networks.network import Network
class vgg16_gan(Network):
    def __init__(self, input_format, num_classes, num_units, scales, vertex_reg=False, trainable=True):
        """Build input placeholders and the feeding queue for the GAN network.

        input_format: 'RGBD' adds a second image placeholder (data_p);
            anything else uses a single RGB input.
        vertex_reg: when True, adds vertex-regression target/weight inputs.
        scales: self.scale is set from the first entry (1 / scales[0]).
        """
        self.inputs = []
        self.input_format = input_format
        self.num_classes = num_classes
        self.num_units = num_units
        self.scale = 1 / scales[0]
        self.vertex_reg = vertex_reg
        # Input image: NHWC with 3 channels, dynamic batch/height/width.
        self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        if input_format == 'RGBD':
            # Second 3-channel input for the RGBD case — presumably a
            # depth-derived image; TODO confirm against the data layer.
            self.data_p = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        # Per-pixel one-hot-style class labels (num_classes channels).
        self.gt_label_2d = tf.placeholder(tf.float32, shape=[None, None, None, self.num_classes])
        self.keep_prob = tf.placeholder(tf.float32)
        if vertex_reg:
            # 3 regression channels per class for targets and their weights.
            self.vertex_targets = tf.placeholder(tf.float32, shape=[None, None, None, 3 * num_classes])
            self.vertex_weights = tf.placeholder(tf.float32, shape=[None, None, None, 3 * num_classes])
        # Discriminator labels (2 channels: real/fake) and a per-class color map.
        self.gan_label_true = tf.placeholder(tf.float32, shape=[None, None, None, 2])
        self.gan_label_false = tf.placeholder(tf.float32, shape=[None, None, None, 2])
        self.gan_label_color = tf.placeholder(tf.float32, shape=[num_classes, 3])
        # define a queue
        # One FIFOQueue variant per (input_format, vertex_reg and trainable)
        # combination; the enqueue op feeds the placeholders, dequeue yields
        # the tensors the layer graph is built from. The tensor order in
        # enqueue and dequeue must match exactly.
        if input_format == 'RGBD':
            if vertex_reg and trainable:
                q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
                self.enqueue_op = q.enqueue([self.data, self.data_p, self.gt_label_2d, self.keep_prob, self.vertex_targets, self.vertex_weights, \
                                             self.gan_label_true, self.gan_label_false, self.gan_label_color])
                data, data_p, gt_label_2d, self.keep_prob_queue, vertex_targets, vertex_weights, gan_label_true, gan_label_false, gan_label_color = q.dequeue()
                self.layers = dict({'data': data, 'data_p': data_p, 'gt_label_2d': gt_label_2d, \
                                    'vertex_targets': vertex_targets, 'vertex_weights': vertex_weights, \
                                    'gan_label_true': gan_label_true, 'gan_label_false': gan_label_false, \
                                    'gan_label_color': gan_label_color})
            else:
                q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
                self.enqueue_op = q.enqueue([self.data, self.data_p, self.gt_label_2d, self.keep_prob, self.gan_label_true, self.gan_label_false, \
                                             self.gan_label_color])
                data, data_p, gt_label_2d, self.keep_prob_queue, gan_label_true, gan_label_false, gan_label_color = q.dequeue()
                self.layers = dict({'data': data, 'data_p': data_p, 'gt_label_2d': gt_label_2d, \
                                    'gan_label_true': gan_label_true, 'gan_label_false': gan_label_false, \
                                    'gan_label_color': gan_label_color})
        else:
            if vertex_reg and trainable:
                q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
                self.enqueue_op = q.enqueue([self.data, self.gt_label_2d, self.keep_prob, self.vertex_targets, self.vertex_weights, \
                                             self.gan_label_true, self.gan_label_false, self.gan_label_color])
                data, gt_label_2d, self.keep_prob_queue, vertex_targets, vertex_weights, gan_label_true, gan_label_false, gan_label_color = q.dequeue()
                self.layers = dict({'data': data, 'gt_label_2d': gt_label_2d, 'vertex_targets': vertex_targets, 'vertex_weights': vertex_weights, \
                                    'gan_label_true': gan_label_true, 'gan_label_false': gan_label_false, \
                                    'gan_label_color': gan_label_color})
            else:
                q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
                self.enqueue_op = q.enqueue([self.data, self.gt_label_2d, self.keep_prob, self.gan_label_true, self.gan_label_false, self.gan_label_color])
                data, gt_label_2d, self.keep_prob_queue, gan_label_true, gan_label_false, gan_label_color = q.dequeue()
                self.layers = dict({'data': data, 'gt_label_2d': gt_label_2d, \
                                    'gan_label_true': gan_label_true, 'gan_label_false': gan_label_false, \
                                    'gan_label_color': gan_label_color})
        # Allow clean shutdown: pending enqueues are cancelled on close.
        self.close_queue_op = q.close(cancel_pending_enqueues=True)
        self.trainable = trainable
        self.setup()
def setup(self):
"""Build the network graph: a VGG-16-style convolutional backbone
(generator), an FCN-style score/upscore skip-fusion head, an optional
per-class vertex-regression head, and a weight-sharing discriminator
that is run once over predicted and once over ground-truth vertex maps.
NOTE(review): layer helpers (.conv/.deconv/.feed/...) come from the
enclosing Network base class, which is outside this view."""
# generator
# conv1..conv5 stack matches VGG-16's conv layout
# (64,64 / 128,128 / 256x3 / 512x3 / 512x3) with 2x2 max-pooling.
(self.feed('data')
.conv(3, 3, 64, 1, 1, name='conv1_1', c_i=3)
.conv(3, 3, 64, 1, 1, name='conv1_2', c_i=64)
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', c_i=64)
.conv(3, 3, 128, 1, 1, name='conv2_2', c_i=128)
.max_pool(2, 2, 2, 2, name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1', c_i=128)
.conv(3, 3, 256, 1, 1, name='conv3_2', c_i=256)
.conv(3, 3, 256, 1, 1, name='conv3_3', c_i=256)
.max_pool(2, 2, 2, 2, name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1', c_i=256)
.conv(3, 3, 512, 1, 1, name='conv4_2', c_i=512)
.conv(3, 3, 512, 1, 1, name='conv4_3', c_i=512)
.max_pool(2, 2, 2, 2, name='pool4')
.conv(3, 3, 512, 1, 1, name='conv5_1', c_i=512)
.conv(3, 3, 512, 1, 1, name='conv5_2', c_i=512)
.conv(3, 3, 512, 1, 1, name='conv5_3', c_i=512))
# RGBD input: run a parallel '_p' tower (presumably over the depth
# image fed as 'data_p' -- confirm against the feeder) and fuse the
# two towers by channel concatenation before scoring.
if self.input_format == 'RGBD':
(self.feed('data_p')
.conv(3, 3, 64, 1, 1, name='conv1_1_p', c_i=3)
.conv(3, 3, 64, 1, 1, name='conv1_2_p', c_i=64)
.max_pool(2, 2, 2, 2, name='pool1_p')
.conv(3, 3, 128, 1, 1, name='conv2_1_p', c_i=64)
.conv(3, 3, 128, 1, 1, name='conv2_2_p', c_i=128)
.max_pool(2, 2, 2, 2, name='pool2_p')
.conv(3, 3, 256, 1, 1, name='conv3_1_p', c_i=128)
.conv(3, 3, 256, 1, 1, name='conv3_2_p', c_i=256)
.conv(3, 3, 256, 1, 1, name='conv3_3_p', c_i=256)
.max_pool(2, 2, 2, 2, name='pool3_p')
.conv(3, 3, 512, 1, 1, name='conv4_1_p', c_i=256)
.conv(3, 3, 512, 1, 1, name='conv4_2_p', c_i=512)
.conv(3, 3, 512, 1, 1, name='conv4_3_p', c_i=512)
.max_pool(2, 2, 2, 2, name='pool4_p')
.conv(3, 3, 512, 1, 1, name='conv5_1_p', c_i=512)
.conv(3, 3, 512, 1, 1, name='conv5_2_p', c_i=512)
.conv(3, 3, 512, 1, 1, name='conv5_3_p', c_i=512))
(self.feed('conv5_3', 'conv5_3_p')
.concat(3, name='concat_conv5')
.conv(1, 1, self.num_units, 1, 1, name='score_conv5', c_i=1024)
.deconv(4, 4, self.num_units, 2, 2, name='upscore_conv5', trainable=False))
(self.feed('conv4_3', 'conv4_3_p')
.concat(3, name='concat_conv4')
.conv(1, 1, self.num_units, 1, 1, name='score_conv4', c_i=1024))
else:
(self.feed('conv5_3')
.conv(1, 1, self.num_units, 1, 1, name='score_conv5', c_i=512)
.deconv(4, 4, self.num_units, 2, 2, name='upscore_conv5', trainable=False))
(self.feed('conv4_3')
.conv(1, 1, self.num_units, 1, 1, name='score_conv4', c_i=512))
# FCN-style skip fusion: 2x-upsampled conv5 scores added to conv4
# scores, then a fixed (trainable=False) 8*scale deconv back to input
# resolution, 1x1 conv to class logits, and log-softmax probabilities.
(self.feed('score_conv4', 'upscore_conv5')
.add(name='add_score')
.dropout(self.keep_prob_queue, name='dropout')
.deconv(int(16*self.scale), int(16*self.scale), self.num_units, int(8*self.scale), int(8*self.scale), name='upscore', trainable=False)
.conv(1, 1, self.num_classes, 1, 1, name='score', c_i=self.num_units)
.log_softmax_high_dimension(self.num_classes, name='prob'))
# hard per-pixel labels from the normalized class probabilities
(self.feed('score')
.softmax_high_dimension(self.num_classes, name='prob_normalized')
.argmax_2d(name='label_2d'))
# vertex head: same conv4/conv5 fusion pattern, no ReLU on the score
# convs, ending in a 3-channels-per-class regression map 'vertex_pred'.
if self.vertex_reg:
(self.feed('conv5_3')
.conv(1, 1, 128, 1, 1, name='score_conv5_vertex', relu=False, c_i=512)
.deconv(4, 4, 128, 2, 2, name='upscore_conv5_vertex', trainable=False))
(self.feed('conv4_3')
.conv(1, 1, 128, 1, 1, name='score_conv4_vertex', relu=False, c_i=512))
(self.feed('score_conv4_vertex', 'upscore_conv5_vertex')
.add(name='add_score_vertex')
.dropout(self.keep_prob_queue, name='dropout_vertex')
.deconv(int(16*self.scale), int(16*self.scale), 128, int(8*self.scale), int(8*self.scale), name='upscore_vertex', trainable=False)
.conv(1, 1, 3 * self.num_classes, 1, 1, name='vertex_pred', relu=False, c_i=128))
# discriminator
# Pass 0 scores the generator's 'vertex_pred' (reuse=None creates the
# variables); pass 1 scores the ground-truth 'vertex_targets' with
# reuse=True so both passes share the same discriminator weights.
# The 255 factor presumably rescales vertex maps to image range --
# TODO confirm against the data pipeline.
outputs_d = []
for i in range(2):
# NOTE(review): debug print left in the graph-construction loop.
print(i)
if i == 0:
reuse = None
self.layers['input_d'] = 255 * self.layers['vertex_pred']
else:
reuse = True
self.layers['input_d'] = 255 * self.layers['vertex_targets']
# label tower
(self.feed('input_d', 'data')
.concat(3, name='image_d')
.conv(3, 3, 64, 1, 1, name='conv1_1_d', reuse=reuse, c_i=3*self.num_classes+3)
.conv(3, 3, 64, 1, 1, name='conv1_2_d', reuse=reuse, c_i=64)
.max_pool(2, 2, 2, 2, name='pool1_d')
.conv(3, 3, 128, 1, 1, name='conv2_1_d', reuse=reuse, c_i=64)
.conv(3, 3, 128, 1, 1, name='conv2_2_d', reuse=reuse, c_i=128)
.max_pool(2, 2, 2, 2, name='pool2_d')
.conv(3, 3, 256, 1, 1, name='conv3_1_d', reuse=reuse, c_i=128)
.conv(3, 3, 256, 1, 1, name='conv3_2_d', reuse=reuse, c_i=256)
.conv(3, 3, 256, 1, 1, name='conv3_3_d', reuse=reuse, c_i=256)
.max_pool(2, 2, 2, 2, name='pool3_d')
.conv(3, 3, 512, 1, 1, name='conv4_1_d', reuse=reuse, c_i=256)
.conv(3, 3, 512, 1, 1, name='conv4_2_d', reuse=reuse, c_i=512)
.conv(3, 3, 512, 1, 1, name='conv4_3_d', reuse=reuse, c_i=512)
.max_pool(2, 2, 2, 2, name='pool4_d')
.conv(3, 3, 512, 1, 1, name='conv5_1_d', reuse=reuse, c_i=512)
.dropout(self.keep_prob_queue, name='dropout_conv5_1_d')
.conv(3, 3, 512, 1, 1, name='conv5_2_d', reuse=reuse, c_i=512)
.dropout(self.keep_prob_queue, name='dropout_conv5_2_d')
.conv(3, 3, 512, 1, 1, name='conv5_3_d', reuse=reuse, c_i=512)
.dropout(self.keep_prob_queue, name='dropout_conv5_3_d')
.max_pool(2, 2, 2, 2, name='pool5_d')
.conv(3, 3, self.num_units, 1, 1, reuse=reuse, name='embed_d', c_i=512)
.conv(1, 1, 2, 1, 1, name='score_d', reuse=reuse, c_i=self.num_units)
.log_softmax_high_dimension(2, name='prob_d'))
# collect outputs
outputs_d.append(self.get_output('prob_d'))
self.layers['outputs_d'] = outputs_d
| 61.137566
| 159
| 0.550584
| 1,733
| 11,555
| 3.41431
| 0.069821
| 0.019605
| 0.048673
| 0.079094
| 0.808687
| 0.785871
| 0.753422
| 0.723171
| 0.693426
| 0.580193
| 0
| 0.097794
| 0.301774
| 11,555
| 188
| 160
| 61.462766
| 0.635597
| 0.005712
| 0
| 0.162651
| 0
| 0
| 0.110076
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012048
| false
| 0
| 0.012048
| 0
| 0.03012
| 0.006024
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9c80c75ed2a0bbda6d6cead676de2408e85bbd58
| 2,374
|
py
|
Python
|
python/tests/transformers/split_columns_test.py
|
blpabhishek/prep-buddy
|
d663d13a45205777d474a2160716c283c4a864b2
|
[
"Apache-2.0"
] | null | null | null |
python/tests/transformers/split_columns_test.py
|
blpabhishek/prep-buddy
|
d663d13a45205777d474a2160716c283c4a864b2
|
[
"Apache-2.0"
] | null | null | null |
python/tests/transformers/split_columns_test.py
|
blpabhishek/prep-buddy
|
d663d13a45205777d474a2160716c283c4a864b2
|
[
"Apache-2.0"
] | 1
|
2018-05-29T16:21:33.000Z
|
2018-05-29T16:21:33.000Z
|
from utils.python_test_case import PySparkTestCase
from pyprepbuddy.rdds.transformable_rdd import TransformableRDD
class SplitColumnsTest(PySparkTestCase):
    """Tests for TransformableRDD column splitting by delimiter and by field length."""

    def test_should_split_given_column_indexes_split_by_delimiter(self):
        initial_data_set = self.sc.parallelize(["FirstName LastName MiddleName,850"])
        initial_rdd = TransformableRDD(initial_data_set, "csv")
        splitted_columns = initial_rdd.split_by_delimiter(0, " ", False)
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual("850,FirstName,LastName,MiddleName", splitted_columns.first())

    def test_should_split_given_column_indexes_split_by_delimiter_with_retain_column(self):
        initial_data_set = self.sc.parallelize(["FirstName LastName MiddleName,850"])
        initial_rdd = TransformableRDD(initial_data_set, "csv")
        split_with_retained_columns = initial_rdd.split_by_delimiter(0, " ", True)
        self.assertEqual("FirstName LastName MiddleName,850,FirstName,LastName,MiddleName",
                         split_with_retained_columns.first())

    def test_should_split_given_column_by_field_length(self):
        data = ["John,Male,21,+914382313832,Canada", "Smith, Male, 30,+015314343462, UK",
                "Larry, Male, 23,+009815432975, USA", "Fiona, Female,18,+891015709854,USA"]
        initial_data_set = self.sc.parallelize(data)
        initial_rdd = TransformableRDD(initial_data_set, "csv")
        result = initial_rdd.split_by_field_length(3, [3, 10], False).collect()
        # assertEqual/assertIn give informative failure messages, unlike
        # assertTrue(len(...) == 4) and the dunder __contains__ calls.
        self.assertEqual(4, len(result))
        self.assertIn("John,Male,21,Canada,+91,4382313832", result)
        self.assertIn("Smith,Male,30,UK,+01,5314343462", result)

    def test_should_split_given_column_by_field_length_with_retained_columns(self):
        data = ["John,Male,21,+914382313832,Canada", "Smith, Male, 30,+015314343462, UK",
                "Larry, Male, 23,+009815432975, USA", "Fiona, Female,18,+891015709854,USA"]
        initial_data_set = self.sc.parallelize(data)
        initial_rdd = TransformableRDD(initial_data_set, "csv")
        result = initial_rdd.split_by_field_length(3, [3, 10], True).collect()
        self.assertEqual(4, len(result))
        self.assertIn("John,Male,21,+914382313832,Canada,+91,4382313832", result)
        self.assertIn("Smith,Male,30,+015314343462,UK,+01,5314343462", result)
| 53.954545
| 96
| 0.723673
| 288
| 2,374
| 5.628472
| 0.256944
| 0.054287
| 0.069093
| 0.044417
| 0.762492
| 0.746453
| 0.746453
| 0.704503
| 0.689698
| 0.637878
| 0
| 0.108162
| 0.158804
| 2,374
| 43
| 97
| 55.209302
| 0.703555
| 0
| 0
| 0.4375
| 0
| 0
| 0.25358
| 0.149537
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
92bf77a7b8b10cb4c9ef114e2b208c57f5cf0aa5
| 1,834
|
py
|
Python
|
all_tests.py
|
autumnjolitz/pytypedecl
|
7ae0f31917ae9049a7116212b6a1ff5657db9ba0
|
[
"Apache-2.0"
] | null | null | null |
all_tests.py
|
autumnjolitz/pytypedecl
|
7ae0f31917ae9049a7116212b6a1ff5657db9ba0
|
[
"Apache-2.0"
] | null | null | null |
all_tests.py
|
autumnjolitz/pytypedecl
|
7ae0f31917ae9049a7116212b6a1ff5657db9ba0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from parse import ast_test
import checker_classes_test
import checker_generics_test
import checker_overloading_test
import checker_test
import checker_union_test
def suite():
  """Aggregate every ast and checker TestCase into a single TestSuite."""
  # TODO: can this be simplified using test discovery?
  # One TestLoader is enough; the original built a fresh loader for
  # every TestCase class.
  loader = unittest.TestLoader()
  test_cases = [
      # ast tests
      ast_test.TestASTGeneration,
      ast_test.TestTupleEq,
      # checker tests
      checker_classes_test.TestCheckerClasses,
      checker_generics_test.TestCheckerGenerics,
      checker_overloading_test.TestCheckerOverloading,
      checker_test.TestChecker,
      checker_union_test.TestCheckerUnion,
  ]
  return unittest.TestSuite(
      loader.loadTestsFromTestCase(case) for case in test_cases)
if __name__ == "__main__":
  unittest.TextTestRunner().run(suite())
| 36.68
| 110
| 0.780262
| 221
| 1,834
| 6.316742
| 0.515837
| 0.090258
| 0.195559
| 0.164756
| 0.065903
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006349
| 0.141221
| 1,834
| 49
| 111
| 37.428571
| 0.88
| 0.38386
| 0
| 0
| 0
| 0
| 0.007194
| 0
| 0
| 0
| 0
| 0.020408
| 0
| 1
| 0.05
| false
| 0
| 0.35
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
92bfe0007e3093684707e85a025f71a831837252
| 201
|
py
|
Python
|
aboutus/admin.py
|
aryamanak10/diner-restaurant-website
|
6d2d9de89a73c5535ebf782c4d8bbfc6ca9489fc
|
[
"MIT"
] | 1
|
2020-05-07T17:18:36.000Z
|
2020-05-07T17:18:36.000Z
|
aboutus/admin.py
|
aryamanak10/Restaurant-Site-using-Django
|
6d2d9de89a73c5535ebf782c4d8bbfc6ca9489fc
|
[
"MIT"
] | null | null | null |
aboutus/admin.py
|
aryamanak10/Restaurant-Site-using-Django
|
6d2d9de89a73c5535ebf782c4d8bbfc6ca9489fc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import AboutUs, Why_Choose_Us, Chef
# Register your models here.
# Expose the about-us page models in the default Django admin site.
admin.site.register(AboutUs)
admin.site.register(Why_Choose_Us)
admin.site.register(Chef)
| 28.714286
| 48
| 0.820896
| 31
| 201
| 5.193548
| 0.483871
| 0.167702
| 0.31677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089552
| 201
| 7
| 49
| 28.714286
| 0.879781
| 0.129353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
92c4f6812b39ef09966ce33cf30ee874c281435a
| 167
|
py
|
Python
|
Course-2:Graphs/scc.py
|
karenk1010/Coursera-Algorithms-Specialization
|
5d293ff6e74e7d6f2090696d21d282e1734f396a
|
[
"MIT"
] | null | null | null |
Course-2:Graphs/scc.py
|
karenk1010/Coursera-Algorithms-Specialization
|
5d293ff6e74e7d6f2090696d21d282e1734f396a
|
[
"MIT"
] | null | null | null |
Course-2:Graphs/scc.py
|
karenk1010/Coursera-Algorithms-Specialization
|
5d293ff6e74e7d6f2090696d21d282e1734f396a
|
[
"MIT"
] | 2
|
2021-02-04T22:20:15.000Z
|
2021-02-11T13:27:24.000Z
|
#!/usr/bin/python3
"""
Implement the strongly connected components algorithm using the 'SCC.txt'
adjacency list.
Problem answer (first 5): [434821, 968, 459, 313, 211]
"""
| 16.7
| 75
| 0.712575
| 22
| 167
| 5.409091
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13986
| 0.143713
| 167
| 9
| 76
| 18.555556
| 0.692308
| 0.928144
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
92db83cdc943ba3eef48d5dddfb0e0c383df1394
| 111
|
py
|
Python
|
tests/itunes/examples/functions/mute, muted, unmute/mute.py
|
andrewp-as-is/itunes.py
|
51c7f9d07ed8858970565532c8a26c5a4b78a471
|
[
"Unlicense"
] | null | null | null |
tests/itunes/examples/functions/mute, muted, unmute/mute.py
|
andrewp-as-is/itunes.py
|
51c7f9d07ed8858970565532c8a26c5a4b78a471
|
[
"Unlicense"
] | null | null | null |
tests/itunes/examples/functions/mute, muted, unmute/mute.py
|
andrewp-as-is/itunes.py
|
51c7f9d07ed8858970565532c8a26c5a4b78a471
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itunes
# Mute playback, then report the resulting mute state.
itunes.mute()
# NOTE(review): itunes.muted() presumably returns a boolean -- confirm
# against the itunes package documentation.
print("muted: %s" % itunes.muted())
| 15.857143
| 35
| 0.621622
| 16
| 111
| 4.3125
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010417
| 0.135135
| 111
| 6
| 36
| 18.5
| 0.708333
| 0.378378
| 0
| 0
| 0
| 0
| 0.134328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
92eb40d08825fd6f106133b930720626bba8c898
| 76
|
py
|
Python
|
alastria_identity/types/presentation.py
|
alastria/alastria-identity-lib-py
|
63ec9d9e60d267c3900d2a827b5d4adb7d265acb
|
[
"MIT"
] | 1
|
2021-04-22T21:22:15.000Z
|
2021-04-22T21:22:15.000Z
|
alastria_identity/types/presentation.py
|
alejandroalffer/alastria-identity-lib-py
|
4db82fc905fcfa48749cf6908a8dcfb462a5ff81
|
[
"MIT"
] | 2
|
2020-12-01T08:50:25.000Z
|
2020-12-16T15:10:33.000Z
|
alastria_identity/types/presentation.py
|
alejandroalffer/alastria-identity-lib-py
|
4db82fc905fcfa48749cf6908a8dcfb462a5ff81
|
[
"MIT"
] | 2
|
2020-10-21T11:22:40.000Z
|
2021-04-17T15:36:56.000Z
|
from dataclasses import dataclass
@dataclass
class Presentation:
    """Empty placeholder dataclass for an Alastria presentation payload."""
| 10.857143
| 33
| 0.789474
| 8
| 76
| 7.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 76
| 6
| 34
| 12.666667
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
92f70b6998852a0327670d7059137376953b6d34
| 453
|
py
|
Python
|
src/cdev/default/output_manager.py
|
cdev-framework/cdev-sdk
|
06cd7b40936ab063d1d8fd1a7d9f6882750e8a96
|
[
"BSD-3-Clause-Clear"
] | 2
|
2022-02-28T02:51:59.000Z
|
2022-03-24T15:23:18.000Z
|
src/cdev/default/output_manager.py
|
cdev-framework/cdev-sdk
|
06cd7b40936ab063d1d8fd1a7d9f6882750e8a96
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
src/cdev/default/output_manager.py
|
cdev-framework/cdev-sdk
|
06cd7b40936ab063d1d8fd1a7d9f6882750e8a96
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
from rich import print
from typing import List
from core.constructs.output_manager import OutputManager
from ..constructs.project import Project
class CdevOutputManager(OutputManager):
def print_header(self) -> None:
"""Print a blank-line-padded 'Project: <name>' banner for the active project singleton."""
myproject = Project.instance()
print("")
print(f"Project: {myproject.get_name()}")
print("")
def print_components_to_diff_against(self, old_component_names: List[str]) -> None:
# No-op override; presumably suppresses the base-class message for
# this event -- confirm against core OutputManager.
pass
| 23.842105
| 87
| 0.704194
| 53
| 453
| 5.849057
| 0.603774
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198676
| 453
| 18
| 88
| 25.166667
| 0.853994
| 0
| 0
| 0.166667
| 0
| 0
| 0.068433
| 0.048565
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.083333
| 0.333333
| 0
| 0.583333
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
|
0
| 4
|
13035de2f4ed7f9ba4aaffa1a58f6ef4053a0215
| 928
|
py
|
Python
|
Python3/441.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 854
|
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
Python3/441.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 29
|
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
Python3/441.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 347
|
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
    def arrangeCoins(self, n: int) -> int:
        """Return how many complete staircase rows n coins can fill.

        Row k needs k coins, so k full rows consume k*(k+1)/2 coins.
        Solving k^2 + k - 2n = 0 and flooring the positive root gives
        the answer in O(1).
        """
        root = math.sqrt(1 + 8 * n)
        return int((root - 1) // 2)
__________________________________________________________________________________________________
sample 13088 kb submission
class Solution:
    def arrangeCoins(self, n: int) -> int:
        """Return how many complete staircase rows n coins can fill.

        Linear scan: keep a running coin total per row and stop at the
        first row that overshoots n.
        """
        if n == 0:
            return 0
        coins_used = 0
        for row in range(1, n + 1):
            coins_used += row
            if coins_used > n:
                # row itself is incomplete; the previous row was the last full one
                return row - 1
        # every row up to n fit exactly (only possible for n == 1)
        return row
__________________________________________________________________________________________________
| 29.935484
| 98
| 0.580819
| 87
| 928
| 2.816092
| 0.37931
| 0.02449
| 0.187755
| 0.212245
| 0.4
| 0.4
| 0.4
| 0.4
| 0.4
| 0
| 0
| 0.05153
| 0.330819
| 928
| 30
| 99
| 30.933333
| 0.342995
| 0.085129
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
131e532f3ac414dd95b9e8351f082efeba755f20
| 220
|
py
|
Python
|
pymachinetalk/application/__init__.py
|
cerna/pymachinetalk
|
8566d064f2162c12ee2d812bd7d0b9dfb3225b7d
|
[
"MIT"
] | 6
|
2017-06-11T14:26:50.000Z
|
2020-06-25T21:56:48.000Z
|
pymachinetalk/application/__init__.py
|
cerna/pymachinetalk
|
8566d064f2162c12ee2d812bd7d0b9dfb3225b7d
|
[
"MIT"
] | 9
|
2017-04-07T12:00:04.000Z
|
2021-12-22T13:02:49.000Z
|
pymachinetalk/application/__init__.py
|
cerna/pymachinetalk
|
8566d064f2162c12ee2d812bd7d0b9dfb3225b7d
|
[
"MIT"
] | 7
|
2016-06-21T13:29:27.000Z
|
2020-09-21T16:05:48.000Z
|
# coding=utf-8
from .constants import *
from .command import ApplicationCommand
from .error import ApplicationError
from .file import ApplicationFile
from .log import ApplicationLog
from .status import ApplicationStatus
| 27.5
| 39
| 0.836364
| 26
| 220
| 7.076923
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005155
| 0.118182
| 220
| 7
| 40
| 31.428571
| 0.943299
| 0.054545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1323873b66770208644c62a724f3445435169c34
| 14,181
|
py
|
Python
|
hmm_for_baxter_using_only_success_trials/birl_hmm_user_interface.py
|
birlrobotics/HMM
|
6c4231214668d3bf140d4b9b97323bc71f09d17e
|
[
"BSD-3-Clause"
] | 2
|
2019-01-31T11:14:50.000Z
|
2019-07-21T20:33:37.000Z
|
hmm_for_baxter_using_only_success_trials/birl_hmm_user_interface.py
|
birlrobotics/HMM
|
6c4231214668d3bf140d4b9b97323bc71f09d17e
|
[
"BSD-3-Clause"
] | null | null | null |
hmm_for_baxter_using_only_success_trials/birl_hmm_user_interface.py
|
birlrobotics/HMM
|
6c4231214668d3bf140d4b9b97323bc71f09d17e
|
[
"BSD-3-Clause"
] | null | null | null |
from optparse import OptionParser
import training_config
import util
import ipdb
import os
def warn(*args, **kwargs):
    """Replacement for warnings.warn that silences DeprecationWarning.

    Any other warning is printed, one positional argument per line; the
    original warning machinery is bypassed entirely.
    """
    if 'category' in kwargs and kwargs['category'] == DeprecationWarning:
        pass
    else:
        for arg in args:
            # Call form instead of the Python-2-only `print arg`
            # statement: for a single argument it behaves identically
            # under Python 2 and is valid Python 3.
            print(arg)
import warnings
warnings.warn = warn
def build_parser():
    """Build the OptionParser for the BIRL HMM command-line tool.

    Every boolean switch is a store_true flag defaulting to False whose
    dest is the flag name with leading dashes stripped and inner dashes
    replaced by underscores.  --trial-class stores a string (success or
    test_success) and defaults to None.
    """
    parser = OptionParser()
    # (flag, help) pairs; previously each one was a near-identical
    # hand-written add_option call.
    boolean_options = [
        ("--train-model",
         "True if you want to train HMM models."),
        ("--train-anomaly-model",
         "True if you want to train HMM anomaly models."),
        ("--learn_threshold_for_log_likelihood",
         "True if you want to learn_threshold_for_log_likelihood."),
        ("--learn_threshold_for_gradient_of_log_likelihood",
         "True if you want to learn_threshold_for_gradient_of_log_likelihood."),
        ("--learn_threshold_for_deri_of_diff",
         "True if you want to learn_threshold_for_deri_of_diff."),
        ("--train-derivative-threshold",
         "True if you want to train derivative threshold."),
        ("--online-service",
         "True if you want to run online anomaly detection and online state classification."),
        ("--hidden-state-log-prob-plot",
         "True if you want to plot hidden state log prob."),
        ("--trial-log-likelihood-plot",
         "True if you want to plot trials' log likelihood."),
        ("--emission-log-prob-plot",
         "True if you want to plot emission log prob."),
        ("--trial-log-likelihood-gradient-plot",
         "True if you want to plot trials' log likelihood gradient."),
        ("--check-if-score-metric-converge-loglik-curves",
         "True if you want to check_if_score_metric_converge_loglik_curves."),
        ("--check_if_viterbi_path_grow_incrementally",
         "True if you want to check_if_viterbi_path_grow_incrementally."),
        # python birl_hmm_user_interface.py --plot_skill_identification_and_anomaly_detection --trial-class success
        ("--plot_skill_identification_and_anomaly_detection",
         "True if you want to plot_skill_identification_and_anomaly_detection."),
    ]
    for flag, help_text in boolean_options:
        parser.add_option(
            flag,
            action="store_true",
            dest=flag.lstrip("-").replace("-", "_"),
            default=False,
            help=help_text)
    parser.add_option(
        "--trial-class",
        action="store",
        type="string",
        dest="trial_class",
        default=None,
        help="success or test_success")
    return parser
if __name__ == "__main__":
# CLI dispatcher: each flag from build_parser() lazily imports one
# pipeline module and runs it with parameters pulled from
# training_config.  Flags are independent ifs (not elif), so several
# pipelines can run in one invocation.
# NOTE(review): Python 2 print statements and dict.itervalues() below
# make this script Python-2-only.
parser = build_parser()
(options, args) = parser.parse_args()
util.inform_config(training_config)
if options.train_model is True:
print "gonna train HMM model."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
test_trials_group_by_folder_name, test_state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config, data_class="test_success")
import hmm_model_training
hmm_model_training.run(
model_save_path = training_config.model_save_path,
model_type = training_config.model_type_chosen,
model_config = training_config.model_config,
score_metric = training_config.score_metric,
trials_group_by_folder_name = trials_group_by_folder_name,
test_trials_group_by_folder_name=test_trials_group_by_folder_name,
)
if options.train_anomaly_model is True:
print "gonna train HMM anomaly_model."
anomaly_trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config, data_class='anomaly')
import hmm_model_training
hmm_model_training.run(
model_save_path = training_config.anomaly_model_save_path,
model_type = training_config.model_type_chosen,
model_config = training_config.model_config,
score_metric = training_config.score_metric,
trials_group_by_folder_name = anomaly_trials_group_by_folder_name)
if options.learn_threshold_for_log_likelihood is True:
print "gonna learn_threshold_for_log_likelihood."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import learn_threshold_for_log_likelihood
learn_threshold_for_log_likelihood.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name)
if options.learn_threshold_for_gradient_of_log_likelihood is True:
print "gonna learn_threshold_for_gradient_of_log_likelihood."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import learn_threshold_for_gradient_of_log_likelihood
learn_threshold_for_gradient_of_log_likelihood.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name)
if options.learn_threshold_for_deri_of_diff is True:
print "gonna learn_threshold_for_deri_of_diff."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import learn_threshold_for_deri_of_diff
learn_threshold_for_deri_of_diff.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name)
if options.train_derivative_threshold is True:
print "gonna train derivative threshold."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import derivative_threshold_training
derivative_threshold_training.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name)
if options.online_service is True:
print "gonna run online service."
import hmm_online_service.hmm_online_service as hmm_online_service
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
# state count is taken from the per-state grouping of one sample trial
one_trial_data_group_by_state = trials_group_by_folder_name.itervalues().next()
state_amount = len(one_trial_data_group_by_state)
hmm_online_service.run(
interested_data_fields = training_config.interested_data_fields,
model_save_path = training_config.model_save_path,
state_amount = state_amount,
anomaly_detection_metric = training_config.anomaly_detection_metric,
)
if options.hidden_state_log_prob_plot is True:
print "gonna plot hidden state log prob."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import hidden_state_log_prob_plot
hidden_state_log_prob_plot.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name)
# the two trial-plot modes additionally require --trial-class to pick
# which data_class of trials to load
if options.trial_log_likelihood_plot is True:
if options.trial_class is None:
raise Exception("options.trial_class is needed for options.trial_log_likelihood_plot")
data_class = options.trial_class
print "gonna do trial_log_likelihood_plot."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config, data_class=data_class)
import trial_log_likelihood_plot
trial_log_likelihood_plot.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name,
data_class=data_class,
)
if options.trial_log_likelihood_gradient_plot is True:
if options.trial_class is None:
raise Exception("options.trial_class is needed for options.trial_log_likelihood_gradient_plot")
data_class = options.trial_class
print "gonna do trial_log_likelihood_gradient_plot."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config, data_class=data_class)
import trial_log_likelihood_gradient_plot
trial_log_likelihood_gradient_plot.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name,
data_class=data_class,
)
if options.check_if_score_metric_converge_loglik_curves is True:
print "gonna check_if_score_metric_converge_loglik_curves."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import check_if_score_metric_converge_loglik_curves
check_if_score_metric_converge_loglik_curves.run(
model_save_path = training_config.model_save_path,
model_type = training_config.model_type_chosen,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name)
if options.check_if_viterbi_path_grow_incrementally is True:
print "gonna check_if_viterbi_path_grow_incrementally."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import check_if_viterbi_path_grow_incrementally
check_if_viterbi_path_grow_incrementally.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
trials_group_by_folder_name = trials_group_by_folder_name,
options=options,
)
if options.emission_log_prob_plot is True:
print "gonna plot emission log prob."
trials_group_by_folder_name, state_order_group_by_folder_name = util.get_trials_group_by_folder_name(training_config)
import emission_log_prob_plot
emission_log_prob_plot.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
threshold_c_value = training_config.threshold_c_value,
trials_group_by_folder_name = trials_group_by_folder_name)
if options.plot_skill_identification_and_anomaly_detection is True:
if options.trial_class is None:
raise Exception("options.trial_class is needed for options.plot_skill_identification_and_anomaly_detection")
data_class = options.trial_class
# map the trial class to its configured data directory; anything
# outside the three known classes is rejected
if data_class == 'success':
data_path = training_config.success_path
elif data_class == 'anomaly':
data_path = training_config.anomaly_data_path
elif data_class == 'test_success':
data_path = training_config.test_success_data_path
else:
raise Exception("unknown data class %s"%data_class)
import plot_skill_identification_and_anomaly_detection
plot_skill_identification_and_anomaly_detection.run(
model_save_path = training_config.model_save_path,
figure_save_path = training_config.figure_save_path,
anomaly_detection_metric = training_config.anomaly_detection_metric,
trial_class=options.trial_class,
data_path=data_path,
interested_data_fields = training_config.interested_data_fields,
)
| 42.972727
| 162
| 0.723644
| 1,832
| 14,181
| 5.075873
| 0.066594
| 0.053447
| 0.096462
| 0.126143
| 0.845252
| 0.802774
| 0.714378
| 0.644048
| 0.576514
| 0.548446
| 0
| 0
| 0.217192
| 14,181
| 329
| 163
| 43.103343
| 0.837672
| 0.007404
| 0
| 0.457249
| 0
| 0
| 0.187864
| 0.102032
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.003717
| 0.074349
| null | null | 0.052045
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1343d69301b48ac37a3941d5d615a2f1a2b82b98
| 1,999
|
py
|
Python
|
python/phonenumbers/data/region_LB.py
|
Eyepea/python-phonenumbers
|
0336e191fda80a21ed5c19d5e029ad8c70f620ee
|
[
"Apache-2.0"
] | 2
|
2019-03-30T02:12:54.000Z
|
2021-03-08T18:59:40.000Z
|
python/phonenumbers/data/region_LB.py
|
Eyepea/python-phonenumbers
|
0336e191fda80a21ed5c19d5e029ad8c70f620ee
|
[
"Apache-2.0"
] | null | null | null |
python/phonenumbers/data/region_LB.py
|
Eyepea/python-phonenumbers
|
0336e191fda80a21ed5c19d5e029ad8c70f620ee
|
[
"Apache-2.0"
] | 1
|
2018-11-10T03:47:34.000Z
|
2018-11-10T03:47:34.000Z
|
"""Auto-generated file, do not edit by hand. LB metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone-number metadata for Lebanon (id 'LB', country calling code +961),
# consumed by the python-phonenumbers library. This file is auto-generated
# (see module docstring above) — regenerate rather than hand-editing.
PHONE_METADATA_LB = PhoneMetadata(id='LB', country_code=961, international_prefix='00',
    # Overall envelope: 7-8 digit national numbers whose first digit is 1 or 3-9.
    general_desc=PhoneNumberDesc(national_number_pattern='[13-9]\\d{6,7}', possible_number_pattern='\\d{7,8}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:[14-6]\\d{2}|7(?:[2-579]\\d|62|8[0-7])|[89][2-9]\\d)\\d{4}', possible_number_pattern='\\d{7}', example_number='1123456'),
    mobile=PhoneNumberDesc(national_number_pattern='(?:3\\d|7(?:[01]\\d|6[013-9]|8[89]|91))\\d{5}', possible_number_pattern='\\d{7,8}', example_number='71123456'),
    # 'NA' patterns mark number categories that do not exist for this region.
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='9[01]\\d{6}', possible_number_pattern='\\d{8}', example_number='90123456'),
    shared_cost=PhoneNumberDesc(national_number_pattern='8[01]\\d{6}', possible_number_pattern='\\d{8}', example_number='80123456'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    # Domestic numbers are dialled with a leading '0' trunk prefix.
    national_prefix='0',
    national_prefix_for_parsing='0',
    number_format=[NumberFormat(pattern='(\\d)(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['[13-6]|7(?:[2-579]|62|8[0-7])|[89][2-9]'], national_prefix_formatting_rule=u'0\\1'),
        NumberFormat(pattern='([7-9]\\d)(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['[89][01]|7(?:[01]|6[013-9]|8[89]|91)'])])
| 95.190476
| 197
| 0.726363
| 285
| 1,999
| 4.82807
| 0.273684
| 0.226744
| 0.152616
| 0.313953
| 0.498547
| 0.481831
| 0.432413
| 0.419331
| 0.419331
| 0.109012
| 0
| 0.074906
| 0.065033
| 1,999
| 20
| 198
| 99.95
| 0.661316
| 0.026513
| 0
| 0
| 1
| 0.222222
| 0.200515
| 0.117526
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13453ab152bc0873ad661b290d5d32b0f5257767
| 5,878
|
py
|
Python
|
tests/ssl/test_ssl.py
|
wacuuu/workload-collocation-agent
|
9250ec2ab8def033e8546481eaed6aca2caad3d3
|
[
"Apache-2.0"
] | 40
|
2019-05-16T16:42:33.000Z
|
2021-11-18T06:33:03.000Z
|
tests/ssl/test_ssl.py
|
wacuuu/workload-collocation-agent
|
9250ec2ab8def033e8546481eaed6aca2caad3d3
|
[
"Apache-2.0"
] | 72
|
2019-05-09T02:30:25.000Z
|
2020-11-17T09:24:44.000Z
|
tests/ssl/test_ssl.py
|
ppalucki/owca
|
9316f92e2d67f6c37da2dec33e5f769a4c3a465b
|
[
"Apache-2.0"
] | 26
|
2019-05-20T09:13:38.000Z
|
2021-12-15T17:57:21.000Z
|
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import time
from http.server import HTTPServer, BaseHTTPRequestHandler
from multiprocessing import Process
import pytest
import requests
from wca.security import HTTPSAdapter
pytestmark = [pytest.mark.long, pytest.mark.ssl]
class HTTPRequestHandlerForTest(BaseHTTPRequestHandler):
    """Minimal handler for the TLS tests: answer every GET with 200/'Passed'."""

    # Fixed response body the client-side tests assert on.
    _RESPONSE_BODY = b'Passed'

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(self._RESPONSE_BODY)
def run_simple_https_server(ssl_context: ssl.SSLContext):
    """Serve HTTPRequestHandlerForTest forever on 127.0.0.1:8080 over TLS.

    Intended to run in a child Process; it never returns on its own.
    """
    httpd = HTTPServer(('127.0.0.1', 8080), HTTPRequestHandlerForTest)
    # Wrap the listening socket with the caller-supplied TLS configuration.
    httpd.socket = ssl_context.wrap_socket(httpd.socket, server_side=True)
    httpd.serve_forever()
def test_good_certificate():
    """A server certificate signed by the trusted root CA must be accepted."""
    # Disable due to https://github.com/urllib3/urllib3/issues/497
    requests.packages.urllib3.disable_warnings(
        requests.packages.urllib3.exceptions.SubjectAltNameWarning)
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    ssl_context.load_cert_chain('tests/ssl/goodkey.crt', 'tests/ssl/goodkey.key')
    server = Process(target=run_simple_https_server, args=(ssl_context,))
    server.start()
    time.sleep(0.5)  # give the child process time to bind the port
    try:
        s = requests.Session()
        s.mount('https://localhost:8080/', HTTPSAdapter())
        r = s.get('https://localhost:8080/', verify='tests/ssl/rootCA.crt')
        assert r.text == 'Passed'
    finally:
        # `finally` (instead of terminate duplicated in try AND except) also
        # covers KeyboardInterrupt/SystemExit, so the helper server process
        # can never outlive the test.
        server.terminate()
def test_wrong_certificate():
    """Verification against a CA that did not sign the cert must fail."""
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    ssl_context.load_cert_chain('tests/ssl/goodkey.crt', 'tests/ssl/goodkey.key')
    server = Process(target=run_simple_https_server, args=(ssl_context,))
    server.start()
    time.sleep(0.5)  # give the child process time to bind the port
    try:
        with pytest.raises(requests.exceptions.SSLError):
            s = requests.Session()
            s.mount('https://localhost:8080/', HTTPSAdapter())
            s.get('https://localhost:8080/', verify='tests/ssl/wrongRootCA.crt')
    finally:
        # Guaranteed cleanup of the helper server process on every exit path.
        server.terminate()
def test_unsupported_rsa_1024():
    """A 1024-bit RSA server key must be rejected by HTTPSAdapter."""
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    ssl_context.load_cert_chain('tests/ssl/rsa1024.crt', 'tests/ssl/rsa1024.key')
    server = Process(target=run_simple_https_server, args=(ssl_context,))
    server.start()
    time.sleep(0.5)  # give the child process time to bind the port
    try:
        with pytest.raises(requests.exceptions.SSLError):
            s = requests.Session()
            s.mount('https://localhost:8080/', HTTPSAdapter())
            s.get('https://localhost:8080/', verify='tests/ssl/rootCA.crt')
    finally:
        # Guaranteed cleanup of the helper server process on every exit path.
        server.terminate()
def test_supported_rsa_2048():
    """Connection attempt against the 2048-bit RSA server certificate.

    NOTE(review): despite the name, this test expects an SSLError (same as
    the "unsupported" cases) — confirm whether rsa2048.crt is deliberately
    rejected here (e.g. not signed by rootCA.crt) or the expectation is stale.
    Behavior is preserved as-is.
    """
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    ssl_context.load_cert_chain('tests/ssl/rsa2048.crt', 'tests/ssl/rsa2048.key')
    server = Process(target=run_simple_https_server, args=(ssl_context,))
    server.start()
    time.sleep(0.5)  # give the child process time to bind the port
    try:
        with pytest.raises(requests.exceptions.SSLError):
            s = requests.Session()
            s.mount('https://localhost:8080/', HTTPSAdapter())
            s.get('https://localhost:8080/', verify='tests/ssl/rootCA.crt')
    finally:
        # Guaranteed cleanup of the helper server process on every exit path.
        server.terminate()
def test_supported_tls_1_2():
    """TLS 1.2 with a good certificate must succeed end-to-end."""
    # Disable for older openssl versions.
    requests.packages.urllib3.disable_warnings(
        requests.packages.urllib3.exceptions.SubjectAltNameWarning)
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    ssl_context.load_cert_chain('tests/ssl/goodkey.crt', 'tests/ssl/goodkey.key')
    server = Process(target=run_simple_https_server, args=(ssl_context,))
    server.start()
    time.sleep(0.5)  # give the child process time to bind the port
    try:
        s = requests.Session()
        s.mount('https://localhost:8080/', HTTPSAdapter())
        r = s.get('https://localhost:8080/', verify='tests/ssl/rootCA.crt')
        assert r.text == 'Passed'
    finally:
        # Guaranteed cleanup of the helper server process on every exit path.
        server.terminate()
def test_unsupported_tls_1_1():
    """A server that only speaks TLS 1.1 must be rejected."""
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
    ssl_context.load_cert_chain('tests/ssl/goodkey.crt', 'tests/ssl/goodkey.key')
    server = Process(target=run_simple_https_server, args=(ssl_context,))
    server.start()
    time.sleep(0.5)  # give the child process time to bind the port
    try:
        with pytest.raises(requests.exceptions.SSLError):
            s = requests.Session()
            s.mount('https://localhost:8080/', HTTPSAdapter())
            s.get('https://localhost:8080/', verify='tests/ssl/rootCA.crt')
    finally:
        # Guaranteed cleanup of the helper server process on every exit path.
        server.terminate()
def test_unsupported_tls_1_0():
    """A server that only speaks TLS 1.0 must be rejected."""
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ssl_context.load_cert_chain('tests/ssl/goodkey.crt', 'tests/ssl/goodkey.key')
    server = Process(target=run_simple_https_server, args=(ssl_context,))
    server.start()
    time.sleep(0.5)  # give the child process time to bind the port
    try:
        with pytest.raises(requests.exceptions.SSLError):
            s = requests.Session()
            s.mount('https://localhost:8080/', HTTPSAdapter())
            s.get('https://localhost:8080/', verify='tests/ssl/rootCA.crt')
    finally:
        # Guaranteed cleanup of the helper server process on every exit path.
        server.terminate()
| 34.988095
| 81
| 0.677271
| 729
| 5,878
| 5.318244
| 0.227709
| 0.059324
| 0.064999
| 0.041269
| 0.70699
| 0.70699
| 0.70699
| 0.686871
| 0.686871
| 0.686871
| 0
| 0.030374
| 0.199047
| 5,878
| 167
| 82
| 35.197605
| 0.793118
| 0.111432
| 0
| 0.769841
| 0
| 0
| 0.151335
| 0.061264
| 0
| 0
| 0
| 0
| 0.015873
| 1
| 0.071429
| false
| 0.02381
| 0.055556
| 0
| 0.134921
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1352a3951d18e0957d459403059db0014e5676dc
| 1,027
|
py
|
Python
|
Homework/2.homework/Tests/others/test_bst_print.py
|
mevljas/Quality_and_testing
|
6a39610084b1538eae270682a6842270e8971b7f
|
[
"MIT"
] | null | null | null |
Homework/2.homework/Tests/others/test_bst_print.py
|
mevljas/Quality_and_testing
|
6a39610084b1538eae270682a6842270e8971b7f
|
[
"MIT"
] | null | null | null |
Homework/2.homework/Tests/others/test_bst_print.py
|
mevljas/Quality_and_testing
|
6a39610084b1538eae270682a6842270e8971b7f
|
[
"MIT"
] | null | null | null |
import pexpect2
def test_bst_print():
    """Drive the database shell through pexpect2: insert two records into a
    BST backend, then check `print`, `count` and `depth` output.

    Prints PASSED/FAILED rather than raising, and always kills the child
    process.
    """
    baza = pexpect2.pexpect()
    try:
        # Select the binary-search-tree backend.
        baza.expect("Enter command: ")
        baza.send("use bst")
        baza.expect("OK")
        # Insert two records (name, surname, age, personal id).
        baza.expect("Enter command: ")
        baza.send("add Andrej Novak 15 2111935500138")
        baza.expect("OK")
        baza.expect("Enter command: ")
        baza.send("add Janez Levak 15 2111935500132")
        baza.expect("OK")
        # `print` must list both records; the tab prefix reflects tree depth.
        baza.expect("Enter command: ")
        baza.send("print")
        baza.expect("2111935500138 | Novak, Andrej | 15")
        baza.expect("\t2111935500132 | Levak, Janez | 15")
        baza.expect("OK")
        # Two records inserted -> count and depth are both 2.
        baza.expect("Enter command: ")
        baza.send("count")
        baza.expect("2")
        baza.expect("Enter command: ")
        baza.send("depth")
        baza.expect("2")
        baza.expect("Enter command: ")
        print "PASSED\ttest_bst_print"
    # NOTE(review): bare `except` swallows everything, including
    # KeyboardInterrupt — consider `except Exception`.
    except:
        print "FAILED\ttest_bst_print"
    finally:
        # Always reap the spawned shell process.
        baza.kill()
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_bst_print()
| 21.395833
| 58
| 0.564752
| 115
| 1,027
| 4.904348
| 0.313043
| 0.265957
| 0.18617
| 0.27305
| 0.492908
| 0.492908
| 0.425532
| 0.308511
| 0.308511
| 0.159574
| 0
| 0.088276
| 0.29406
| 1,027
| 47
| 59
| 21.851064
| 0.689655
| 0
| 0
| 0.40625
| 0
| 0
| 0.314815
| 0.042885
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.03125
| 0.03125
| null | null | 0.15625
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
137489f5861a8a84ca36a22b0984fa9f1819db8c
| 879
|
py
|
Python
|
provider.py
|
CUUATS/cuuatsalg
|
0066c18c6606df7dede3627b978047fa5974efa9
|
[
"BSD-3-Clause"
] | null | null | null |
provider.py
|
CUUATS/cuuatsalg
|
0066c18c6606df7dede3627b978047fa5974efa9
|
[
"BSD-3-Clause"
] | 1
|
2018-02-21T15:43:07.000Z
|
2018-02-21T15:43:07.000Z
|
provider.py
|
CUUATS/cuuatsalg
|
0066c18c6606df7dede3627b978047fa5974efa9
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsProcessingProvider
from cuuatsalg.algorithms import CopyNetworkAttributes, \
CreateNetworkMatchTable, CreateProjectFolder
# Absolute directory of this plugin module; used to locate bundled SVG icons.
plugin_path = os.path.dirname(__file__)
class CuuatsAlgorithmProvider(QgsProcessingProvider):
    """QGIS Processing provider that registers the CUUATS algorithms."""

    def __init__(self):
        super().__init__()

    def id(self):
        """Machine-readable provider identifier."""
        return 'cuuats'

    def name(self):
        """Human-readable provider name shown in the Processing toolbox."""
        return 'CUUATS'

    def icon(self):
        return QIcon(self.svgIconPath())

    def svgIconPath(self):
        return os.path.join(plugin_path, 'images', 'cuuats.svg')

    def loadAlgorithms(self):
        """Instantiate and register every algorithm this plugin ships."""
        for algorithm in (
            CopyNetworkAttributes(),
            CreateNetworkMatchTable(),
            CreateProjectFolder(),
        ):
            self.addAlgorithm(algorithm)

    def supportsNonFileBasedOutput(self):
        return True
| 23.131579
| 64
| 0.658703
| 82
| 879
| 6.890244
| 0.5
| 0.088496
| 0.223009
| 0.067257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253697
| 879
| 37
| 65
| 23.756757
| 0.86128
| 0
| 0
| 0
| 0
| 0
| 0.031854
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.259259
| false
| 0
| 0.148148
| 0.185185
| 0.62963
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
13831971a63db906d0f10497cb1fd8276e65f8ee
| 4,588
|
py
|
Python
|
var/spack/repos/builtin/packages/intel-mkl/package.py
|
goungy/spack
|
ffdde40f56d48c18ca9c45b0599221ef1dab40a2
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-06-25T15:25:29.000Z
|
2020-06-25T15:25:29.000Z
|
var/spack/repos/builtin/packages/intel-mkl/package.py
|
goungy/spack
|
ffdde40f56d48c18ca9c45b0599221ef1dab40a2
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/intel-mkl/package.py
|
goungy/spack
|
ffdde40f56d48c18ca9c45b0599221ef1dab40a2
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack import *
class IntelMkl(IntelPackage):
    """Intel Math Kernel Library.

    Spack package: each version() call pins one installer tarball by its
    sha256 checksum; variants and provides() declarations describe how the
    installed library can be consumed.
    """

    homepage = "https://software.intel.com/en-us/intel-mkl"

    # One entry per supported MKL release (newest first).
    version('2020.0.166', sha256='f6d92deb3ff10b11ba3df26b2c62bb4f0f7ae43e21905a91d553e58f0f5a8ae0',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/16232/l_mkl_2020.0.166.tgz")
    version('2019.5.281', sha256='9995ea4469b05360d509c9705e9309dc983c0a10edc2ae3a5384bc837326737e',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/15816/l_mkl_2019.5.281.tgz")
    version('2019.3.199', sha256='06de2b54f4812e7c39a118536259c942029fe1d6d8918ad9df558a83c4162b8f',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/15275/l_mkl_2019.3.199.tgz")
    version('2019.1.144', sha256='5205a460a9c685f7a442868367389b2d0c25e1455346bc6a37c5b8ff90a20fbb',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/14895/l_mkl_2019.1.144.tgz")
    version('2019.0.117', sha256='4e1fe2c705cfc47050064c0d6c4dee1a8c6740ac1c4f64dde9c7511c4989c7ad',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/13575/l_mkl_2019.0.117.tgz")
    version('2018.4.274', sha256='18eb3cde3e6a61a88f25afff25df762a560013f650aaf363f7d3d516a0d04881',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/13725/l_mkl_2018.4.274.tgz")
    version('2018.3.222', sha256='108d59c0927e58ce8c314db6c2b48ee331c3798f7102725f425d6884eb6ed241',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/13005/l_mkl_2018.3.222.tgz")
    version('2018.2.199', sha256='e28d12173bef9e615b0ded2f95f59a42b3e9ad0afa713a79f8801da2bfb31936',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12725/l_mkl_2018.2.199.tgz")
    version('2018.1.163', sha256='f6dc263fc6f3c350979740a13de1b1e8745d9ba0d0f067ece503483b9189c2ca',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12414/l_mkl_2018.1.163.tgz")
    version('2018.0.128', sha256='c368baa40ca88057292512534d7fad59fa24aef06da038ea0248e7cd1e280cec',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12070/l_mkl_2018.0.128.tgz")
    version('2017.4.239', sha256='dcac591ed1e95bd72357fd778edba215a7eab9c6993236373231cc16c200c92a',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/12147/l_mkl_2017.4.239.tgz")
    version('2017.3.196', sha256='fd7295870fa164d6138c9818304f25f2bb263c814a6c6539c9fe4e104055f1ca',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11544/l_mkl_2017.3.196.tgz")
    version('2017.2.174', sha256='0b8a3fd6bc254c3c3d9d51acf047468c7f32bf0baff22aa1e064d16d9fea389f',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11306/l_mkl_2017.2.174.tgz")
    version('2017.1.132', sha256='8c6bbeac99326d59ef3afdc2a95308c317067efdaae50240d2f4a61f37622e69',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/11024/l_mkl_2017.1.132.tgz")
    version('2017.0.098', sha256='f2233e8e011f461d9c15a853edf7ed0ae8849aa665a1ec765c1ff196fd70c4d9',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9662/l_mkl_2017.0.098.tgz")
    # built from parallel_studio_xe_2016.3.x
    version('11.3.3.210', sha256='ff858f0951fd698e9fb30147ea25a8a810c57f0126c8457b3b0cdf625ea43372',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/9068/l_mkl_11.3.3.210.tgz")
    # built from parallel_studio_xe_2016.2.062
    version('11.3.2.181', sha256='bac04a07a1fe2ae4996a67d1439ee90c54f31305e8663d1ccfce043bed84fc27',
        url="http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/8711/l_mkl_11.3.2.181.tgz")

    # Build-time options users can toggle on the spec.
    variant('shared', default=True, description='Builds shared library')
    variant('ilp64', default=False, description='64 bit integers')
    variant(
        'threads', default='none',
        description='Multithreading support',
        values=('openmp', 'tbb', 'none'),
        multi=False
    )

    # Virtual packages MKL can satisfy for dependents.
    provides('blas')
    provides('lapack')
    provides('scalapack')
    provides('mkl')
    provides('fftw-api@3', when='@2017:')

    if sys.platform == 'darwin':
        # there is no libmkl_gnu_thread on macOS
        conflicts('threads=openmp', when='%gcc')
| 64.619718
| 108
| 0.751308
| 521
| 4,588
| 6.502879
| 0.309021
| 0.042503
| 0.125443
| 0.165584
| 0.294864
| 0.294864
| 0.294864
| 0.275974
| 0.275974
| 0.275974
| 0
| 0.272705
| 0.114429
| 4,588
| 70
| 109
| 65.542857
| 0.561162
| 0.073017
| 0
| 0
| 0
| 0.320755
| 0.699128
| 0.256543
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037736
| 0
| 0.075472
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
138f6c701103c1f1771823352febd549174e9ed4
| 77
|
py
|
Python
|
Lec 9/Module3.py
|
harshp2124/Python-Lectures
|
a5b8201856201da0eef8d66c35eaceec4e8e1456
|
[
"Apache-2.0"
] | null | null | null |
Lec 9/Module3.py
|
harshp2124/Python-Lectures
|
a5b8201856201da0eef8d66c35eaceec4e8e1456
|
[
"Apache-2.0"
] | null | null | null |
Lec 9/Module3.py
|
harshp2124/Python-Lectures
|
a5b8201856201da0eef8d66c35eaceec4e8e1456
|
[
"Apache-2.0"
] | null | null | null |
def fn1(a, b):
    """Print the difference ``a - b`` with a 'Subtraction=' label."""
    difference = a - b
    print("Subtraction=", difference)
def fn2(c):
    """Echo the given value to standard output."""
    value = c
    print(value)
| 11
| 30
| 0.506494
| 13
| 77
| 3
| 0.615385
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.285714
| 77
| 7
| 31
| 11
| 0.672727
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
1392f87fb355659a41695eb77a37072ed80d0d7e
| 472
|
py
|
Python
|
Desafio24.py
|
rsmelocunha/Python-projects
|
1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093
|
[
"MIT"
] | null | null | null |
Desafio24.py
|
rsmelocunha/Python-projects
|
1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093
|
[
"MIT"
] | null | null | null |
Desafio24.py
|
rsmelocunha/Python-projects
|
1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093
|
[
"MIT"
] | null | null | null |
cid = input('Digite o nome da cidade onde você nasceu: ').strip() # strip() removes leading/trailing whitespace from the typed city name
print(cid[:5].upper() == 'SANTO') # the word 'SANTO' has 5 letters, so the slice [:5]
# takes positions 0 through 4 of the phrase and checks (case-insensitively, via
# upper()) whether it equals 'SANTO'. If so, the program prints 'True'.
| 118
| 144
| 0.625
| 70
| 472
| 4.214286
| 0.714286
| 0.047458
| 0.088136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01497
| 0.292373
| 472
| 4
| 145
| 118
| 0.868263
| 0.627119
| 0
| 0
| 0
| 0
| 0.273256
| 0
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
13964db6dc0253b6b9518d44f5c5d48a0ddd50ad
| 16,347
|
py
|
Python
|
genbmm/genbmm/sparse.py
|
harvardnlp/cascaded-generation
|
d6c5569ca3b2f9d7a5795bd21de4b4eec7b92936
|
[
"MIT"
] | 122
|
2020-06-02T01:27:02.000Z
|
2021-11-14T17:18:22.000Z
|
genbmm/genbmm/sparse.py
|
chenyangh/cascaded-generation
|
d6c5569ca3b2f9d7a5795bd21de4b4eec7b92936
|
[
"MIT"
] | null | null | null |
genbmm/genbmm/sparse.py
|
chenyangh/cascaded-generation
|
d6c5569ca3b2f9d7a5795bd21de4b4eec7b92936
|
[
"MIT"
] | 5
|
2020-06-02T23:56:01.000Z
|
2021-06-03T15:28:09.000Z
|
import torch
# Probe for the compiled _genbmm CUDA extension; when absent, the pure-Python
# "*_simple" fallbacks defined below are used instead.
has_cuda = False
try:
    import _genbmm
    has_cuda = True
except ImportError:
    pass
def banddiag(orig_x, lu, ld, fill=0):
    """Extract a band view of a (batch of) square matrices.

    Zero-pads ``orig_x`` with ``lu`` rows above and ``ld`` rows below along
    dim -2, then slides a window of width ``lu + ld + 1`` to gather each
    row's band. Returns ``(band, center)`` where ``band`` holds the diagonals
    per row and ``center`` is a view of the padded tensor covering the
    original rows (writes through to the padded storage).

    NOTE(review): ``fill`` is accepted but never used — the padding is always
    zeros; confirm whether callers rely on that.
    """
    top_shape = list(orig_x.shape)
    bottom_shape = list(orig_x.shape)
    top_shape[-2] = lu
    bottom_shape[-2] = ld
    padded = torch.cat(
        [
            torch.zeros(*top_shape, device=orig_x.device, dtype=orig_x.dtype),
            orig_x,
            torch.zeros(*bottom_shape, device=orig_x.device, dtype=orig_x.dtype),
        ],
        dim=-2,
    )
    windows = padded.unfold(-2, lu + ld + 1, 1)
    band = torch.diagonal(windows, 0, -3, -2).transpose(-2, -1)
    center = padded.narrow(-2, lu, orig_x.shape[-2])
    return band, center
def repdiag(x, lu, ld):
    """Re-diagonalise a band tensor.

    Zero-pads ``x`` with ``ld`` rows above and ``lu`` rows below along
    dim -2, slides a window of width ``lu + ld + 1``, and returns the
    diagonal over the last two window dimensions.
    """
    upper_pad = list(x.shape)
    lower_pad = list(x.shape)
    upper_pad[-2] = ld
    lower_pad[-2] = lu
    padded = torch.cat(
        [
            torch.zeros(*upper_pad, device=x.device, dtype=x.dtype),
            x,
            torch.zeros(*lower_pad, device=x.device, dtype=x.dtype),
        ],
        dim=-2,
    )
    windows = padded.unfold(-2, lu + ld + 1, 1)
    return torch.diagonal(windows, 0, -2, -1)
class Transpose(torch.autograd.Function):
    """Autograd-aware transpose of a banded representation.

    Forward flips the band axis and re-diagonalises via ``repdiag``; backward
    applies the same operation with the upper/lower bandwidths exchanged.
    """

    @staticmethod
    def forward(ctx, val, lu, ld):
        # Stash the band offsets as a tensor so save_for_backward accepts them.
        ctx.save_for_backward(torch.tensor([lu, ld]))
        return repdiag(val.flip(-1), lu, ld)

    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        upper, lower = saved.tolist()
        # Gradient w.r.t. `val` only; lu/ld are non-differentiable ints.
        return repdiag(grad_output.flip(-1), lower, upper), None, None
class BandedMatrix:
    """Batch of square banded matrices stored compactly.

    ``data`` has shape (batch, n, lu + ld + 1): row i stores the band entries
    of matrix row i, covering ``lu`` diagonals above and ``ld`` below the
    main diagonal. Out-of-matrix positions carry ``fill`` (0 for the standard
    semiring; callers pass -1e9 for log/max semirings).
    """

    def __init__(self, data, lu, ld, fill=0):
        batch, n, off = data.shape
        assert off == lu + ld + 1, "Offsets need to add up."
        self.data = data
        self.fill = fill
        self.lu, self.ld = lu, ld
        # Number of stored diagonals per row.
        self.width = lu + ld + 1

    def _new(self, lu, ld):
        """Allocate a fill-initialised band buffer for offsets (lu, ld)."""
        batch, n, off = self.data.shape
        data = torch.zeros(
            batch, n, ld + lu + 1, dtype=self.data.dtype, device=self.data.device
        ).fill_(self.fill)
        return data

    def band_shift(self, t):
        """Shift the band axis by t columns (t > 0 drops from the left,
        padding on the right), adjusting lu/ld accordingly."""
        if t == 0:
            return self
        batch, n, off = self.data.shape
        pad = torch.zeros(
            batch, n, abs(t), dtype=self.data.dtype, device=self.data.device
        ).fill_(self.fill)
        if t > 0:
            v = torch.cat([self.data[:, :, t:], pad], 2)
        else:
            v = torch.cat([pad, self.data[:, :, :t]], 2)
        return BandedMatrix(v, self.lu + t, self.ld - t, self.fill)

    # def band_shift(self):
    #     batch, n, off = self.data.shape
    #     return BandedMatrix(
    #         torch.cat(
    #             [self.data[:, :, 1:],
    #              torch.zeros(batch, n, 1, dtype=self.data.dtype, device=self.data.device).fill_(self.fill)], 2
    #         ),
    #         self.lu - 1,
    #         self.ld + 1,
    #         self.fill,
    #     )

    def band_unshift(self):
        """Shift the band axis one step right (inverse direction of a
        positive band_shift): lu decreases by 1, ld increases by 1."""
        batch, n, off = self.data.shape
        return BandedMatrix(
            torch.cat(
                [
                    torch.zeros(
                        batch, n, 1, dtype=self.data.dtype, device=self.data.device
                    ).fill_(self.fill),
                    self.data[:, :, :-1],
                ],
                2,
            ),
            self.lu - 1,
            self.ld + 1,
            self.fill,
        )

    def col_shift(self, t):
        """Shift along the row axis by t (t > 0 drops leading rows, padding
        at the end), adjusting lu/ld to keep diagonals aligned."""
        if t == 0:
            return self
        batch, n, off = self.data.shape
        pad = torch.zeros(
            batch, abs(t), off, dtype=self.data.dtype, device=self.data.device
        ).fill_(self.fill)
        if t > 0:
            v = torch.cat([self.data[:, t:, :], pad], 1)
        else:
            v = torch.cat([pad, self.data[:, :t, :]], 1)
        return BandedMatrix(v, self.lu - t, self.ld + t, self.fill)

    def col_unshift(self):
        """Shift the row axis one step down (inverse direction of a positive
        col_shift): lu increases by 1, ld decreases by 1."""
        batch, n, off = self.data.shape
        return BandedMatrix(
            torch.cat(
                [
                    torch.zeros(
                        batch, 1, off, dtype=self.data.dtype, device=self.data.device
                    ).fill_(self.fill),
                    self.data[:, :-1, :],
                ],
                1,
            ),
            self.lu + 1,
            self.ld - 1,
            self.fill,
        )

    def to_dense(self):
        """Materialise the full (batch, n, n) matrix, with off-band entries
        set to `fill`. Writes the band through a banddiag view."""
        batch, n, off = self.data.shape
        full = torch.zeros(batch, n, n, dtype=self.data.dtype, device=self.data.device)
        full.fill_(self.fill)
        x2, x = banddiag(full, self.lu, self.ld)
        # x2 is a view into x's storage, so this assignment fills the band.
        x2[:] = self.data
        return x

    def _expand(self, lu, ld):
        """Re-embed this band into a wider band with offsets (lu, ld)."""
        batch, n, off = self.data.shape
        data = self._new(lu, ld)
        s = lu - self.lu
        data[:, :, s : s + self.width] = self.data
        return BandedMatrix(data, lu, ld, self.fill)

    def op(self, other, op, zero=0):
        """Elementwise binary `op` with `other`, on the union of both bands.

        `zero` is the identity written where only one operand has entries.
        """
        batch, n, off = self.data.shape
        lu = max(self.lu, other.lu)
        ld = max(self.ld, other.ld)
        data = self._new(lu, ld).fill_(zero)
        s1 = lu - self.lu
        data[:, :, s1 : s1 + self.width] = self.data
        s2 = lu - other.lu
        data[:, :, s2 : s2 + other.width] = op(
            data[:, :, s2 : s2 + other.width], other.data
        )
        return BandedMatrix(data, lu, ld, self.fill)

    def transpose(self):
        """Matrix transpose: swaps lu and ld via the autograd-aware
        Transpose function."""
        batch, n, off = self.data.shape
        y2 = Transpose.apply(self.data, self.lu, self.ld)
        assert y2.shape[1] == n
        return BandedMatrix(y2, self.ld, self.lu, self.fill)

    # def multiply(self, other):
    #     batch, n, off = self.data.shape
    #     assert other.data.shape[1] == n
    #     lu = self.lu + other.ld
    #     ld = self.ld + other.lu
    #     out, = _genbmm.forward_band(self.data, self.lu, self.ld,
    #                                 other.data, other.lu, other.ld, 3)
    #     return BandedMatrix(out, lu, ld, self.fill)

    def multiply(self, other):
        """Banded matmul (standard semiring); CUDA kernel when available,
        otherwise the pure-Python fallback."""
        if has_cuda:
            batch, n, off = self.data.shape
            assert other.data.shape[1] == n
            # Band widths add under matrix multiplication.
            lu = self.lu + other.ld
            ld = self.ld + other.lu
            out = bandedbmm(
                self.data, self.lu, self.ld, other.data, other.lu, other.ld, lu, ld
            )
            return BandedMatrix(out, lu, ld, self.fill)
        else:
            return self.multiply_simple(other)

    def multiply_log(self, other):
        """Banded matmul in the log semiring (logsumexp-accumulate)."""
        if has_cuda:
            batch, n, off = self.data.shape
            assert other.data.shape[1] == n
            lu = self.lu + other.ld
            ld = self.ld + other.lu
            out = bandedlogbmm(
                self.data, self.lu, self.ld, other.data, other.lu, other.ld, lu, ld
            )
            return BandedMatrix(out, lu, ld, self.fill)
        else:
            return self.multiply_log_simple(other)

    def multiply_max(self, other):
        """Banded matmul in the max-plus semiring.

        NOTE(review): unlike multiply/multiply_log this also checks
        `other.data.is_cuda` before taking the kernel path — confirm whether
        the other two should do the same.
        """
        if has_cuda and other.data.is_cuda:
            batch, n, off = self.data.shape
            assert other.data.shape[1] == n
            lu = self.lu + other.ld
            ld = self.ld + other.lu
            out = bandedmaxbmm(
                self.data, self.lu, self.ld, other.data, other.lu, other.ld, lu, ld
            )
            return BandedMatrix(out, lu, ld, self.fill)
        else:
            return self.multiply_max_simple(other)

    def multiply_simple(self, other):
        """Reference O(n * width^2) banded matmul (standard semiring)."""
        batch, n, off = self.data.shape
        assert other.data.shape[1] == n
        lu = self.lu + other.ld
        ld = self.ld + other.lu
        data = self._new(lu, ld)
        result = BandedMatrix(data, lu, ld, self.fill)
        for i in range(n):
            for j in range(result.width):
                # o: absolute column index of output band slot j.
                o = i + (j - result.lu)
                if o < 0 or o >= n:
                    continue
                val = torch.zeros(batch)
                for k in range(self.width):
                    # pos: absolute column of self's band slot k (the
                    # contraction index).
                    pos = i + (k - self.lu)
                    if pos < 0 or pos >= n:
                        continue
                    # k2: other's band slot holding entry (pos, o).
                    k2 = (pos - o) + other.lu
                    if k2 < 0 or k2 >= other.width:
                        continue
                    val += self.data[:, i, k] * other.data[:, o, k2]
                data[:, i, j] = val
        return result

    def multiply_max_simple(self, other):
        """Reference banded matmul in the max-plus semiring."""
        batch, n, off = self.data.shape
        assert other.data.shape[1] == n
        lu = self.lu + other.ld
        ld = self.ld + other.lu
        data = self._new(lu, ld)
        result = BandedMatrix(data, lu, ld, self.fill)
        for i in range(n):
            for j in range(result.width):
                o = i + (j - result.lu)
                if o < 0 or o >= n:
                    continue
                # -1e9 acts as the semiring's -infinity.
                m = torch.zeros(batch).fill_(-1e9)
                for k in range(self.width):
                    pos = i + (k - self.lu)
                    if pos < 0 or pos >= n:
                        continue
                    k2 = (pos - o) + other.lu
                    if k2 < 0 or k2 >= other.width:
                        continue
                    m = torch.max(m, self.data[:, i, k] + other.data[:, o, k2])
                data[:, i, j] = m
        return result

    def multiply_log_simple(self, other):
        """Reference banded matmul in the log semiring, computed with the
        max-shift trick for numerical stability."""
        batch, n, off = self.data.shape
        assert other.data.shape[1] == n
        lu = self.lu + other.ld
        ld = self.ld + other.lu
        data = self._new(lu, ld)
        result = BandedMatrix(data, lu, ld, self.fill)
        for i in range(n):
            for j in range(result.width):
                o = i + (j - result.lu)
                if o < 0 or o >= n:
                    continue
                val = torch.zeros(batch)
                m = torch.zeros(batch).fill_(-1e9)
                # First pass: find the max term for the logsumexp shift.
                for k in range(self.width):
                    pos = i + (k - self.lu)
                    if pos < 0 or pos >= n:
                        continue
                    k2 = (pos - o) + other.lu
                    if k2 < 0 or k2 >= other.width:
                        continue
                    m = torch.max(m, self.data[:, i, k] + other.data[:, o, k2])
                # Second pass: accumulate exp-shifted terms.
                for k in range(self.width):
                    pos = i + (k - self.lu)
                    if pos < 0 or pos >= n:
                        continue
                    k2 = (pos - o) + other.lu
                    if k2 < 0 or k2 >= other.width:
                        continue
                    val += torch.exp(self.data[:, i, k] + other.data[:, o, k2] - m)
                data[:, i, j] = torch.log(val) + m
        return result

    def multiply_back(self, other, out, grad_out):
        """Gradient of the banded matmul w.r.t. self, via the _genbmm kernel.

        NOTE(review): the `out` parameter is unused here — confirm it can be
        dropped from callers.
        """
        batch, n, off = self.data.shape
        assert other.data.shape[1] == n
        grad_a, = _genbmm.backward_band(
            self.data,
            self.lu,
            self.ld,
            other.data,
            other.lu,
            other.ld,
            grad_out,
            grad_out,
            3,
        )
        grad_a = BandedMatrix(grad_a, self.lu, self.ld, self.fill)
        return grad_a

    def multiply_back_simple(self, other, grad_out):
        """Reference gradient of the banded matmul w.r.t. self."""
        batch, n, off = self.data.shape
        assert other.data.shape[1] == n
        data = self._new(self.lu, self.ld)
        result = BandedMatrix(data, self.lu, self.ld, self.fill)
        for i in range(n):
            for j in range(self.width):
                o = i + (j - self.lu)
                val = torch.zeros(batch)
                for k in range(grad_out.width):
                    pos = i + (k - grad_out.lu)
                    if pos < 0 or pos >= n:
                        continue
                    k2 = (o - pos) + other.lu
                    if k2 < 0 or k2 >= other.width:
                        continue
                    val += other.data[:, pos, k2] * grad_out.data[:, i, k]
                data[:, i, j] = val
        return result.transpose()
class BandedMul(torch.autograd.Function):
    """Autograd wrapper for the banded matmul (standard semiring).

    Forward calls the _genbmm kernel with op-code 3; backward computes the
    gradient for each operand with backward_band, transposing the output
    gradient for the second operand.
    """

    @staticmethod
    def forward(ctx, a, a_lu, a_ld, b, b_lu, b_ld, o_lu, o_ld):
        # _genbmm kernels require contiguous inputs.
        a = a.contiguous()
        b = b.contiguous()
        out, _ = _genbmm.forward_band(a, a_lu, a_ld, b, b_lu, b_ld, 3)
        # Band offsets are packed into a tensor so save_for_backward accepts them.
        ctx.save_for_backward(
            a, b, out, torch.LongTensor([a_lu, a_ld, b_lu, b_ld, o_lu, o_ld])
        )
        return out

    @staticmethod
    def backward(ctx, grad_output):
        a, b, switches, bands = ctx.saved_tensors
        a_lu, a_ld, b_lu, b_ld, o_lu, o_ld = bands.tolist()
        # Wrap raw tensors back into BandedMatrix views (fill 0 for this semiring).
        a = BandedMatrix(a, a_lu, a_ld, 0)
        b = BandedMatrix(b, b_lu, b_ld, 0)
        grad_output = BandedMatrix(grad_output, o_lu, o_ld, 0)
        switches = BandedMatrix(switches.float(), o_lu, o_ld, 0)
        grad_a, = _genbmm.backward_band(
            a.data,
            a.lu,
            a.ld,
            b.data,
            b.lu,
            b.ld,
            grad_output.data.contiguous(),
            switches.data,
            3,
        )
        # Gradient for b: swap operand roles and transpose the output grad.
        grad_b, = _genbmm.backward_band(
            b.data.contiguous(),
            b.lu,
            b.ld,
            a.data.contiguous(),
            a.lu,
            a.ld,
            grad_output.transpose().data.contiguous(),
            switches.transpose().data.contiguous(),
            3,
        )
        # One slot per forward argument; the int band offsets get None.
        return grad_a, None, None, grad_b, None, None, None, None
class BandedLogMul(torch.autograd.Function):
    """Autograd wrapper for the banded matmul in the log semiring.

    Identical structure to BandedMul, but _genbmm op-code 0 selects the
    logsumexp accumulation and -1e9 is used as the band fill (log-space
    -infinity).
    """

    @staticmethod
    def forward(ctx, a, a_lu, a_ld, b, b_lu, b_ld, o_lu, o_ld):
        # _genbmm kernels require contiguous inputs.
        a = a.contiguous()
        b = b.contiguous()
        out, _ = _genbmm.forward_band(a, a_lu, a_ld, b, b_lu, b_ld, 0)
        ctx.save_for_backward(
            a, b, out, torch.LongTensor([a_lu, a_ld, b_lu, b_ld, o_lu, o_ld])
        )
        return out

    @staticmethod
    def backward(ctx, grad_output):
        a, b, switches, bands = ctx.saved_tensors
        a_lu, a_ld, b_lu, b_ld, o_lu, o_ld = bands.tolist()
        # -1e9 fill marks out-of-band entries in log space.
        a = BandedMatrix(a, a_lu, a_ld, -1e9)
        b = BandedMatrix(b, b_lu, b_ld, -1e9)
        grad_output = BandedMatrix(grad_output, o_lu, o_ld, -1e9)
        switches = BandedMatrix(switches.float(), o_lu, o_ld, -1e9)
        grad_a, = _genbmm.backward_band(
            a.data,
            a.lu,
            a.ld,
            b.data,
            b.lu,
            b.ld,
            grad_output.data.contiguous(),
            switches.data,
            0,
        )
        # Gradient for b: swap operand roles and transpose the output grad.
        grad_b, = _genbmm.backward_band(
            b.data.contiguous(),
            b.lu,
            b.ld,
            a.data.contiguous(),
            a.lu,
            a.ld,
            grad_output.transpose().data.contiguous(),
            switches.transpose().data.contiguous(),
            0,
        )
        # One slot per forward argument; the int band offsets get None.
        return grad_a, None, None, grad_b, None, None, None, None
class BandedMaxMul(torch.autograd.Function):
    """Autograd op: banded matmul in the max semiring (kernel mode 1).

    Unlike the other two ops, the forward pass also records argmax
    index tensors for both operand orderings, which the backward pass
    uses to route gradients.
    """
    @staticmethod
    def forward(ctx, a, a_lu, a_ld, b, b_lu, b_ld, o_lu, o_ld):
        a = a.contiguous()
        b = b.contiguous()
        # `indices`: argmax positions for the (a, b) orientation.
        out, indices = _genbmm.forward_band(a, a_lu, a_ld, b, b_lu, b_ld, 1)
        at = BandedMatrix(a, a_lu, a_ld, -1e9)
        bt = BandedMatrix(b, b_lu, b_ld, -1e9)
        # `indices2`: argmax positions with operands swapped, needed for
        # the b-gradient; the swapped output itself is discarded.
        _, indices2 = _genbmm.forward_band(
            bt.data.contiguous(), bt.lu, bt.ld, at.data.contiguous(), at.lu, at.ld, 1
        )
        ctx.save_for_backward(
            a,
            b,
            indices,
            indices2,
            torch.LongTensor([a_lu, a_ld, b_lu, b_ld, o_lu, o_ld]),
        )
        return out
    @staticmethod
    def backward(ctx, grad_output):
        a, b, switches, switches2, bands = ctx.saved_tensors
        a_lu, a_ld, b_lu, b_ld, o_lu, o_ld = bands.tolist()
        # -1e9 fill is the max-semiring identity for out-of-band slots.
        a = BandedMatrix(a, a_lu, a_ld, -1e9)
        b = BandedMatrix(b, b_lu, b_ld, -1e9)
        grad_output = BandedMatrix(grad_output, o_lu, o_ld, -1e9)
        switches = BandedMatrix(switches.float(), o_lu, o_ld, -1e9)
        switches2 = BandedMatrix(switches2.float(), o_lu, o_ld, -1e9)
        # Gradient w.r.t. a: (a, b) orientation with its argmax indices.
        grad_a, = _genbmm.backward_band(
            a.data,
            a.lu,
            a.ld,
            b.data,
            b.lu,
            b.ld,
            grad_output.data.contiguous(),
            switches.data,
            1,
        )
        # NOTE(review): unlike BandedMul/BandedLogMul, switches2 is NOT
        # transposed here — presumably because indices2 was already
        # computed in the swapped (b, a) orientation in forward();
        # confirm against the kernel.
        grad_b, = _genbmm.backward_band(
            b.data.contiguous(),
            b.lu,
            b.ld,
            a.data.contiguous(),
            a.lu,
            a.ld,
            grad_output.transpose().data.contiguous(),
            switches2.data.contiguous(),
            1,
        )
        # Band offsets are ints: no gradients.
        return grad_a, None, None, grad_b, None, None, None, None
# Functional entry points: .apply wrappers for the autograd Functions
# defined above (ordinary, log-space, and max-semiring banded matmul).
bandedbmm = BandedMul.apply
bandedlogbmm = BandedLogMul.apply
bandedmaxbmm = BandedMaxMul.apply
| 30.960227
| 112
| 0.479538
| 2,131
| 16,347
| 3.570624
| 0.061473
| 0.057826
| 0.011565
| 0.017348
| 0.788408
| 0.768301
| 0.742542
| 0.716914
| 0.693521
| 0.67131
| 0
| 0.017029
| 0.392916
| 16,347
| 527
| 113
| 31.018975
| 0.749698
| 0.039824
| 0
| 0.613793
| 0
| 0
| 0.001467
| 0
| 0
| 0
| 0
| 0
| 0.022989
| 1
| 0.064368
| false
| 0.002299
| 0.006897
| 0
| 0.156322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
139ff99767989b07de74409445ae9ab848a6c71a
| 159
|
py
|
Python
|
examples/cherenkov/EventAction.py
|
yu22mal/geant4_pybind
|
ff7efc322fe53f39c7ae7ed140861052a92479fd
|
[
"Unlicense"
] | null | null | null |
examples/cherenkov/EventAction.py
|
yu22mal/geant4_pybind
|
ff7efc322fe53f39c7ae7ed140861052a92479fd
|
[
"Unlicense"
] | null | null | null |
examples/cherenkov/EventAction.py
|
yu22mal/geant4_pybind
|
ff7efc322fe53f39c7ae7ed140861052a92479fd
|
[
"Unlicense"
] | null | null | null |
from geant4_pybind import *
class EventAction(G4UserEventAction):
    """Geant4 user event action for the Cherenkov example.

    Currently a stub: no per-event processing is performed.
    """

    def __init__(self):
        super().__init__()

    def EndOfEventAction(self, evt):
        # No end-of-event bookkeeping yet; `evt` is the G4Event handle.
        pass
| 14.454545
| 37
| 0.716981
| 17
| 159
| 6.176471
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015385
| 0.18239
| 159
| 10
| 38
| 15.9
| 0.792308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
13a2f642225fd09f7ed3a937d3863f4fd311c380
| 4,933
|
py
|
Python
|
kusanagi/core/payload/cmd/revshell/validator.py
|
cytopia/kusanagi
|
0f2f2f9d84fee0037ee45dc8c1c41adf841e480e
|
[
"MIT"
] | 13
|
2021-04-10T10:36:01.000Z
|
2021-11-22T17:15:10.000Z
|
kusanagi/core/payload/cmd/revshell/validator.py
|
FDlucifer/kusanagi
|
0f2f2f9d84fee0037ee45dc8c1c41adf841e480e
|
[
"MIT"
] | null | null | null |
kusanagi/core/payload/cmd/revshell/validator.py
|
FDlucifer/kusanagi
|
0f2f2f9d84fee0037ee45dc8c1c41adf841e480e
|
[
"MIT"
] | 4
|
2021-05-04T19:37:58.000Z
|
2021-08-05T06:39:16.000Z
|
"""This file holds the payload yaml validator/definition template."""
VALIDATOR = {
"specification": {
"type": dict,
"required": True,
"childs": {
"payload": {
"type": str,
"required": True,
"allowed": "^(cmd)$",
"childs": {},
},
"type": {
"type": str,
"required": True,
"allowed": "^(revshell|bindshell)$",
"childs": {},
},
"version": {
"type": str,
"required": True,
"allowed": "^([0-9]\\.[0-9]\\.[0-9])$",
"childs": {},
},
},
},
"items": {
"type": list,
"required": True,
"childs": {
"name": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"desc": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"info": {
"type": list,
"required": True,
"childs": {},
},
"rating": {
"type": int,
"required": True,
"allowed": "^([0-9])$",
"childs": {},
},
"meta": {
"type": dict,
"required": True,
"childs": {
"author": {
"type": str,
"required": True,
"childs": {},
},
"editors": {
"type": list,
"required": True,
"childs": {},
},
"created": {
"type": str,
"required": True,
"allowed": "^([0-9]{4}-[0-9]{2}-[0-9]{2})$",
"childs": {},
},
"modified": {
"type": str,
"required": True,
"allowed": "^([0-9]{4}-[0-9]{2}-[0-9]{2})$",
"childs": {},
},
"version": {
"type": str,
"required": True,
"allowed": "^([0-9]\\.[0-9]\\.[0-9])$",
"childs": {},
},
},
},
"cmd": {
"type": dict,
"required": True,
"childs": {
"executable": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"requires": {
"type": dict,
"required": True,
"childs": {
"commands": {
"type": list,
"required": True,
"childs": {},
},
"shell_env": {
"type": list,
"required": True,
"childs": {},
},
"os": {
"type": list,
"required": True,
"childs": {},
},
},
},
},
},
"revshell": {
"type": dict,
"required": True,
"childs": {
"proto": {
"type": str,
"required": True,
"allowed": "^(tcp|udp)$",
"childs": {},
},
"shell": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"command": {
"type": (str, type(None)),
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
},
},
"payload": {
"type": str,
"required": True,
"allowed": "(.*__ADDR__.*__PORT__.*|.*__PORT__.*__ADDR__.*)",
"childs": {},
},
},
},
}
| 31.825806
| 77
| 0.219947
| 224
| 4,933
| 4.767857
| 0.223214
| 0.292135
| 0.249064
| 0.231273
| 0.690075
| 0.359551
| 0.2397
| 0.166667
| 0.166667
| 0.166667
| 0
| 0.017288
| 0.624772
| 4,933
| 154
| 78
| 32.032468
| 0.559697
| 0.012771
| 0
| 0.585526
| 0
| 0
| 0.198808
| 0.036801
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13a6f1a95001e1e439f16518bf3b984319a4f53c
| 159
|
py
|
Python
|
config/views.py
|
fagrimacs/fagrimacs
|
471627ff2ead072d1bf04a542d6c47b542b95cba
|
[
"MIT"
] | null | null | null |
config/views.py
|
fagrimacs/fagrimacs
|
471627ff2ead072d1bf04a542d6c47b542b95cba
|
[
"MIT"
] | 5
|
2020-02-17T11:23:06.000Z
|
2021-06-10T19:11:33.000Z
|
config/views.py
|
fagrimacs/fagrimacs
|
471627ff2ead072d1bf04a542d6c47b542b95cba
|
[
"MIT"
] | 11
|
2019-12-06T20:05:50.000Z
|
2020-03-12T07:32:03.000Z
|
from django.shortcuts import render
from django.views.generic import TemplateView
class ComingSoonView(TemplateView):
    """Render the static "coming soon" landing page."""

    # Template resolved through Django's normal template loaders.
    template_name = 'coming-soon.html'
| 22.714286
| 45
| 0.811321
| 19
| 159
| 6.736842
| 0.789474
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119497
| 159
| 6
| 46
| 26.5
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0.100629
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
13c1fa3642bbfb694b84f9e00e6cde81e9256d42
| 227
|
py
|
Python
|
streetview/__init__.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | 8
|
2021-05-21T03:38:52.000Z
|
2021-11-21T08:32:41.000Z
|
streetview/__init__.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | null | null | null |
streetview/__init__.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | 1
|
2021-06-13T21:49:14.000Z
|
2021-06-13T21:49:14.000Z
|
from .download import download_streetview_image
from .sample import random_points, random_stratified_points
from .coverage import calculate_coverage
from .zoning import calculate_zone
from .road import calculate_road_length
| 28.375
| 59
| 0.867841
| 30
| 227
| 6.266667
| 0.5
| 0.239362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105727
| 227
| 7
| 60
| 32.428571
| 0.926108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
13de34dd3a17bd24d7be6ce03f31829dea130698
| 4,264
|
py
|
Python
|
ckanext/example_iconfigurer/tests/test_iconfigurer_toolkit.py
|
gg2/ckan
|
d61a533cc330b6050f4957573f58ec912695ed0a
|
[
"BSD-3-Clause"
] | 2,805
|
2015-01-02T18:13:15.000Z
|
2022-03-31T03:35:01.000Z
|
ckanext/example_iconfigurer/tests/test_iconfigurer_toolkit.py
|
gg2/ckan
|
d61a533cc330b6050f4957573f58ec912695ed0a
|
[
"BSD-3-Clause"
] | 3,801
|
2015-01-02T11:05:36.000Z
|
2022-03-31T19:24:37.000Z
|
ckanext/example_iconfigurer/tests/test_iconfigurer_toolkit.py
|
cascaoSDC/ckan
|
75a08caa7c688ce70229dfea7070cc667a15c5e8
|
[
"BSD-3-Clause"
] | 1,689
|
2015-01-02T19:46:43.000Z
|
2022-03-28T14:59:43.000Z
|
# encoding: utf-8
import pytest
import ckan.plugins.toolkit as toolkit
@pytest.mark.usefixtures("clean_db")
class TestIConfigurerToolkitAddCkanAdminTab(object):
"""
Tests for toolkit.add_ckan_admin_tab used by the IConfigurer interface.
"""
def test_add_ckan_admin_tab_updates_config_dict(self):
"""Config dict updated by toolkit.add_ckan_admin_tabs method."""
config = {}
toolkit.add_ckan_admin_tab(config, "my_route_name", "my_label")
assert {
"ckan.admin_tabs": {
"my_route_name": {"label": "my_label", "icon": None}
}
} == config
def test_add_ckan_admin_tab_twice(self):
"""
Calling add_ckan_admin_tab twice with same values returns expected
config.
"""
config = {}
toolkit.add_ckan_admin_tab(config, "my_route_name", "my_label")
toolkit.add_ckan_admin_tab(config, "my_route_name", "my_label")
expected_dict = {
"ckan.admin_tabs": {
"my_route_name": {"label": "my_label", "icon": None}
}
}
assert expected_dict == config
def test_add_ckan_admin_tab_twice_replace_value(self):
"""
Calling add_ckan_admin_tab twice with a different value returns
expected config.
"""
config = {}
toolkit.add_ckan_admin_tab(config, "my_route_name", "my_label")
toolkit.add_ckan_admin_tab(
config, "my_route_name", "my_replacement_label"
)
expected_dict = {
"ckan.admin_tabs": {
"my_route_name": {
"label": "my_replacement_label",
"icon": None,
}
}
}
assert expected_dict == config
def test_add_ckan_admin_tab_two_routes(self):
"""
Add two different route/label pairs to ckan.admin_tabs.
"""
config = {}
toolkit.add_ckan_admin_tab(config, "my_route_name", "my_label")
toolkit.add_ckan_admin_tab(
config, "my_other_route_name", "my_other_label"
)
expected_dict = {
"ckan.admin_tabs": {
"my_other_route_name": {
"label": "my_other_label",
"icon": None,
},
"my_route_name": {"label": "my_label", "icon": None},
}
}
assert expected_dict == config
def test_add_ckan_admin_tab_config_has_existing_admin_tabs(self):
"""
Config already has a ckan.admin_tabs option.
"""
config = {
"ckan.admin_tabs": {
"my_existing_route": {
"label": "my_existing_label",
"icon": None,
}
}
}
toolkit.add_ckan_admin_tab(config, "my_route_name", "my_label")
toolkit.add_ckan_admin_tab(
config, "my_other_route_name", "my_other_label"
)
expected_dict = {
"ckan.admin_tabs": {
"my_existing_route": {
"label": "my_existing_label",
"icon": None,
},
"my_other_route_name": {
"label": "my_other_label",
"icon": None,
},
"my_route_name": {"label": "my_label", "icon": None},
}
}
assert expected_dict == config
def test_add_ckan_admin_tab_config_has_existing_other_option(self):
"""
Config already has existing other option.
"""
config = {"ckan.my_option": "This is my option"}
toolkit.add_ckan_admin_tab(config, "my_route_name", "my_label")
toolkit.add_ckan_admin_tab(
config, "my_other_route_name", "my_other_label"
)
expected_dict = {
"ckan.my_option": "This is my option",
"ckan.admin_tabs": {
"my_other_route_name": {
"label": "my_other_label",
"icon": None,
},
"my_route_name": {"label": "my_label", "icon": None},
},
}
assert expected_dict == config
| 29.006803
| 75
| 0.529784
| 449
| 4,264
| 4.603563
| 0.144766
| 0.130624
| 0.121916
| 0.145138
| 0.771166
| 0.760523
| 0.749879
| 0.726657
| 0.674407
| 0.674407
| 0
| 0.000368
| 0.363274
| 4,264
| 146
| 76
| 29.205479
| 0.760958
| 0.104597
| 0
| 0.57732
| 0
| 0
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0.061856
| 1
| 0.061856
| false
| 0
| 0.020619
| 0
| 0.092784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13e65eca252c06bccb6369bd506f941899e836d6
| 7,122
|
py
|
Python
|
client/verta/tests/test_permissions/test_sharing_old.py
|
NaiboWang/modeldb
|
43faa8266f7134404fe5cb21954a477ed5963300
|
[
"Apache-2.0"
] | 1
|
2021-03-26T05:41:34.000Z
|
2021-03-26T05:41:34.000Z
|
client/verta/tests/test_permissions/test_sharing_old.py
|
NaiboWang/modeldb
|
43faa8266f7134404fe5cb21954a477ed5963300
|
[
"Apache-2.0"
] | null | null | null |
client/verta/tests/test_permissions/test_sharing_old.py
|
NaiboWang/modeldb
|
43faa8266f7134404fe5cb21954a477ed5963300
|
[
"Apache-2.0"
] | 1
|
2021-05-04T13:52:09.000Z
|
2021-05-04T13:52:09.000Z
|
"""
Original collaboration/sharing tests, before the visibility overhaul.
"""
import pytest
from verta._internal_utils import _utils
pytestmark = pytest.mark.not_oss
class TestProject:
    """Project sharing across personal and organization workspaces."""

    def test_share_project_personal_workspace(self, client, client_2, email_2):
        """
        User 1 share a project in personal workspace to user 2.
        """
        project_name = _utils.generate_default_name()
        project = client.create_project(project_name)
        project._add_collaborator(email=email_2)
        # Collaborator can resolve the project both by id and by name.
        assert client_2.get_project(id=project.id)
        assert client_2.get_project(name=project.name)

    def test_org_public_project(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a org-public project created by a user in the same organization.
        """
        project_name = _utils.generate_default_name()
        project = client.create_project(project_name, workspace=organization.name, public_within_org=True)
        organization.add_member(email_2)
        assert client_2.get_project(id=project.id)
        assert client_2.get_project(name=project.name, workspace=organization.name)

    def test_non_org_public_project_access_error(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a non-org-public project created by a user in the same organization.
        """
        project_name = _utils.generate_default_name()
        project = client.create_project(project_name, workspace=organization.name, public_within_org=False)
        organization.add_member(email_2)
        # Shouldn't be able to access:
        with pytest.raises(ValueError, match="not found"):
            client_2.get_project(id=project.id)

    def test_share_org_project(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a non-org-public project created by another user, but has been shared to user 2.
        """
        project_name = _utils.generate_default_name()
        project = client.create_project(project_name, workspace=organization.name, public_within_org=False)
        organization.add_member(email_2)
        # Explicit collaborator grant overrides the private visibility.
        project._add_collaborator(email=email_2)
        assert client_2.get_project(id=project.id)
        assert client_2.get_project(name=project.name, workspace=organization.name)
class TestDataset:
    """Dataset visibility within an organization workspace."""

    def test_org_public_dataset(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a org-public dataset created by a user in the same organization.
        """
        dataset_name = _utils.generate_default_name()
        dataset = client.create_dataset(dataset_name, workspace=organization.name, public_within_org=True)
        organization.add_member(email_2)
        assert client_2.get_dataset(id=dataset.id)
        assert client_2.get_dataset(name=dataset.name, workspace=organization.name)
        # Clean up server-side state so later tests start fresh.
        dataset.delete()

    def test_non_org_public_dataset_access_error(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a non-org-public dataset created by a user in the same organization.
        """
        dataset_name = _utils.generate_default_name()
        dataset = client.create_dataset(dataset_name, workspace=organization.name, public_within_org=False)
        organization.add_member(email_2)
        # Shouldn't be able to access:
        with pytest.raises(ValueError, match="not found"):
            client_2.get_dataset(id=dataset.id)
        dataset.delete()
class TestRegisteredModel:
    """Registered-model visibility within an organization workspace."""

    def test_org_public_registered_model(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a org-public registered_model created by a user in the same organization.
        """
        registered_model_name = _utils.generate_default_name()
        registered_model = client.create_registered_model(registered_model_name, workspace=organization.name, public_within_org=True)
        organization.add_member(email_2)
        assert client_2.get_registered_model(id=registered_model.id)
        assert client_2.get_registered_model(name=registered_model.name, workspace=organization.name)
        # Clean up server-side state so later tests start fresh.
        registered_model.delete()

    def test_non_org_public_registered_model_access_error(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a non-org-public registered_model created by a user in the same organization.
        """
        registered_model_name = _utils.generate_default_name()
        registered_model = client.create_registered_model(registered_model_name, workspace=organization.name, public_within_org=False)
        organization.add_member(email_2)
        # Shouldn't be able to access:
        with pytest.raises(ValueError, match="not found"):
            client_2.get_registered_model(id=registered_model.id)
        registered_model.delete()
class TestRepository:
    """Repository visibility within an organization workspace."""

    def test_org_public_repository(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a org-public repository created by a user in the same organization.
        """
        repository_name = _utils.generate_default_name()
        repository = client.set_repository(repository_name, workspace=organization.name, public_within_org=True)
        organization.add_member(email_2)
        assert client_2.get_or_create_repository(id=repository.id)
        # Name-based lookup must resolve to the very same repository.
        assert client_2.get_or_create_repository(name=repository.name, workspace=organization.name).id == repository.id
        repository.delete()

    def test_non_org_public_repository_access_error(self, client, organization, client_2, email_2):
        """
        User 2 tries to access a non-org-public repository created by a user in the same organization.
        """
        repository_name = _utils.generate_default_name()
        repository = client.set_repository(repository_name, workspace=organization.name, public_within_org=False)
        organization.add_member(email_2)
        # Shouldn't be able to access:
        with pytest.raises(ValueError, match="no Repository found"):
            client_2.get_or_create_repository(id=repository.id)
        repository.delete()
class TestEndpoint:
    """Endpoint visibility within an organization workspace."""

    def test_org_endpoint(self, client, organization, client_2, email_2):
        """
        Non-owner access to org-public endpoint and private endpoint within an org.
        """
        organization.add_member(email_2)
        path = _utils.generate_default_name()
        # ORG_SCOPED_PUBLIC
        public_path = "public-{}".format(path)
        endpoint = client.create_endpoint(public_path, workspace=organization.name, public_within_org=True)
        # Org member can see the org-public endpoint.
        client_2.get_endpoint(public_path, workspace=organization.name)
        endpoint.delete()
        # PRIVATE
        private_path = "private-{}".format(path)
        endpoint = client.create_endpoint(private_path, workspace=organization.name, public_within_org=False)
        # Private endpoint must be invisible to the non-owner member.
        with pytest.raises(ValueError, match="Endpoint not found"):
            client_2.get_endpoint(private_path, workspace=organization.name)
        endpoint.delete()
| 40.465909
| 134
| 0.714125
| 912
| 7,122
| 5.298246
| 0.100877
| 0.042012
| 0.037252
| 0.084023
| 0.851821
| 0.793874
| 0.720613
| 0.675083
| 0.675083
| 0.645695
| 0
| 0.011295
| 0.204437
| 7,122
| 175
| 135
| 40.697143
| 0.841511
| 0.167369
| 0
| 0.45977
| 0
| 0
| 0.014657
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.126437
| false
| 0
| 0.022989
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13f361cbb55f90cacaa197429018d65d0a7c7ae5
| 728
|
py
|
Python
|
tests/conftest.py
|
staciekith/mental-unload
|
93d2ff29e159020c270b8c97b0d6dc97a1ad16e7
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
staciekith/mental-unload
|
93d2ff29e159020c270b8c97b0d6dc97a1ad16e7
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
staciekith/mental-unload
|
93d2ff29e159020c270b8c97b0d6dc97a1ad16e7
|
[
"MIT"
] | null | null | null |
import pytest
import config
from app import create_app, db
from app.adapters.auth0.auth0_adapter import Auth0Adapter
@pytest.fixture(scope='module')
def app():
    """Flask application configured for testing, yielded inside an app context."""
    app = create_app(config.Test)
    with app.app_context():
        yield app


@pytest.fixture(scope='module')
def app_db(app):
    """Database handle for the test module.

    NOTE(review): drop_all() runs before this fixture pushes its own
    context — it relies on the context already active from the `app`
    fixture; confirm that ordering is intentional.
    """
    db.drop_all()
    with app.app_context():
        yield db


@pytest.fixture(scope='module')
def client(app):
    # Flask test client for issuing HTTP requests against the app.
    return app.test_client()


@pytest.fixture(scope='module')
def runner(app):
    # CLI runner for invoking the app's command-line commands.
    return app.test_cli_runner()


@pytest.fixture(scope='function')
def auth(monkeypatch):
    """Stub Auth0 token verification to always yield the user 'user'."""
    def auth_return(_token, _):
        return {
            'sub': 'user'
        }
    monkeypatch.setattr(Auth0Adapter, "verify_token", auth_return)
| 20.8
| 66
| 0.682692
| 96
| 728
| 5.020833
| 0.34375
| 0.134855
| 0.186722
| 0.19917
| 0.327801
| 0.124481
| 0
| 0
| 0
| 0
| 0
| 0.00678
| 0.18956
| 728
| 35
| 66
| 20.8
| 0.810169
| 0
| 0
| 0.222222
| 0
| 0
| 0.069959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.148148
| 0.111111
| 0.481481
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
b918a0c57c7a8f88610d0cb7c461bfdc7393cd6e
| 356
|
py
|
Python
|
mail/__init__.py
|
irahorecka/craigslist-housing-subscription
|
389c325dc30526eaed4c2333f5dd4d60d7939a13
|
[
"MIT"
] | null | null | null |
mail/__init__.py
|
irahorecka/craigslist-housing-subscription
|
389c325dc30526eaed4c2333f5dd4d60d7939a13
|
[
"MIT"
] | null | null | null |
mail/__init__.py
|
irahorecka/craigslist-housing-subscription
|
389c325dc30526eaed4c2333f5dd4d60d7939a13
|
[
"MIT"
] | null | null | null |
import os
from ._threading import map_threads
from .send_email import write_email
# Check if environment variables exist for sender email and password.
# Fails fast at import time so a misconfigured deployment is caught
# before any mail is attempted.
if not os.environ.get("EMAIL_USER") or not os.environ.get("EMAIL_PASS"):
    raise ValueError(
        "No value for 'EMAIL_USER' and/or 'EMAIL_PASS'. Please configure these environment variables."
    )
| 35.6
| 102
| 0.761236
| 53
| 356
| 4.962264
| 0.584906
| 0.152091
| 0.091255
| 0.114068
| 0.152091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162921
| 356
| 9
| 103
| 39.555556
| 0.88255
| 0.185393
| 0
| 0
| 0
| 0
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.285714
| 0.428571
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
b9272757bcdff4617c3e6868e50e84a6884af822
| 899
|
py
|
Python
|
Scoreboard.py
|
rayman42003/INF123-VVPilot
|
6087332f2e53a4651dc97bd9c7142cde0fd9cf48
|
[
"MIT"
] | null | null | null |
Scoreboard.py
|
rayman42003/INF123-VVPilot
|
6087332f2e53a4651dc97bd9c7142cde0fd9cf48
|
[
"MIT"
] | null | null | null |
Scoreboard.py
|
rayman42003/INF123-VVPilot
|
6087332f2e53a4651dc97bd9c7142cde0fd9cf48
|
[
"MIT"
] | null | null | null |
'''
Created on May 19, 2014
@author: john
'''
class scoreboard:
    """Tracks per-ship statistics as (kills, deaths, score) tuples."""

    def __init__(self):
        # Maps ship -> (kills, deaths, score).
        self.score_map = {}

    def add_player(self, ship):
        """Register a ship with zeroed stats."""
        # tuple is (kills, deaths, score)
        self.score_map[ship] = (0, 0, 0)

    def on_ship_death(self, ship):
        """Increment the death count for ship, if registered."""
        if ship in self.score_map:
            tup = self.score_map[ship]
            self.score_map[ship] = (tup[0], tup[1] + 1, tup[2])

    def on_ship_kill(self, dead_ship, kill_ship):
        """Increment the kill count for kill_ship, if registered.

        `dead_ship` is unused but kept for caller compatibility.
        """
        if kill_ship in self.score_map:
            tup = self.score_map[kill_ship]
            self.score_map[kill_ship] = (tup[0] + 1, tup[1], tup[2])

    def increment_score(self, ship):
        """Add one point to ship's score and print the new totals."""
        if ship in self.score_map:
            tup = self.score_map[ship]
            self.score_map[ship] = (tup[0], tup[1], tup[2] + 1)
            # Fix: the original Python 2 print statement is a
            # SyntaxError on Python 3; the parenthesized single-argument
            # call below prints the same text on both 2 and 3.
            print(str(ship) + "'s score is now " + str(self.score_map[ship]))
| 29.966667
| 76
| 0.548387
| 133
| 899
| 3.503759
| 0.263158
| 0.23176
| 0.309013
| 0.206009
| 0.435622
| 0.375536
| 0.375536
| 0.375536
| 0.375536
| 0.304721
| 0
| 0.034146
| 0.315907
| 899
| 30
| 76
| 29.966667
| 0.723577
| 0.034483
| 0
| 0.222222
| 0
| 0
| 0.019488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.055556
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b969210541a1bf0ad0c156ed21f83acaaec0d2f0
| 90
|
py
|
Python
|
src/applications/payment/apps.py
|
luisito666/M2-API-REST
|
238837c2cbd0e9aadcce29def0dd9935b888047b
|
[
"MIT"
] | null | null | null |
src/applications/payment/apps.py
|
luisito666/M2-API-REST
|
238837c2cbd0e9aadcce29def0dd9935b888047b
|
[
"MIT"
] | 3
|
2021-04-08T19:14:52.000Z
|
2022-03-12T01:05:15.000Z
|
src/applications/payment/apps.py
|
luisito666/M2-API-REST
|
238837c2cbd0e9aadcce29def0dd9935b888047b
|
[
"MIT"
] | 1
|
2020-12-25T20:34:09.000Z
|
2020-12-25T20:34:09.000Z
|
from django.apps import AppConfig
class PaymentsConfig(AppConfig):
    """Django application configuration for the payment app."""

    # Must match the app's package path ("payment") for autodiscovery.
    name = "payment"
| 15
| 33
| 0.755556
| 10
| 90
| 6.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 90
| 5
| 34
| 18
| 0.906667
| 0
| 0
| 0
| 0
| 0
| 0.077778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
b976e62eab69136c5eeefe37eaf317e0167e7a9a
| 54
|
py
|
Python
|
python/testData/formatter/sliceAlignment.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/formatter/sliceAlignment.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/formatter/sliceAlignment.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
# IDE formatter test fixture: slice expressions deliberately split
# across lines to exercise alignment of the ':' segments.
# NOTE(review): `ys` is never defined in this file, so importing it
# raises NameError — the data is meant to be formatted, not executed.
xs1 = ys[42:
         5:
         -1]
xs2 = ys[:
         2:
         3]
xs3 = ys[::
         3]
| 4.909091
| 12
| 0.407407
| 12
| 54
| 1.833333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 0.296296
| 54
| 10
| 13
| 5.4
| 0.315789
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
b99f26afc94593832247aafee2b5e1c19669c669
| 212
|
py
|
Python
|
NoChannelBot/core/models/user.py
|
tolbiluha/NoChannelBot
|
b27cdc82facfc9a9b80ab4b3bc846f3bc592f5eb
|
[
"MIT"
] | 7
|
2021-12-10T20:41:37.000Z
|
2021-12-12T21:17:31.000Z
|
NoChannelBot/core/models/user.py
|
tolbiluha/NoChannelBot
|
b27cdc82facfc9a9b80ab4b3bc846f3bc592f5eb
|
[
"MIT"
] | null | null | null |
NoChannelBot/core/models/user.py
|
tolbiluha/NoChannelBot
|
b27cdc82facfc9a9b80ab4b3bc846f3bc592f5eb
|
[
"MIT"
] | 1
|
2022-01-26T07:18:31.000Z
|
2022-01-26T07:18:31.000Z
|
from typing import Optional
from pydantic import BaseModel
class UserModel(BaseModel):
    """Pydantic model of a messaging-platform user record.

    NOTE(review): field names mirror Telegram's User object —
    presumably parsed from bot-API updates; confirm against the caller.
    """

    # Required fields.
    id: int
    first_name: str
    # Optional fields may be absent from the payload.
    last_name: Optional[str]
    username: Optional[str]
    language_code: Optional[str]
| 17.666667
| 32
| 0.731132
| 27
| 212
| 5.62963
| 0.62963
| 0.217105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20283
| 212
| 11
| 33
| 19.272727
| 0.899408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
b9a852a942fe0b1aa0c6f8bc83902a5117badc7e
| 153
|
py
|
Python
|
app/objects/models.py
|
zvyap/gulag
|
82babc7f698bf42aeac523f4ea52a87a6c539fe7
|
[
"MIT"
] | 10
|
2022-02-07T16:11:39.000Z
|
2022-03-13T14:05:37.000Z
|
app/objects/models.py
|
Miku-Network/gulag
|
c6a3835ce138a8d27f7efd764e75466c881e105c
|
[
"MIT"
] | 26
|
2022-03-15T18:39:10.000Z
|
2022-03-31T06:57:06.000Z
|
app/objects/models.py
|
Miku-Network/gulag
|
c6a3835ce138a8d27f7efd764e75466c881e105c
|
[
"MIT"
] | 6
|
2022-03-20T18:52:31.000Z
|
2022-03-30T21:55:16.000Z
|
from __future__ import annotations
from pydantic import BaseModel
class OsuBeatmapRequestForm(BaseModel):
    """Request body for an osu! beatmap lookup.

    NOTE(review): fields are capitalized to match the client's wire
    format — do not rename without checking the osu! client protocol.
    """

    Filenames: list[str]
    Ids: list[int]
| 17
| 39
| 0.777778
| 17
| 153
| 6.764706
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163399
| 153
| 8
| 40
| 19.125
| 0.898438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b9b387c5e14bb651aeb5f4e3f21c907e6dfc6671
| 120
|
py
|
Python
|
app/__init__.py
|
lzy-106/reci.tech
|
a9cf426d9d114febd3e4a4339744a036b9ad1f1d
|
[
"BSD-3-Clause"
] | null | null | null |
app/__init__.py
|
lzy-106/reci.tech
|
a9cf426d9d114febd3e4a4339744a036b9ad1f1d
|
[
"BSD-3-Clause"
] | null | null | null |
app/__init__.py
|
lzy-106/reci.tech
|
a9cf426d9d114febd3e4a4339744a036b9ad1f1d
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Flask
# Application singleton imported throughout the package.
app = Flask(__name__)
# SECURITY NOTE(review): hard-coded session secret; sessions can be
# forged if this ships to production.  Load it from an environment
# variable or config file instead.
app.secret_key = "super secret key"
# Imported last (after `app` exists) to avoid a circular import:
# controllers need the `app` object defined above.
from .controllers import controller
| 17.142857
| 35
| 0.783333
| 17
| 120
| 5.235294
| 0.588235
| 0.202247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 120
| 6
| 36
| 20
| 0.872549
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
b9da9bd88f03f9130472df90b01ecb1508f24ce7
| 144
|
py
|
Python
|
projects/golem_e2e/tests/project_suites/access_suite.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_e2e/tests/project_suites/access_suite.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_e2e/tests/project_suites/access_suite.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
# Golem test metadata: human-readable description shown in reports.
description = 'Verify the user can access a suite by clicking on it in the suite list.'

# Per-test application/browser mapping; empty — defaults are used.
apps = {}


def setup(self):
    # NOTE(review): Golem passes the test's data object as `self` here
    # (framework convention, not a class method).  No setup needed.
    pass
def test(data):
| 14.4
| 87
| 0.680556
| 24
| 144
| 4.083333
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229167
| 144
| 9
| 88
| 16
| 0.882883
| 0
| 0
| 0
| 0
| 0
| 0.496504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.2
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
6a3589f7ec5ec4c9acf6733265887ec24163d4d5
| 445
|
py
|
Python
|
src/probnum/filtsmooth/gaussfiltsmooth/__init__.py
|
feimeng93/probnum
|
4e46273c0157d26b9be2a7a415ccf69a3691ec22
|
[
"MIT"
] | 1
|
2021-04-14T14:17:12.000Z
|
2021-04-14T14:17:12.000Z
|
src/probnum/filtsmooth/gaussfiltsmooth/__init__.py
|
jzenn/probnum
|
cb9e5ec07384913049a312ac62cfec88970f1c8d
|
[
"MIT"
] | 16
|
2021-03-08T07:25:31.000Z
|
2022-03-28T21:05:53.000Z
|
src/probnum/filtsmooth/gaussfiltsmooth/__init__.py
|
jzenn/probnum
|
cb9e5ec07384913049a312ac62cfec88970f1c8d
|
[
"MIT"
] | 2
|
2022-01-23T14:24:08.000Z
|
2022-01-29T01:26:47.000Z
|
from .extendedkalman import ContinuousEKFComponent, DiscreteEKFComponent, EKFComponent
from .iterated_component import IteratedDiscreteComponent
from .kalman import Kalman
from .kalmanposterior import FilteringPosterior, KalmanPosterior, SmoothingPosterior
from .stoppingcriterion import StoppingCriterion
from .unscentedkalman import ContinuousUKFComponent, DiscreteUKFComponent, UKFComponent
from .unscentedtransform import UnscentedTransform
| 55.625
| 87
| 0.894382
| 35
| 445
| 11.342857
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076404
| 445
| 7
| 88
| 63.571429
| 0.965937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
6a3e7bba10f4f73b9bfb2798a491be89207b8b2f
| 51
|
py
|
Python
|
source/lib/test.py
|
aapooya/new
|
fe33a706852a6196cc16e99efa439923522e33e2
|
[
"Python-2.0",
"OLDAP-2.7"
] | 24
|
2015-04-23T17:38:10.000Z
|
2022-02-12T08:49:46.000Z
|
source/lib/test.py
|
Zhaoyue-real/Mat2py
|
fe33a706852a6196cc16e99efa439923522e33e2
|
[
"Python-2.0",
"OLDAP-2.7"
] | 2
|
2015-07-20T19:45:11.000Z
|
2015-07-20T19:49:11.000Z
|
source/lib/test.py
|
Zhaoyue-real/Mat2py
|
fe33a706852a6196cc16e99efa439923522e33e2
|
[
"Python-2.0",
"OLDAP-2.7"
] | 9
|
2015-06-08T16:57:38.000Z
|
2022-03-19T16:52:30.000Z
|
def test(curNode):
print "## Hello, World! ##"
| 17
| 31
| 0.568627
| 6
| 51
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215686
| 51
| 2
| 32
| 25.5
| 0.725
| 0
| 0
| 0
| 0
| 0
| 0.372549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
6a407fdc204068e742a16ecda85974ce771d78d4
| 172
|
py
|
Python
|
4chanbot.py
|
nattycleopatra/py4chanbot
|
5ed9b06bde6641d7ff8204176e4b669768baaabd
|
[
"MIT"
] | 3
|
2016-07-22T02:55:53.000Z
|
2016-12-07T14:55:19.000Z
|
4chanbot.py
|
nattycleopatra/py4chanbot
|
5ed9b06bde6641d7ff8204176e4b669768baaabd
|
[
"MIT"
] | 1
|
2016-07-22T02:53:38.000Z
|
2016-07-27T04:04:50.000Z
|
4chanbot.py
|
nattycleopatra/py4chanbot
|
5ed9b06bde6641d7ff8204176e4b669768baaabd
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
from py4chanbot import ThreadBot
from configparser import ConfigParser
config = ConfigParser()
config.read('config.cfg')
ThreadBot(config).main()
| 19.111111
| 37
| 0.784884
| 21
| 172
| 6.428571
| 0.619048
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.104651
| 172
| 8
| 38
| 21.5
| 0.863636
| 0.127907
| 0
| 0
| 0
| 0
| 0.067114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
6a465e5e0cab7b397740d0b6a32aedfc11090e12
| 2,308
|
py
|
Python
|
tests/test_nino.py
|
ukgovdatascience/scrubadub
|
9d742628345f3e8867ccbb9e9e0181540850bb88
|
[
"MIT"
] | 1
|
2018-11-19T15:37:46.000Z
|
2018-11-19T15:37:46.000Z
|
tests/test_nino.py
|
ukgovdatascience/scrubadub
|
9d742628345f3e8867ccbb9e9e0181540850bb88
|
[
"MIT"
] | 1
|
2018-01-26T17:39:54.000Z
|
2018-01-26T17:39:54.000Z
|
tests/test_nino.py
|
ukgovdatascience/scrubadub
|
9d742628345f3e8867ccbb9e9e0181540850bb88
|
[
"MIT"
] | 3
|
2019-08-29T11:53:42.000Z
|
2021-04-10T19:51:26.000Z
|
import unittest
from base import BaseTestCase
class NinoTestCase(unittest.TestCase, BaseTestCase):
"""
Test cases for National Insurance Number (NINO) removal. Also provides
test cases for when nino and name detectors clash.
"""
def test_nino(self):
"""
BEFORE: My nino is AB121314C.
AFTER: My nino is {{NINO}}.
"""
self.compare_before_after()
def test_lowercase_nino(self):
"""
BEFORE: My nino is ab121314c.
AFTER: My nino is {{NINO}}.
"""
self.compare_before_after()
def test_nino_with_spaces(self):
"""
Note strange behaviour here, despite regex including 'B' in second
character, it does not seem to be found by the regex, See next test
BEFORE: My nino is AC 121314 C.
AFTER: My nino is {{NINO}}.
"""
self.compare_before_after()
def test_lower_case_nino_with_spaces(self):
"""
Note strange behaviour here, despite regex including 'B' in second
character, it does not seem to be found by the regex, See next test
BEFORE: My nino is ac 121314 c.
AFTER: My nino is {{NINO}}.
"""
self.compare_before_after()
def test_nino_name_clash(self):
"""
Note strange behaviour here, despite regex including 'B' in second
character, it does not seem to be found by the regex, See next test
BEFORE: My nino is AB 121314 C.
AFTER: My nino is {{NINO+NAME}}.
"""
self.compare_before_after()
def test_disable_name(self):
"""
BEFORE: My nino is AB 123456 C
AFTER: My nino is {{NINO}}
"""
before, after = self.get_before_after()
import scrubadub
scrubber = scrubadub.Scrubber()
scrubber.remove_detector('name')
self.check_equal(after, scrubber.clean(before))
def test_nino_missing_last_character(self):
"""
BEFORE: My nino is AB121314.
AFTER: My nino is {{NINO}}.
"""
self.compare_before_after()
def test_lower_case_nino_missing_last_character(self):
"""
BEFORE: My nino is ab121314.
AFTER: My nino is {{NINO}}.
"""
self.compare_before_after()
| 28.85
| 75
| 0.597054
| 291
| 2,308
| 4.584192
| 0.257732
| 0.071964
| 0.095952
| 0.083958
| 0.722639
| 0.706147
| 0.670915
| 0.652924
| 0.652924
| 0.652924
| 0
| 0.030284
| 0.313258
| 2,308
| 79
| 76
| 29.21519
| 0.811356
| 0.437608
| 0
| 0.304348
| 0
| 0
| 0.004073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.347826
| false
| 0
| 0.130435
| 0
| 0.521739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
dbfad1295c1b98516a7744ee0f7b1da578062f17
| 211
|
py
|
Python
|
runners/abs.py
|
rudi/chat-app
|
ce80afdfea43cad366bb8b0ccd6341be9da22fa8
|
[
"BSD-2-Clause"
] | 3
|
2019-10-25T11:39:51.000Z
|
2020-04-28T18:14:55.000Z
|
runners/abs.py
|
rudi/chat-app
|
ce80afdfea43cad366bb8b0ccd6341be9da22fa8
|
[
"BSD-2-Clause"
] | 11
|
2020-04-10T10:09:45.000Z
|
2020-05-12T14:49:27.000Z
|
runners/abs.py
|
rudi/chat-app
|
ce80afdfea43cad366bb8b0ccd6341be9da22fa8
|
[
"BSD-2-Clause"
] | 5
|
2020-04-08T15:32:46.000Z
|
2020-05-08T19:14:25.000Z
|
from runners.output_parser import OutputParser
def setup(oBenchmarkRunner, cores, phys_cores, scenario, memory):
print("ABS!")
def gnuplot(cores, files, results):
OutputParser(files).parse(cores, results)
| 26.375
| 65
| 0.777251
| 26
| 211
| 6.230769
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109005
| 211
| 7
| 66
| 30.142857
| 0.861702
| 0
| 0
| 0
| 0
| 0
| 0.018957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
e02d8d2f8386373bdcc48a09d334f2ae706a1c25
| 222
|
py
|
Python
|
Python Basic Foundation Programs/listoperations2.py
|
gohulnathv3/Python
|
96da0b35c5a4067ed31288f3710735405f058f5d
|
[
"MIT"
] | null | null | null |
Python Basic Foundation Programs/listoperations2.py
|
gohulnathv3/Python
|
96da0b35c5a4067ed31288f3710735405f058f5d
|
[
"MIT"
] | null | null | null |
Python Basic Foundation Programs/listoperations2.py
|
gohulnathv3/Python
|
96da0b35c5a4067ed31288f3710735405f058f5d
|
[
"MIT"
] | null | null | null |
p = [1,2,3,4,5,6,7,8,9]
del p[1:3]
print(p[:])
p.remove(8)
print(p[:])
print(p.pop())
p.clear()
print(p[:])
l=[1,3,4,5,6,7]
l.remove(3)
print(l[:])
l.sort()
print(l[:])
l.reverse()
print(l[:])
l.clear()
print(l[:])
| 8.88
| 23
| 0.518018
| 52
| 222
| 2.211538
| 0.346154
| 0.208696
| 0.182609
| 0.069565
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097436
| 0.121622
| 222
| 24
| 24
| 9.25
| 0.492308
| 0
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.470588
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
e04087cc9cf4d8ce2cf17245ee344942520ed32d
| 91
|
py
|
Python
|
backend/placeapp/apps.py
|
Lenend-KPU/LBS-Platform
|
75ba24db8969248e74e9d974638977de1c0bc36a
|
[
"MIT"
] | 15
|
2020-12-23T13:56:49.000Z
|
2021-12-10T11:04:23.000Z
|
backend/placeapp/apps.py
|
Lenend-KPU/LBS-Platform
|
75ba24db8969248e74e9d974638977de1c0bc36a
|
[
"MIT"
] | 41
|
2021-03-19T07:51:48.000Z
|
2021-11-22T09:45:46.000Z
|
backend/placeapp/apps.py
|
Lenend-KPU/LBS-Platform
|
75ba24db8969248e74e9d974638977de1c0bc36a
|
[
"MIT"
] | 3
|
2021-03-24T15:18:24.000Z
|
2021-09-11T14:51:35.000Z
|
from django.apps import AppConfig
class PlaceappConfig(AppConfig):
name = 'placeapp'
| 15.166667
| 33
| 0.758242
| 10
| 91
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164835
| 91
| 5
| 34
| 18.2
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
e05669c42c27bdf4523636ea6d4321cb07d3fc4e
| 261
|
py
|
Python
|
nexoclom/solarsystem/__init__.py
|
mburger-stsci/NExoCloM
|
c0c81eeb04c5571662f3d86337d84a18f1cd0dcf
|
[
"BSD-3-Clause"
] | null | null | null |
nexoclom/solarsystem/__init__.py
|
mburger-stsci/NExoCloM
|
c0c81eeb04c5571662f3d86337d84a18f1cd0dcf
|
[
"BSD-3-Clause"
] | null | null | null |
nexoclom/solarsystem/__init__.py
|
mburger-stsci/NExoCloM
|
c0c81eeb04c5571662f3d86337d84a18f1cd0dcf
|
[
"BSD-3-Clause"
] | 1
|
2018-11-23T20:55:33.000Z
|
2018-11-23T20:55:33.000Z
|
from nexoclom.solarsystem.SSObject import SSObject
from nexoclom.solarsystem.planet_dist import planet_dist
from nexoclom.solarsystem.planet_geometry import planet_geometry
__name__ = 'solarsystem'
__author__ = 'Matthew Burger'
__email__ = 'mburger@stsci.edu'
| 32.625
| 64
| 0.846743
| 31
| 261
| 6.612903
| 0.516129
| 0.17561
| 0.336585
| 0.282927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088123
| 261
| 7
| 65
| 37.285714
| 0.861345
| 0
| 0
| 0
| 0
| 0
| 0.16092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
e05d4b6da00c48e3456be80d1354bef3db028344
| 392
|
py
|
Python
|
smarty/__init__.py
|
openforcefield/smarty
|
882d54b6d6d0fada748c71789964b07be2210a6a
|
[
"MIT"
] | 10
|
2018-03-29T15:31:50.000Z
|
2022-02-17T14:04:37.000Z
|
smarty/__init__.py
|
openforcefield/smarty
|
882d54b6d6d0fada748c71789964b07be2210a6a
|
[
"MIT"
] | 14
|
2017-11-22T21:27:25.000Z
|
2019-01-24T04:50:42.000Z
|
smarty/__init__.py
|
openforcefield/smarty
|
882d54b6d6d0fada748c71789964b07be2210a6a
|
[
"MIT"
] | 2
|
2019-03-05T22:52:26.000Z
|
2022-02-17T14:05:06.000Z
|
try:
import openeye
# These can only be imported if openeye tools are available
from smarty.atomtyper import *
from smarty.sampler import *
from smarty.utils import *
from smarty.sampler_smirky import *
except Exception as e:
print(e)
print('Warning: Cannot import openeye toolkit; not all functionality will be available.')
from smarty.score_utils import *
| 28
| 93
| 0.729592
| 53
| 392
| 5.358491
| 0.584906
| 0.176056
| 0.169014
| 0.161972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 392
| 13
| 94
| 30.153846
| 0.922078
| 0.145408
| 0
| 0
| 0
| 0
| 0.24024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.7
| 0
| 0.7
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
0ebab5b6ba33572e99dc11309f3cbc445ef8a28f
| 380
|
py
|
Python
|
ex08.py
|
Rajab322/lpthw
|
bde26ca21bd1c72807c93fff15a45a1154ba59d7
|
[
"MIT"
] | 329
|
2017-02-25T15:06:58.000Z
|
2022-03-31T18:22:21.000Z
|
ex8.py
|
dkorzhevin/learn-python3-thw-code
|
bea1e954d52ed845c3ade7ed87d7bef7de1651ad
|
[
"MIT"
] | 10
|
2017-02-26T13:55:38.000Z
|
2020-02-20T06:10:26.000Z
|
ex8.py
|
dkorzhevin/learn-python3-thw-code
|
bea1e954d52ed845c3ade7ed87d7bef7de1651ad
|
[
"MIT"
] | 180
|
2017-02-25T20:42:03.000Z
|
2022-02-09T05:21:40.000Z
|
formatter = "{} {} {} {}"
print(formatter.format(1, 2, 3, 4))
print(formatter.format("one", "two", "three", "four"))
print(formatter.format(True, False, False, True))
print(formatter.format(formatter, formatter, formatter, formatter))
print(formatter.format(
"I had this thing.",
"That you could type up right.",
"But it didn't sing.",
"So I said goodnight."
))
| 27.142857
| 67
| 0.652632
| 51
| 380
| 4.862745
| 0.607843
| 0.282258
| 0.403226
| 0.233871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 0.157895
| 380
| 13
| 68
| 29.230769
| 0.7625
| 0
| 0
| 0
| 0
| 0
| 0.292876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.454545
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
0ed9e460df06db0d9eb0489f4da0eb68ba0c4502
| 94
|
py
|
Python
|
Quiz.py
|
LordEldak/Vampy-2017-CS
|
0754f166c2825c4f9afebb9ddad11ad2bd176133
|
[
"MIT"
] | null | null | null |
Quiz.py
|
LordEldak/Vampy-2017-CS
|
0754f166c2825c4f9afebb9ddad11ad2bd176133
|
[
"MIT"
] | null | null | null |
Quiz.py
|
LordEldak/Vampy-2017-CS
|
0754f166c2825c4f9afebb9ddad11ad2bd176133
|
[
"MIT"
] | null | null | null |
answer = input("What food do you prefer? A: Pizza B: Chicken C: Hamburgers")
if answer == "A"
| 31.333333
| 76
| 0.680851
| 16
| 94
| 4
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180851
| 94
| 2
| 77
| 47
| 0.831169
| 0
| 0
| 0
| 0
| 0
| 0.62766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0ee2773c5b2de9a4ee3e33798b45e1c39570fc9b
| 251
|
py
|
Python
|
active_learning/query_strats/regression/__init__.py
|
WardLT/active-learning
|
289f2ef28f6376d697d435014ad03a4e1c0a8ae5
|
[
"Apache-2.0"
] | null | null | null |
active_learning/query_strats/regression/__init__.py
|
WardLT/active-learning
|
289f2ef28f6376d697d435014ad03a4e1c0a8ae5
|
[
"Apache-2.0"
] | null | null | null |
active_learning/query_strats/regression/__init__.py
|
WardLT/active-learning
|
289f2ef28f6376d697d435014ad03a4e1c0a8ae5
|
[
"Apache-2.0"
] | 1
|
2019-04-29T15:33:52.000Z
|
2019-04-29T15:33:52.000Z
|
"""Query strategies specific to regression problems"""
from .greedy import GreedySelection
from .mcal_regression import MCALSelection
from .uncertainty import UncertaintySampling
__all__ = ['GreedySelection', 'MCALSelection', 'UncertaintySampling']
| 31.375
| 69
| 0.820717
| 23
| 251
| 8.73913
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099602
| 251
| 7
| 70
| 35.857143
| 0.889381
| 0.191235
| 0
| 0
| 0
| 0
| 0.238579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
0ef20565be9370febdc2a6f0617af9c1ebd6525f
| 194
|
py
|
Python
|
__init__.py
|
tjpell/ezencoder
|
b30433cf1f96204acd6c844312bc6e61fe5f3165
|
[
"MIT"
] | 2
|
2018-04-07T04:09:45.000Z
|
2018-07-30T20:30:53.000Z
|
__init__.py
|
tjpell/ezencoder
|
b30433cf1f96204acd6c844312bc6e61fe5f3165
|
[
"MIT"
] | null | null | null |
__init__.py
|
tjpell/ezencoder
|
b30433cf1f96204acd6c844312bc6e61fe5f3165
|
[
"MIT"
] | null | null | null |
from ezencoder.target import TargetEncoder
from ezencoder.unknown import UnknownEncoder
from ezencoder.cyclic import CyclicEncoder
__all__ = (target.__all__ + unknown.__all__ + cyclic.__all__)
| 32.333333
| 61
| 0.840206
| 22
| 194
| 6.681818
| 0.454545
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103093
| 194
| 6
| 61
| 32.333333
| 0.844828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
0efaf6e60b7c666e84bbef04126d98e91cf1ec00
| 9,394
|
py
|
Python
|
mkt/reviewers/tests/test_tasks.py
|
acidburn0zzz/zamboni
|
780fbeb99e240a569a72a1c15410f49b76b3807c
|
[
"BSD-3-Clause"
] | 1
|
2017-07-14T19:22:39.000Z
|
2017-07-14T19:22:39.000Z
|
mkt/reviewers/tests/test_tasks.py
|
Acidburn0zzz/zamboni
|
780fbeb99e240a569a72a1c15410f49b76b3807c
|
[
"BSD-3-Clause"
] | 6
|
2021-02-02T23:08:48.000Z
|
2021-09-08T02:47:17.000Z
|
mkt/reviewers/tests/test_tasks.py
|
Acidburn0zzz/zamboni
|
780fbeb99e240a569a72a1c15410f49b76b3807c
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.conf import settings
import mock
from nose.tools import eq_
import amo
from abuse.models import AbuseReport
from amo.tasks import find_abuse_escalations, find_refund_escalations
from amo.tests import app_factory
from devhub.models import AppLog
from editors.models import EscalationQueue
from market.models import AddonPurchase, Refund
from stats.models import Contribution
from users.models import UserProfile
class TestAbuseEscalationTask(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
self.app = app_factory(name='XXX')
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
patcher = mock.patch.object(settings, 'TASK_USER_ID', 4043307)
patcher.start()
self.addCleanup(patcher.stop)
def test_no_abuses_no_history(self):
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
def test_abuse_no_history(self):
for x in range(2):
AbuseReport.objects.create(addon=self.app)
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
def test_abuse_already_escalated(self):
for x in range(2):
AbuseReport.objects.create(addon=self.app)
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
def test_abuse_cleared_not_escalated(self):
for x in range(2):
ar = AbuseReport.objects.create(addon=self.app)
ar.created = datetime.datetime.now() - datetime.timedelta(days=1)
ar.save()
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
# Simulate a reviewer clearing an escalation... remove app from queue,
# and write a log.
EscalationQueue.objects.filter(addon=self.app).delete()
amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
details={'comments': 'All clear'})
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
# Task will find it again but not add it again.
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
def test_older_abuses_cleared_then_new(self):
for x in range(2):
ar = AbuseReport.objects.create(addon=self.app)
ar.created = datetime.datetime.now() - datetime.timedelta(days=1)
ar.save()
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
# Simulate a reviewer clearing an escalation... remove app from queue,
# and write a log.
EscalationQueue.objects.filter(addon=self.app).delete()
amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
details={'comments': 'All clear'})
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
# Task will find it again but not add it again.
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
# New abuse reports that come in should re-add to queue.
for x in range(2):
AbuseReport.objects.create(addon=self.app)
find_abuse_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
def test_already_escalated_for_other_still_logs(self):
# Add app to queue for high refunds.
EscalationQueue.objects.create(addon=self.app)
amo.log(amo.LOG.ESCALATED_HIGH_REFUNDS, self.app,
self.app.current_version, details={'comments': 'hi refunds'})
# Set up abuses.
for x in range(2):
AbuseReport.objects.create(addon=self.app)
find_abuse_escalations(self.app.id)
# Verify it logged the high abuse reports.
action = amo.LOG.ESCALATED_HIGH_ABUSE
assert AppLog.objects.filter(
addon=self.app, activity_log__action=action.id).exists(), (
u'Expected high abuse to be logged')
class TestRefundsEscalationTask(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
self.app = app_factory(name='XXX')
self.user1, self.user2, self.user3 = UserProfile.objects.all()[:3]
patcher = mock.patch.object(settings, 'TASK_USER_ID', 4043307)
patcher.start()
self.addCleanup(patcher.stop)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
def _purchase(self, user=None, created=None):
ap1 = AddonPurchase.objects.create(user=user or self.user1,
addon=self.app)
if created:
ap1.update(created=created)
def _refund(self, user=None, created=None):
contribution = Contribution.objects.create(addon=self.app,
user=user or self.user1)
ref = Refund.objects.create(contribution=contribution,
user=user or self.user1)
if created:
ref.update(created=created)
# Needed because these tests can run in the same second and the
# refund detection task depends on timestamp logic for when to
# escalate.
applog = AppLog.objects.all().order_by('-created', '-id')[0]
applog.update(created=created)
def test_multiple_refunds_same_user(self):
self._purchase(self.user1)
self._refund(self.user1)
self._refund(self.user1)
eq_(Refund.recent_refund_ratio(
self.app.id, datetime.datetime.now() - datetime.timedelta(days=1)),
1.0)
def test_no_refunds(self):
find_refund_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
def test_refunds(self):
self._purchase(self.user1)
self._purchase(self.user2)
self._refund(self.user1)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
def test_refunds_already_escalated(self):
self._purchase(self.user1)
self._purchase(self.user2)
self._refund(self.user1)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
# Task was run on Refund.post_save, re-run task to make sure we don't
# escalate again.
find_refund_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
def test_refunds_cleared_not_escalated(self):
stamp = datetime.datetime.now() - datetime.timedelta(days=2)
self._purchase(self.user1, stamp)
self._purchase(self.user2, stamp)
self._refund(self.user1, stamp)
# Simulate a reviewer clearing an escalation...
# remove app from queue and write a log.
EscalationQueue.objects.filter(addon=self.app).delete()
amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
details={'comments': 'All clear'})
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
# Task will find it again but not add it again.
find_refund_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
def test_older_refund_escalations_then_new(self):
stamp = datetime.datetime.now() - datetime.timedelta(days=2)
self._purchase(self.user1, stamp)
self._purchase(self.user2, stamp)
# Triggers 33% for refund / purchase ratio.
self._refund(self.user1, stamp)
# Simulate a reviewer clearing an escalation...
# remove app from queue and write a log.
EscalationQueue.objects.filter(addon=self.app).delete()
amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
details={'comments': 'All ok'})
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
# Task will find it again but not add it again.
find_refund_escalations(self.app.id)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
# Issue another refund, which should trigger another escalation.
self._purchase(self.user3)
self._refund(self.user3)
eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
def test_already_escalated_for_other_still_logs(self):
# Add app to queue for abuse reports.
EscalationQueue.objects.create(addon=self.app)
amo.log(amo.LOG.ESCALATED_HIGH_ABUSE, self.app,
self.app.current_version, details={'comments': 'abuse'})
# Set up purchases.
stamp = datetime.datetime.now() - datetime.timedelta(days=2)
self._purchase(self.user1, stamp)
self._purchase(self.user2, stamp)
# Triggers 33% for refund / purchase ratio.
self._refund(self.user1, stamp)
# Verify it logged the high refunds.
action = amo.LOG.ESCALATED_HIGH_REFUNDS
assert AppLog.objects.filter(
addon=self.app, activity_log__action=action.id).exists(), (
u'Expected high refunds to be logged')
| 41.38326
| 79
| 0.65776
| 1,192
| 9,394
| 5.04698
| 0.145973
| 0.077959
| 0.075798
| 0.102394
| 0.769781
| 0.72889
| 0.718418
| 0.70861
| 0.694315
| 0.686336
| 0
| 0.011657
| 0.232915
| 9,394
| 226
| 80
| 41.566372
| 0.823203
| 0.120077
| 0
| 0.679245
| 0
| 0
| 0.02707
| 0
| 0
| 0
| 0
| 0
| 0.012579
| 1
| 0.106918
| false
| 0
| 0.081761
| 0
| 0.213836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
16097f3ea9cc9e964c39493c4541089fd4cedba5
| 178
|
py
|
Python
|
bnn_mcmc_examples/datasets/pima/data1/constants.py
|
papamarkou/bnn_mcmc_examples
|
7bb4ecfb33db4c30a8e61e31f528bda0efb24e3d
|
[
"MIT"
] | 1
|
2021-09-09T15:55:37.000Z
|
2021-09-09T15:55:37.000Z
|
bnn_mcmc_examples/datasets/pima/data1/constants.py
|
kushagragpt99/bnn_mcmc_examples
|
297cdb1e74335860989bebdb4ff6f6322b6adc06
|
[
"MIT"
] | null | null | null |
bnn_mcmc_examples/datasets/pima/data1/constants.py
|
kushagragpt99/bnn_mcmc_examples
|
297cdb1e74335860989bebdb4ff6f6322b6adc06
|
[
"MIT"
] | 1
|
2021-10-05T06:38:57.000Z
|
2021-10-05T06:38:57.000Z
|
# %% Import packages
from bnn_mcmc_examples.datasets import data_paths
# %% Define constants
data_base_name = 'data1'
data_path = data_paths['pima'].joinpath(data_base_name)
| 17.8
| 55
| 0.775281
| 25
| 178
| 5.16
| 0.68
| 0.139535
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00641
| 0.123596
| 178
| 9
| 56
| 19.777778
| 0.820513
| 0.213483
| 0
| 0
| 0
| 0
| 0.065693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
161934b58f7c1472bf59a0ac5c87867facc9a02e
| 304
|
py
|
Python
|
testing/example_scripts/fixtures/custom_item/conftest.py
|
JarnoRFB/pytest
|
44cd8a3a86354b2b686d0b64f2ac328aca574bc7
|
[
"MIT"
] | 2,479
|
2018-05-28T14:51:29.000Z
|
2022-03-30T14:41:18.000Z
|
testing/example_scripts/fixtures/custom_item/conftest.py
|
JarnoRFB/pytest
|
44cd8a3a86354b2b686d0b64f2ac328aca574bc7
|
[
"MIT"
] | 7,642
|
2018-05-28T09:38:03.000Z
|
2022-03-31T20:55:48.000Z
|
testing/example_scripts/fixtures/custom_item/conftest.py
|
JarnoRFB/pytest
|
44cd8a3a86354b2b686d0b64f2ac328aca574bc7
|
[
"MIT"
] | 1,303
|
2018-05-29T14:50:02.000Z
|
2022-03-30T17:30:42.000Z
|
import pytest
class CustomItem(pytest.Item):
def runtest(self):
pass
class CustomFile(pytest.File):
def collect(self):
yield CustomItem.from_parent(name="foo", parent=self)
def pytest_collect_file(path, parent):
return CustomFile.from_parent(fspath=path, parent=parent)
| 19
| 61
| 0.717105
| 39
| 304
| 5.487179
| 0.512821
| 0.093458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177632
| 304
| 15
| 62
| 20.266667
| 0.856
| 0
| 0
| 0
| 0
| 0
| 0.009868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.111111
| 0.111111
| 0.111111
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
162f5f00bdafff9bedea86a762cc350c5f2a7389
| 143
|
py
|
Python
|
Practice/Python/SetAdd().py
|
avantikasharma/HackerRank-Solutions
|
a980859ac352688853fcbcf3c7ec6d95685f99ea
|
[
"MIT"
] | 1
|
2018-07-08T15:44:15.000Z
|
2018-07-08T15:44:15.000Z
|
Practice/Python/SetAdd().py
|
avantikasharma/HackerRank-Solutions
|
a980859ac352688853fcbcf3c7ec6d95685f99ea
|
[
"MIT"
] | null | null | null |
Practice/Python/SetAdd().py
|
avantikasharma/HackerRank-Solutions
|
a980859ac352688853fcbcf3c7ec6d95685f99ea
|
[
"MIT"
] | 2
|
2018-08-10T06:49:34.000Z
|
2020-10-01T04:50:59.000Z
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
n=input()
s=set()
for i in range(n):
s.add(raw_input())
print len(s)
| 20.428571
| 69
| 0.685315
| 28
| 143
| 3.464286
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 143
| 6
| 70
| 23.833333
| 0.82906
| 0.468531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| null | null | 0
| 0
| null | null | 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.