hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90b83c141f19001ea74be5f77aed29f7c055ef62
| 3,686
|
py
|
Python
|
z2/part2/batch/jm/parser_errors_2/269298945.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part2/batch/jm/parser_errors_2/269298945.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part2/batch/jm/parser_errors_2/269298945.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 269298945
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 4, 2, 6)
assert board is not None
assert gamma_move(board, 1, 1, 3) == 1
assert gamma_free_fields(board, 1) == 19
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 2, 0, 1) == 1
board638769248 = gamma_board(board)
assert board638769248 is not None
assert board638769248 == (".1...\n"
"..2..\n"
"2....\n"
".....\n")
del board638769248
board638769248 = None
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_busy_fields(board, 1) == 2
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_busy_fields(board, 1) == 3
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 4, 1) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_golden_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 4, 2) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 3, 3) == 1
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_free_fields(board, 1) == 7
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_busy_fields(board, 2) == 5
assert gamma_move(board, 1, 4, 0) == 1
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_free_fields(board, 2) == 6
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 0) == 0
board671028753 = gamma_board(board)
assert board671028753 is not None
assert board671028753 == ("11221\n"
"111.2\n"
"2...2\n"
"1.1.1\n")
del board671028753
board671028753 = None
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_golden_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_busy_fields(board, 1) == 12
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_golden_move(board, 2, 3, 0) == 1
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_busy_fields(board, 1) == 12
assert gamma_move(board, 2, 0, 1) == 0
gamma_delete(board)
| 30.97479
| 46
| 0.657895
| 672
| 3,686
| 3.450893
| 0.061012
| 0.34627
| 0.375162
| 0.500216
| 0.790427
| 0.789133
| 0.668392
| 0.584304
| 0.499353
| 0.488141
| 0
| 0.130319
| 0.183939
| 3,686
| 118
| 47
| 31.237288
| 0.640625
| 0
| 0
| 0.333333
| 0
| 0
| 0.015564
| 0
| 0
| 0
| 0
| 0
| 0.764706
| 1
| 0
| false
| 0
| 0.009804
| 0
| 0.009804
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
90fe58762a9853e240c6462b0eb4e0dd09b32db4
| 164
|
py
|
Python
|
src/kong/exceptions.py
|
SpazioDati/python-kong
|
fbfc0a357a5ea0471c2dc138bcbe521349ded965
|
[
"BSD-2-Clause"
] | null | null | null |
src/kong/exceptions.py
|
SpazioDati/python-kong
|
fbfc0a357a5ea0471c2dc138bcbe521349ded965
|
[
"BSD-2-Clause"
] | null | null | null |
src/kong/exceptions.py
|
SpazioDati/python-kong
|
fbfc0a357a5ea0471c2dc138bcbe521349ded965
|
[
"BSD-2-Clause"
] | 1
|
2020-01-17T06:55:00.000Z
|
2020-01-17T06:55:00.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
class ConflictError(Exception):
pass
class ServerError(Exception):
pass
| 14.909091
| 55
| 0.731707
| 18
| 164
| 6.333333
| 0.833333
| 0.22807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007353
| 0.170732
| 164
| 10
| 56
| 16.4
| 0.830882
| 0.128049
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
29027cc0a2f5770fd0fd19fa0c0cf2a211c23df3
| 870
|
py
|
Python
|
storm_control/test/__init__.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 47
|
2015-02-11T16:05:54.000Z
|
2022-03-26T14:13:12.000Z
|
storm_control/test/__init__.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 110
|
2015-01-30T03:53:41.000Z
|
2021-11-03T15:58:44.000Z
|
storm_control/test/__init__.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 61
|
2015-01-09T18:31:27.000Z
|
2021-12-21T13:07:51.000Z
|
#!/usr/bin/env python
import os
def dataDirectory():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/")
def daveXmlFilePathAndName(filename):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "dave_xml", filename)
def halXmlFilePathAndName(filename):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "hal", filename)
def kilroyXmlFilePathAndName(filename):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "kilroy_xml", filename)
def logDirectory():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs/")
def steveXmlFilePathAndName(filename):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "steve_xml", filename)
def xmlFilePathAndName(filename):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "xml", filename)
| 34.8
| 91
| 0.744828
| 117
| 870
| 5.273504
| 0.239316
| 0.204214
| 0.136143
| 0.181524
| 0.58671
| 0.58671
| 0.58671
| 0.58671
| 0.58671
| 0.58671
| 0
| 0
| 0.096552
| 870
| 24
| 92
| 36.25
| 0.784987
| 0.022989
| 0
| 0
| 0
| 0
| 0.050648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.466667
| false
| 0
| 0.066667
| 0.466667
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
291268307d9f0ddc0f4c38b786447465cafe808b
| 305
|
py
|
Python
|
orttraining/orttraining/python/training/optim/__init__.py
|
lchang20/onnxruntime
|
97b8f6f394ae02c73ed775f456fd85639c91ced1
|
[
"MIT"
] | 1
|
2022-03-16T11:30:16.000Z
|
2022-03-16T11:30:16.000Z
|
orttraining/orttraining/python/training/optim/__init__.py
|
lchang20/onnxruntime
|
97b8f6f394ae02c73ed775f456fd85639c91ced1
|
[
"MIT"
] | 10
|
2022-02-15T21:52:24.000Z
|
2022-02-23T21:26:01.000Z
|
orttraining/orttraining/python/training/optim/__init__.py
|
lchang20/onnxruntime
|
97b8f6f394ae02c73ed775f456fd85639c91ced1
|
[
"MIT"
] | null | null | null |
from .config import _OptimizerConfig, AdamConfig, LambConfig, SGDConfig
from .lr_scheduler import _LRScheduler, ConstantWarmupLRScheduler, CosineWarmupLRScheduler,\
LinearWarmupLRScheduler, PolyWarmupLRScheduler
from .fused_adam import FusedAdam, AdamWMode
from .fp16_optimizer import FP16_Optimizer
| 43.571429
| 92
| 0.862295
| 28
| 305
| 9.178571
| 0.714286
| 0.101167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.095082
| 305
| 6
| 93
| 50.833333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
29149f486db47f946e722da6d0f9834f2c634b09
| 189
|
py
|
Python
|
Services/FacesLocator.py
|
miguel-kjh/ReID-in-Sport-competitions
|
c7abd47e6ce659aa73e76f797215941a11416981
|
[
"MIT"
] | 5
|
2021-06-30T07:41:42.000Z
|
2022-01-20T17:57:36.000Z
|
Services/FacesLocator.py
|
miguel-kjh/ReID-in-Sport-competitions
|
c7abd47e6ce659aa73e76f797215941a11416981
|
[
"MIT"
] | null | null | null |
Services/FacesLocator.py
|
miguel-kjh/ReID-in-Sport-competitions
|
c7abd47e6ce659aa73e76f797215941a11416981
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from Domain.FacesCollection import FacesCollection
class FacesLocator:
@abstractmethod
def locate(self, file: str) -> FacesCollection:
pass
| 21
| 51
| 0.756614
| 19
| 189
| 7.526316
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 189
| 9
| 52
| 21
| 0.934641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
2940752032dedd900b99f13119d37d7b3a695e6a
| 16
|
py
|
Python
|
examples/int/ex1.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/int/ex1.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/int/ex1.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
print(int(5.5))
| 8
| 15
| 0.625
| 4
| 16
| 2.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0.0625
| 16
| 1
| 16
| 16
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
297ad37f13b67c5b718867ebf1a9ad4b797a5179
| 3,734
|
py
|
Python
|
tests/phase0/block_processing/test_process_deposit.py
|
hwwhww/eth2.0-specs
|
729757d4279db4535b176361d67d1567c0df314b
|
[
"CC0-1.0"
] | 3
|
2020-07-22T14:51:07.000Z
|
2022-01-02T12:02:45.000Z
|
tests/phase0/block_processing/test_process_deposit.py
|
hwwhww/eth2.0-specs
|
729757d4279db4535b176361d67d1567c0df314b
|
[
"CC0-1.0"
] | null | null | null |
tests/phase0/block_processing/test_process_deposit.py
|
hwwhww/eth2.0-specs
|
729757d4279db4535b176361d67d1567c0df314b
|
[
"CC0-1.0"
] | null | null | null |
from copy import deepcopy
import pytest
import build.phase0.spec as spec
from build.phase0.spec import (
Deposit,
process_deposit,
)
from tests.phase0.helpers import (
build_deposit,
)
# mark entire file as 'voluntary_exits'
pytestmark = pytest.mark.voluntary_exits
def test_success(state, deposit_data_leaves, pubkeys, privkeys):
pre_state = deepcopy(state)
index = len(deposit_data_leaves)
pubkey = pubkeys[index]
privkey = privkeys[index]
deposit, root, deposit_data_leaves = build_deposit(
pre_state,
deposit_data_leaves,
pubkey,
privkey,
spec.MAX_DEPOSIT_AMOUNT,
)
pre_state.latest_eth1_data.deposit_root = root
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
post_state = deepcopy(pre_state)
process_deposit(post_state, deposit)
assert len(post_state.validator_registry) == len(state.validator_registry) + 1
assert len(post_state.validator_balances) == len(state.validator_balances) + 1
assert post_state.validator_registry[index].pubkey == pubkeys[index]
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
return pre_state, deposit, post_state
def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys):
pre_state = deepcopy(state)
validator_index = 0
amount = spec.MAX_DEPOSIT_AMOUNT // 4
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
deposit, root, deposit_data_leaves = build_deposit(
pre_state,
deposit_data_leaves,
pubkey,
privkey,
amount,
)
pre_state.latest_eth1_data.deposit_root = root
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
pre_balance = pre_state.validator_balances[validator_index]
post_state = deepcopy(pre_state)
process_deposit(post_state, deposit)
assert len(post_state.validator_registry) == len(state.validator_registry)
assert len(post_state.validator_balances) == len(state.validator_balances)
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
assert post_state.validator_balances[validator_index] == pre_balance + amount
return pre_state, deposit, post_state
def test_wrong_index(state, deposit_data_leaves, pubkeys, privkeys):
pre_state = deepcopy(state)
index = len(deposit_data_leaves)
pubkey = pubkeys[index]
privkey = privkeys[index]
deposit, root, deposit_data_leaves = build_deposit(
pre_state,
deposit_data_leaves,
pubkey,
privkey,
spec.MAX_DEPOSIT_AMOUNT,
)
# mess up deposit_index
deposit.index = pre_state.deposit_index + 1
pre_state.latest_eth1_data.deposit_root = root
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
post_state = deepcopy(pre_state)
with pytest.raises(AssertionError):
process_deposit(post_state, deposit)
return pre_state, deposit, None
def test_bad_merkle_proof(state, deposit_data_leaves, pubkeys, privkeys):
pre_state = deepcopy(state)
index = len(deposit_data_leaves)
pubkey = pubkeys[index]
privkey = privkeys[index]
deposit, root, deposit_data_leaves = build_deposit(
pre_state,
deposit_data_leaves,
pubkey,
privkey,
spec.MAX_DEPOSIT_AMOUNT,
)
# mess up merkle branch
deposit.proof[-1] = spec.ZERO_HASH
pre_state.latest_eth1_data.deposit_root = root
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
post_state = deepcopy(pre_state)
with pytest.raises(AssertionError):
process_deposit(post_state, deposit)
return pre_state, deposit, None
| 28.075188
| 82
| 0.72978
| 474
| 3,734
| 5.398734
| 0.130802
| 0.081282
| 0.126221
| 0.074248
| 0.789371
| 0.761235
| 0.761235
| 0.761235
| 0.732317
| 0.732317
| 0
| 0.006319
| 0.194697
| 3,734
| 132
| 83
| 28.287879
| 0.844696
| 0.021693
| 0
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107527
| 1
| 0.043011
| false
| 0
| 0.053763
| 0
| 0.139785
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
297ad51178c65611a1bb2760695fd6131e60c7c4
| 223
|
py
|
Python
|
payment/admin.py
|
satsuma-ken/PurchaseManagement
|
c6eda5c673520fc98b23da137cd26c1cc384809c
|
[
"BSD-3-Clause"
] | null | null | null |
payment/admin.py
|
satsuma-ken/PurchaseManagement
|
c6eda5c673520fc98b23da137cd26c1cc384809c
|
[
"BSD-3-Clause"
] | null | null | null |
payment/admin.py
|
satsuma-ken/PurchaseManagement
|
c6eda5c673520fc98b23da137cd26c1cc384809c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from payment.models import *
admin.site.register(Company)
admin.site.register(Department)
admin.site.register(Currency)
admin.site.register(Bills_Header)
admin.site.register(Bills_Detail)
| 22.3
| 33
| 0.829596
| 31
| 223
| 5.903226
| 0.483871
| 0.245902
| 0.464481
| 0.240437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067265
| 223
| 9
| 34
| 24.777778
| 0.879808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2983c08c163296c60a69ac5c29fdeb3bb13e53e2
| 40
|
py
|
Python
|
fake_success.py
|
rmorgan10/error_cat
|
5d0c17f9636255f34926570e5ce4a4e49a47ddf9
|
[
"MIT"
] | 3
|
2019-10-31T14:21:41.000Z
|
2020-04-23T13:45:00.000Z
|
fake_success.py
|
rmorgan10/error_cat
|
5d0c17f9636255f34926570e5ce4a4e49a47ddf9
|
[
"MIT"
] | null | null | null |
fake_success.py
|
rmorgan10/error_cat
|
5d0c17f9636255f34926570e5ce4a4e49a47ddf9
|
[
"MIT"
] | 1
|
2019-12-12T21:55:30.000Z
|
2019-12-12T21:55:30.000Z
|
# a code that works
if 5 > 3:
pass
| 8
| 19
| 0.55
| 8
| 40
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.375
| 40
| 4
| 20
| 10
| 0.8
| 0.425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
463129e063ffbda100a1d1e205e2d007c145e5d2
| 84
|
py
|
Python
|
utils/graph/__init__.py
|
Guangjun-A/---11-
|
43ecfae86e5785410d5812d22700bf32397a75e1
|
[
"Apache-2.0"
] | null | null | null |
utils/graph/__init__.py
|
Guangjun-A/---11-
|
43ecfae86e5785410d5812d22700bf32397a75e1
|
[
"Apache-2.0"
] | null | null | null |
utils/graph/__init__.py
|
Guangjun-A/---11-
|
43ecfae86e5785410d5812d22700bf32397a75e1
|
[
"Apache-2.0"
] | null | null | null |
from . import tools
from . import ntu_rgb_d
from . import kinetics
from . import fsd
| 21
| 23
| 0.77381
| 14
| 84
| 4.5
| 0.571429
| 0.634921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 84
| 4
| 24
| 21
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
463bc80934e144f36327c37444fe895ea5c1a587
| 66
|
py
|
Python
|
jax_audio/utils/__init__.py
|
machineko/jaxaudio
|
7320a4ecd8f267e206c36be3551f919faa08d3b0
|
[
"MIT"
] | 2
|
2021-02-24T08:42:01.000Z
|
2021-05-11T15:47:11.000Z
|
jax_audio/utils/__init__.py
|
machineko/jaxaudio
|
7320a4ecd8f267e206c36be3551f919faa08d3b0
|
[
"MIT"
] | 1
|
2021-02-21T23:17:48.000Z
|
2021-02-21T23:42:23.000Z
|
jax_audio/utils/__init__.py
|
machineko/jaxaudio
|
7320a4ecd8f267e206c36be3551f919faa08d3b0
|
[
"MIT"
] | null | null | null |
from jax_audio.utils.audio_utils import read_wav, write_wav, stft
| 33
| 65
| 0.848485
| 12
| 66
| 4.333333
| 0.75
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 66
| 1
| 66
| 66
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4646f351ef11026d9213ffd8976f82a1235bccab
| 106
|
py
|
Python
|
lib/enthought/etsconfig/api.py
|
mattfoster/matplotlib
|
0b47697b19b77226c633ec6a3d74a2199a153315
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2016-05-08T18:33:12.000Z
|
2016-05-08T18:33:12.000Z
|
lib/enthought/etsconfig/api.py
|
mattfoster/matplotlib
|
0b47697b19b77226c633ec6a3d74a2199a153315
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/enthought/etsconfig/api.py
|
mattfoster/matplotlib
|
0b47697b19b77226c633ec6a3d74a2199a153315
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from enthought.etsconfig.version import version, version as __version__
from etsconfig import ETSConfig
| 21.2
| 71
| 0.849057
| 13
| 106
| 6.615385
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122642
| 106
| 4
| 72
| 26.5
| 0.924731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
465f9cc58844455d3c67b3a794b9bc1a7ac028db
| 114
|
py
|
Python
|
eachday/log.py
|
bcongdon/EveryDay
|
e09bc7d8a4bdfd5775e90492ba282a0c49a2c67c
|
[
"MIT"
] | 4
|
2017-06-10T19:38:44.000Z
|
2021-08-19T12:39:25.000Z
|
eachday/log.py
|
bcongdon/EachDay
|
e09bc7d8a4bdfd5775e90492ba282a0c49a2c67c
|
[
"MIT"
] | 7
|
2017-05-28T04:36:58.000Z
|
2018-03-01T04:43:32.000Z
|
eachday/log.py
|
bcongdon/EveryDay
|
e09bc7d8a4bdfd5775e90492ba282a0c49a2c67c
|
[
"MIT"
] | 4
|
2018-07-07T15:24:08.000Z
|
2020-10-19T14:32:04.000Z
|
from werkzeug.local import LocalProxy
from flask import current_app
log = LocalProxy(lambda: current_app.logger)
| 22.8
| 44
| 0.833333
| 16
| 114
| 5.8125
| 0.6875
| 0.215054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114035
| 114
| 4
| 45
| 28.5
| 0.920792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4661c37353b3e408cd76ba0b1aed4992f3bdb864
| 29
|
py
|
Python
|
proposals/utils/__init__.py
|
mindruion/test
|
d27ef1caf8f76aead934bc83be7729f79a4be503
|
[
"MIT"
] | 2
|
2017-04-22T11:07:13.000Z
|
2018-03-02T12:23:24.000Z
|
proposals/utils/__init__.py
|
mindruion/test
|
d27ef1caf8f76aead934bc83be7729f79a4be503
|
[
"MIT"
] | 124
|
2020-04-30T07:06:58.000Z
|
2022-03-28T12:50:16.000Z
|
proposals/utils/__init__.py
|
mindruion/test
|
d27ef1caf8f76aead934bc83be7729f79a4be503
|
[
"MIT"
] | 1
|
2021-08-04T11:44:21.000Z
|
2021-08-04T11:44:21.000Z
|
from .proposal_utils import *
| 29
| 29
| 0.827586
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
466cb3cb514b6a32ba6cba2f0e04159ccb298fb4
| 10,481
|
py
|
Python
|
plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py
|
Toktar/indy-plenum
|
2f1f838332b0506f8dd8837ac341cba0cd3f7ff4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py
|
Toktar/indy-plenum
|
2f1f838332b0506f8dd8837ac341cba0cd3f7ff4
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/checkpoints/test_backup_replica_resumes_ordering_on_lag_in_checkpoints.py
|
Toktar/indy-plenum
|
2f1f838332b0506f8dd8837ac341cba0cd3f7ff4
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.server.replica import Replica
from plenum.test import waits
from plenum.test.delayers import cDelay, chk_delay
from plenum.test.helper import sdk_send_random_requests, assertExp, sdk_send_random_and_check, assert_eq
from stp_core.loop.eventually import eventually
nodeCount = 4
CHK_FREQ = 5
LOG_SIZE = 3 * CHK_FREQ
@pytest.fixture(scope="module")
def tconf(tconf):
old = tconf.Max3PCBatchesInFlight
# This test requires lots of batches in flight (actually 8) in order to function properly,
# so we allow any number to simplify things
tconf.Max3PCBatchesInFlight = None
yield tconf
tconf.Max3PCBatchesInFlight = old
def test_backup_replica_resumes_ordering_on_lag_in_checkpoints(
looper, chkFreqPatched, reqs_for_checkpoint,
one_replica_and_others_in_backup_instance,
sdk_pool_handle, sdk_wallet_client, view_change_done, txnPoolNodeSet):
"""
Verifies resumption of ordering 3PC-batches on a backup replica
on detection of a lag in checkpoints
"""
slow_replica, other_replicas = one_replica_and_others_in_backup_instance
view_no = slow_replica.viewNo
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda: assert_eq(slow_replica.last_ordered_3pc, (view_no, 2)),
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Don't receive Commits from two replicas
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[0].node.name))
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[1].node.name))
# Send a request for which the replica will not be able to order the batch
# due to an insufficient count of Commits
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Recover reception of Commits
slow_replica.node.nodeIbStasher.drop_delayeds()
slow_replica.node.nodeIbStasher.resetDelays()
# Send requests but in a quantity insufficient
# for catch-up number of checkpoints
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
reqs_for_checkpoint - 3)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Ensure that the replica has not ordered any batches
# after the very first one
assert slow_replica.last_ordered_3pc == (view_no, 2)
# Ensure that the watermarks have not been shifted since the view start
assert slow_replica.h == 0
assert slow_replica.H == LOG_SIZE
# Ensure that the collections related to requests, batches and
# own checkpoints are not empty.
# (Note that a primary replica removes requests from requestQueues
# when creating a batch with them.)
if slow_replica.isPrimary:
assert slow_replica._ordering_service.sentPrePrepares
else:
assert slow_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID]
assert slow_replica._ordering_service.prePrepares
assert slow_replica._ordering_service.prepares
assert slow_replica._ordering_service.commits
assert slow_replica._ordering_service.batches
assert slow_replica._checkpointer._checkpoint_state
# Ensure that there are some quorumed stashed checkpoints
assert slow_replica._checkpointer._stashed_checkpoints_with_quorum()
# Send more requests to reach catch-up number of checkpoints
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, reqs_for_checkpoint)
# Ensure that the replica has adjusted last_ordered_3pc to the end
# of the last checkpoint
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == \
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Ensure that the watermarks have been shifted so that the lower watermark
# has the same value as last_ordered_3pc
assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE
# Ensure that the collections related to requests, batches and
# own checkpoints have been cleared
assert not slow_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID]
assert not slow_replica._ordering_service.sentPrePrepares
assert not slow_replica._ordering_service.prePrepares
assert not slow_replica._ordering_service.prepares
assert not slow_replica._ordering_service.commits
assert not slow_replica._ordering_service.batches
assert not slow_replica._checkpointer._checkpoint_state
# Ensure that now there are no quorumed stashed checkpoints
assert not slow_replica._checkpointer._stashed_checkpoints_with_quorum()
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
def test_backup_replica_resumes_ordering_on_lag_if_checkpoints_belate(
looper, chkFreqPatched, reqs_for_checkpoint,
one_replica_and_others_in_backup_instance,
sdk_pool_handle, sdk_wallet_client, view_change_done):
"""
Verifies resumption of ordering 3PC-batches on a backup replica
on detection of a lag in checkpoints in case it is detected after
some batch in the next checkpoint has already been committed but cannot
be ordered out of turn
"""
slow_replica, other_replicas = one_replica_and_others_in_backup_instance
view_no = slow_replica.viewNo
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, 2)),
slow_replica,
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Don't receive Commits from two replicas
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[0].node.name))
slow_replica.node.nodeIbStasher.delay(
cDelay(instId=1, sender_filter=other_replicas[1].node.name))
# Send a request for which the replica will not be able to order the batch
# due to an insufficient count of Commits
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Receive further Commits from now on
slow_replica.node.nodeIbStasher.drop_delayeds()
slow_replica.node.nodeIbStasher.resetDelays()
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, 2)),
slow_replica,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Send requests but in a quantity insufficient
# for catch-up number of checkpoints
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
reqs_for_checkpoint - 2)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Don't receive Checkpoints
slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1))
# Send more requests to reach catch-up number of checkpoints
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
reqs_for_checkpoint)
# Send a request that starts a new checkpoint
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))
# Ensure that the replica has not ordered any batches
# after the very first one
assert slow_replica.last_ordered_3pc == (view_no, 2)
# Ensure that the watermarks have not been shifted since the view start
assert slow_replica.h == 0
assert slow_replica.H == LOG_SIZE
# Ensure that there are some quorumed stashed checkpoints
assert slow_replica._checkpointer._stashed_checkpoints_with_quorum()
# Receive belated Checkpoints
slow_replica.node.nodeIbStasher.reset_delays_and_process_delayeds()
# Ensure that the replica has ordered the batch for the last sent request
looper.run(
eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == \
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 2)),
slow_replica,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
# Ensure that the watermarks have been shifted so that the lower watermark
# now equals to the end of the last stable checkpoint in the instance
assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE
# Ensure that now there are no quorumed stashed checkpoints
assert not slow_replica._checkpointer._stashed_checkpoints_with_quorum()
# Send a request and ensure that the replica orders the batch for it
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
looper.run(
eventually(lambda: assertExp(slow_replica.last_ordered_3pc ==
(view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 3)),
retryWait=1,
timeout=waits.expectedTransactionExecutionTime(nodeCount)))
| 45.372294
| 119
| 0.732182
| 1,324
| 10,481
| 5.524169
| 0.164653
| 0.082718
| 0.044162
| 0.028439
| 0.844681
| 0.811731
| 0.738994
| 0.719716
| 0.691414
| 0.666803
| 0
| 0.007368
| 0.210094
| 10,481
| 230
| 120
| 45.569565
| 0.876072
| 0.256941
| 0
| 0.612403
| 0
| 0
| 0.00078
| 0
| 0
| 0
| 0
| 0
| 0.27907
| 1
| 0.023256
| false
| 0
| 0.054264
| 0
| 0.077519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d3b25408f8f2d2f8b7029cae77c4fc2eb75cc42d
| 28
|
py
|
Python
|
src/aio_dtls/tls/record_layer.py
|
businka/aio_dtls
|
0dba40d425b443e5ceb516011aadf58f573a4dc8
|
[
"MIT"
] | null | null | null |
src/aio_dtls/tls/record_layer.py
|
businka/aio_dtls
|
0dba40d425b443e5ceb516011aadf58f573a4dc8
|
[
"MIT"
] | null | null | null |
src/aio_dtls/tls/record_layer.py
|
businka/aio_dtls
|
0dba40d425b443e5ceb516011aadf58f573a4dc8
|
[
"MIT"
] | null | null | null |
class RecordLayer:
pass
| 9.333333
| 18
| 0.714286
| 3
| 28
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 28
| 2
| 19
| 14
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d3d54d1ebe1a8c9df759068ad4e758a2ccf0033c
| 84
|
py
|
Python
|
1_review/hello.py
|
nicholasjamesbaker/asd-class
|
c524971f800d649f4e18cc1e555c348029f6af1b
|
[
"MIT"
] | 2
|
2022-01-17T13:13:23.000Z
|
2022-03-02T18:25:24.000Z
|
1_review/hello.py
|
nicholasjamesbaker/asd-class
|
c524971f800d649f4e18cc1e555c348029f6af1b
|
[
"MIT"
] | null | null | null |
1_review/hello.py
|
nicholasjamesbaker/asd-class
|
c524971f800d649f4e18cc1e555c348029f6af1b
|
[
"MIT"
] | 3
|
2022-01-12T17:58:44.000Z
|
2022-01-16T15:17:58.000Z
|
print('Hello, World!')
print("welcome to CP1895!")
print('Great work everyone!!!')
| 16.8
| 31
| 0.678571
| 11
| 84
| 5.181818
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053333
| 0.107143
| 84
| 5
| 31
| 16.8
| 0.706667
| 0
| 0
| 0
| 0
| 0
| 0.623529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
d3f4a0282e44b5a1900b7029370f8f4985a35b4e
| 52
|
py
|
Python
|
recognition/__init__.py
|
kalenforn/MMVA
|
1e4ec5417d4497a14f226fab8a66fe065a9f0f65
|
[
"MIT"
] | 4
|
2021-12-16T08:17:49.000Z
|
2022-03-12T10:14:50.000Z
|
recognition/__init__.py
|
kalenforn/video-content-clean
|
4b6e572ec034fbe2e668c250cff8e1c9a13dd0e0
|
[
"MIT"
] | null | null | null |
recognition/__init__.py
|
kalenforn/video-content-clean
|
4b6e572ec034fbe2e668c250cff8e1c9a13dd0e0
|
[
"MIT"
] | 1
|
2021-12-14T08:17:41.000Z
|
2021-12-14T08:17:41.000Z
|
# from .recognitionProcess import RecognitionProcess
| 52
| 52
| 0.884615
| 4
| 52
| 11.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 52
| 1
| 52
| 52
| 0.958333
| 0.961538
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d3fda5538d02a0c4edf46fd1b2d1878414c279cd
| 14,251
|
py
|
Python
|
tests/engine/path_helper.py
|
infosecjosh/plaso
|
7b5fc33591c60e89afc231a451449d40e02d8985
|
[
"Apache-2.0"
] | null | null | null |
tests/engine/path_helper.py
|
infosecjosh/plaso
|
7b5fc33591c60e89afc231a451449d40e02d8985
|
[
"Apache-2.0"
] | null | null | null |
tests/engine/path_helper.py
|
infosecjosh/plaso
|
7b5fc33591c60e89afc231a451449d40e02d8985
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the path helper."""
from __future__ import unicode_literals
import os
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.containers import artifacts
from plaso.engine import path_helper
from tests import test_lib as shared_test_lib
class PathHelperTest(shared_test_lib.BaseTestCase):
"""Tests for the path helper."""
# pylint: disable=protected-access
def testExpandUsersHomeDirectoryPathSegments(self):
"""Tests the _ExpandUsersHomeDirectoryPathSegments function."""
user_account_artifact1 = artifacts.UserAccountArtifact(
user_directory='/home/Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(
user_directory='/Users/Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.homedir%%', '.bashrc']
expanded_paths = (
path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(
path_segments, '/', user_accounts))
expected_expanded_paths = [
'/home/Test1/.bashrc',
'/Users/Test2/.bashrc']
self.assertEqual(expanded_paths, expected_expanded_paths)
user_account_artifact1 = artifacts.UserAccountArtifact(
user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(
user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.userprofile%%', 'Profile']
expanded_paths = (
path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(
path_segments, '\\', user_accounts))
expected_expanded_paths = [
'\\Users\\Test1\\Profile',
'\\Users\\Test2\\Profile']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp']
expanded_paths = (
path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(
path_segments, '\\', user_accounts))
expected_expanded_paths = ['\\Temp']
self.assertEqual(expanded_paths, expected_expanded_paths)
path_segments = ['C:', 'Temp', '%%users.userprofile%%']
expanded_paths = (
path_helper.PathHelper._ExpandUsersHomeDirectoryPathSegments(
path_segments, '\\', user_accounts))
expected_expanded_paths = ['\\Temp\\%%users.userprofile%%']
self.assertEqual(expanded_paths, expected_expanded_paths)
def testExpandUsersVariablePathSegments(self):
"""Tests the _ExpandUsersVariablePathSegments function."""
user_account_artifact1 = artifacts.UserAccountArtifact(
user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(
user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path_segments = ['%%users.appdata%%', 'Microsoft', 'Windows', 'Recent']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(
path_segments, '\\', user_accounts)
expected_expanded_paths = [
'\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent',
'\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent',
'\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent',
'\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)
path_segments = ['C:', 'Windows']
expanded_paths = path_helper.PathHelper._ExpandUsersVariablePathSegments(
path_segments, '\\', user_accounts)
expected_expanded_paths = ['\\Windows']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)
def testStripDriveFromPath(self):
"""Tests the _StripDriveFromPath function."""
stripped_path = path_helper.PathHelper._StripDriveFromPath('C:\\Windows')
self.assertEqual(stripped_path, '\\Windows')
stripped_path = path_helper.PathHelper._StripDriveFromPath(
'%SystemDrive%\\Windows')
self.assertEqual(stripped_path, '\\Windows')
stripped_path = path_helper.PathHelper._StripDriveFromPath(
'%%environ_systemdrive%%\\Windows')
self.assertEqual(stripped_path, '\\Windows')
stripped_path = path_helper.PathHelper._StripDriveFromPath('\\Windows')
self.assertEqual(stripped_path, '\\Windows')
def testAppendPathEntries(self):
"""Tests the AppendPathEntries function."""
separator = '\\'
path = '\\Windows\\Test'
# Test depth of ten skipping first entry.
# The path will have 9 entries as the default depth for ** is 10, but the
# first entry is being skipped.
count = 10
skip_first = True
paths = path_helper.PathHelper.AppendPathEntries(
path, separator, count, skip_first)
# Nine paths returned
self.assertEqual(len(paths), 9)
# Nine paths in total, each one level deeper than the previous.
check_paths = sorted([
'\\Windows\\Test\\*\\*',
'\\Windows\\Test\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*\\*\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*\\*\\*\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*\\*\\*\\*\\*\\*\\*'])
self.assertEqual(sorted(paths), check_paths)
# Now test with skip_first set to False, but only a depth of 4.
# the path will have a total of 4 entries.
count = 4
skip_first = False
paths = path_helper.PathHelper.AppendPathEntries(
path, separator, count, skip_first)
# Four paths returned
self.assertEqual(len(paths), 4)
# Four paths in total, each one level deeper than the previous.
check_paths = sorted([
'\\Windows\\Test\\*',
'\\Windows\\Test\\*\\*',
'\\Windows\\Test\\*\\*\\*',
'\\Windows\\Test\\*\\*\\*\\*'])
self.assertEqual(sorted(paths), check_paths)
def testExpandRecursiveGlobs(self):
"""Tests the _ExpandRecursiveGlobs function."""
separator = '/'
# Test a path with a trailing /, which means first directory is skipped.
# The path will have 9 entries as the default depth for ** is 10, but the
# first entry is being skipped.
path = '/etc/sysconfig/**/'
paths = path_helper.PathHelper.ExpandRecursiveGlobs(path, separator)
# Nine paths returned
self.assertEqual(len(paths), 9)
# Nine paths in total, each one level deeper than the previous.
check_paths = sorted([
'/etc/sysconfig/*/*',
'/etc/sysconfig/*/*/*',
'/etc/sysconfig/*/*/*/*',
'/etc/sysconfig/*/*/*/*/*',
'/etc/sysconfig/*/*/*/*/*/*',
'/etc/sysconfig/*/*/*/*/*/*/*',
'/etc/sysconfig/*/*/*/*/*/*/*/*',
'/etc/sysconfig/*/*/*/*/*/*/*/*/*',
'/etc/sysconfig/*/*/*/*/*/*/*/*/*/*'])
self.assertEqual(sorted(paths), check_paths)
# Now test with no trailing separator, but only a depth of 4.
# the path will have a total of 4 entries.
path = '/etc/sysconfig/**4'
paths = path_helper.PathHelper.ExpandRecursiveGlobs(path, separator)
# Four paths returned
self.assertEqual(len(paths), 4)
# Four paths in total, each one level deeper than the previous.
check_paths = sorted([
'/etc/sysconfig/*',
'/etc/sysconfig/*/*',
'/etc/sysconfig/*/*/*',
'/etc/sysconfig/*/*/*/*'])
self.assertEqual(sorted(paths), check_paths)
def testExpandUsersVariablePath(self):
"""Tests the ExpandUsersVariablePath function."""
user_account_artifact1 = artifacts.UserAccountArtifact(
user_directory='C:\\Users\\Test1', username='Test1')
user_account_artifact2 = artifacts.UserAccountArtifact(
user_directory='%SystemDrive%\\Users\\Test2', username='Test2')
user_accounts = [user_account_artifact1, user_account_artifact2]
path = '%%users.appdata%%\\Microsoft\\Windows\\Recent'
expanded_paths = path_helper.PathHelper.ExpandUsersVariablePath(
path, '\\', user_accounts)
expected_expanded_paths = [
'\\Users\\Test1\\AppData\\Roaming\\Microsoft\\Windows\\Recent',
'\\Users\\Test1\\Application Data\\Microsoft\\Windows\\Recent',
'\\Users\\Test2\\AppData\\Roaming\\Microsoft\\Windows\\Recent',
'\\Users\\Test2\\Application Data\\Microsoft\\Windows\\Recent']
self.assertEqual(sorted(expanded_paths), expected_expanded_paths)
def testExpandWindowsPath(self):
"""Tests the ExpandWindowsPath function."""
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
expanded_path = path_helper.PathHelper.ExpandWindowsPath(
'%SystemRoot%\\System32', [environment_variable])
self.assertEqual(expanded_path, 'C:\\Windows\\System32')
expanded_path = path_helper.PathHelper.ExpandWindowsPath(
'C:\\Windows\\System32', [environment_variable])
self.assertEqual(expanded_path, 'C:\\Windows\\System32')
expanded_path = path_helper.PathHelper.ExpandWindowsPath(
'%SystemRoot%\\System32', None)
self.assertEqual(expanded_path, '%SystemRoot%\\System32')
expanded_path = path_helper.PathHelper.ExpandWindowsPath(
'%Bogus%\\System32', [environment_variable])
self.assertEqual(expanded_path, '%Bogus%\\System32')
expanded_path = path_helper.PathHelper.ExpandWindowsPath(
'%%environ_systemroot%%\\System32', [environment_variable])
self.assertEqual(expanded_path, '\\Windows\\System32')
# Test non-string environment variable.
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value=('bogus', 0))
expanded_path = path_helper.PathHelper.ExpandWindowsPath(
'%SystemRoot%\\System32', [environment_variable])
self.assertEqual(expanded_path, '%SystemRoot%\\System32')
def testGetDisplayNameForPathSpec(self):
"""Tests the GetDisplayNameForPathSpec function."""
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
expected_display_name = 'OS:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
os_path_spec)
self.assertEqual(display_name, expected_display_name)
gzip_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)
expected_display_name = 'GZIP:{0:s}'.format(test_path)
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
gzip_path_spec)
self.assertEqual(display_name, expected_display_name)
test_path = self._GetTestFilePath(['vsstest.qcow2'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
vshadow_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_VSHADOW, location='/vss2',
store_index=1, parent=qcow_path_spec)
tsk_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=35, location='/syslog.gz',
parent=vshadow_path_spec)
expected_display_name = 'VSS2:TSK:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
tsk_path_spec)
self.assertEqual(display_name, expected_display_name)
expected_display_name = 'VSS2:TSK:C:/syslog.gz'
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
tsk_path_spec, text_prepend='C:')
self.assertEqual(display_name, expected_display_name)
# Test without path specification.
display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(None)
self.assertIsNone(display_name)
# Test path specification without location
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
self.assertIsNone(display_name)
def testGetRelativePathForPathSpec(self):
"""Tests the GetRelativePathForPathSpec function."""
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(
os_path_spec)
self.assertEqual(relative_path, test_path)
# Test path specification with mount point.
mount_path = self._GetTestFilePath([])
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(
os_path_spec, mount_path=mount_path)
expected_relative_path = '{0:s}syslog.gz'.format(os.path.sep)
self.assertEqual(relative_path, expected_relative_path)
# Test path specification with incorrect mount point.
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(
os_path_spec, mount_path='/bogus')
self.assertEqual(relative_path, test_path)
# Test path specification with data stream.
os_path_spec.data_stream = 'MYDATA'
expected_relative_path = '{0:s}:MYDATA'.format(test_path)
relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(
os_path_spec)
self.assertEqual(relative_path, expected_relative_path)
# Test without path specification.
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(None)
self.assertIsNone(display_name)
# Test path specification without location.
os_path_spec.location = None
qcow_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
display_name = path_helper.PathHelper.GetRelativePathForPathSpec(
qcow_path_spec)
self.assertIsNone(display_name)
if __name__ == '__main__':
unittest.main()
| 39.807263
| 78
| 0.701214
| 1,485
| 14,251
| 6.470034
| 0.129293
| 0.030808
| 0.066611
| 0.034971
| 0.812032
| 0.791632
| 0.75843
| 0.718047
| 0.688489
| 0.650812
| 0
| 0.008322
| 0.165252
| 14,251
| 357
| 79
| 39.918768
| 0.799344
| 0.119641
| 0
| 0.568465
| 0
| 0
| 0.184481
| 0.127267
| 0
| 0
| 0
| 0
| 0.153527
| 1
| 0.037344
| false
| 0
| 0.033195
| 0
| 0.074689
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
31577405f53748d7365a5f8d794f83bd260f696d
| 32
|
py
|
Python
|
python/testData/completion/mockPatchObject2Py2/a.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/mockPatchObject2Py2/a.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/mockPatchObject2Py2/a.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
import mock
mock.patch.object()
| 10.666667
| 19
| 0.78125
| 5
| 32
| 5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 3
| 19
| 10.666667
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3163d84aecc0a1cc5bff12f3a8c6cb19c7f9f516
| 32
|
py
|
Python
|
tests/conftest.py
|
pretix-unofficial/pretix-hide-sold-out
|
9a529586e44e2a6f7fb2e60b0ae578805d725185
|
[
"Apache-2.0"
] | 3
|
2021-04-25T13:10:48.000Z
|
2021-06-24T22:35:44.000Z
|
tests/conftest.py
|
pretix-unofficial/pretix-hide-sold-out
|
9a529586e44e2a6f7fb2e60b0ae578805d725185
|
[
"Apache-2.0"
] | 6
|
2021-04-24T20:47:51.000Z
|
2021-08-29T13:56:14.000Z
|
tests/conftest.py
|
pretix-unofficial/pretix-hide-sold-out
|
9a529586e44e2a6f7fb2e60b0ae578805d725185
|
[
"Apache-2.0"
] | 1
|
2021-03-25T12:02:38.000Z
|
2021-03-25T12:02:38.000Z
|
# put your pytest fixtures here
| 16
| 31
| 0.78125
| 5
| 32
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 32
| 1
| 32
| 32
| 0.961538
| 0.90625
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
317d5f03a8b2856ea2549182d7ae07feae81ac8e
| 1,013
|
py
|
Python
|
openstack/tests/functional/cloud/test_coe_clusters.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 99
|
2018-03-28T15:41:45.000Z
|
2022-01-23T17:22:13.000Z
|
openstack/tests/functional/cloud/test_coe_clusters.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 5
|
2018-05-25T16:54:23.000Z
|
2021-11-21T02:27:16.000Z
|
openstack/tests/functional/cloud/test_coe_clusters.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 104
|
2018-04-06T14:33:54.000Z
|
2022-03-01T01:58:09.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_coe_clusters
----------------------------------
Functional tests for `shade` COE clusters methods.
"""
from openstack.tests.functional import base
class TestCompute(base.BaseFunctionalTest):
# NOTE(flwang): Currently, running Magnum on a cloud which doesn't support
# nested virtualization will lead to timeout. So this test file is mostly
# like a note to document why we can't have function testing for Magnum
# clusters CRUD.
pass
| 34.931034
| 78
| 0.729516
| 147
| 1,013
| 5.013605
| 0.680272
| 0.081411
| 0.035278
| 0.043419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00479
| 0.175716
| 1,013
| 28
| 79
| 36.178571
| 0.877844
| 0.846989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
9ede305ba64c653ed6bebf4131ea7f08725dea79
| 1,668
|
py
|
Python
|
pyaz/redis/server_link/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/redis/server_link/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/redis/server_link/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from ... pyaz_utils import _call_az
def create(name, replication_role, resource_group, server_to_link):
'''
Adds a server link to the Redis cache (requires Premium SKU).
Required Parameters:
- name -- Name of the Redis cache.
- replication_role -- Role of the redis cache to be linked
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- server_to_link -- Resource ID or name of the redis cache to be linked
'''
return _call_az("az redis server-link create", locals())
def delete(linked_server_name, name, resource_group):
'''
Required Parameters:
- linked_server_name -- Name of the linked redis cache
- name -- Name of the Redis cache.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az redis server-link delete", locals())
def show(linked_server_name, name, resource_group):
'''
Required Parameters:
- linked_server_name -- Name of the linked redis cache
- name -- Name of the Redis cache.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az redis server-link show", locals())
def list(name, resource_group):
'''
Required Parameters:
- name -- Name of the Redis cache.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az redis server-link list", locals())
| 33.36
| 128
| 0.688249
| 230
| 1,668
| 4.847826
| 0.182609
| 0.13991
| 0.081614
| 0.069955
| 0.788341
| 0.769507
| 0.769507
| 0.698655
| 0.64574
| 0.64574
| 0
| 0
| 0.217026
| 1,668
| 49
| 129
| 34.040816
| 0.853752
| 0.615707
| 0
| 0
| 0
| 0
| 0.204322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
9ee9e43fb933937966fce62df5a87a85d97bf199
| 69
|
py
|
Python
|
app/model/__init__.py
|
metaspace2020/sm-molecular-db
|
5b40e4e84d94e433339f32f1af998acaffa7ef8f
|
[
"Apache-2.0"
] | null | null | null |
app/model/__init__.py
|
metaspace2020/sm-molecular-db
|
5b40e4e84d94e433339f32f1af998acaffa7ef8f
|
[
"Apache-2.0"
] | null | null | null |
app/model/__init__.py
|
metaspace2020/sm-molecular-db
|
5b40e4e84d94e433339f32f1af998acaffa7ef8f
|
[
"Apache-2.0"
] | null | null | null |
from .molecular_db import MolecularDB
from .molecule import Molecule
| 23
| 37
| 0.855072
| 9
| 69
| 6.444444
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 38
| 34.5
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9eea7dce71fbd9b353412cc480612e8f41f24e6a
| 274
|
py
|
Python
|
dragonfly/exceptions.py
|
MattJMLewis/dragonfly
|
1374d78acc33624e9ba1d2b3d20e7ae198fe178f
|
[
"MIT"
] | null | null | null |
dragonfly/exceptions.py
|
MattJMLewis/dragonfly
|
1374d78acc33624e9ba1d2b3d20e7ae198fe178f
|
[
"MIT"
] | null | null | null |
dragonfly/exceptions.py
|
MattJMLewis/dragonfly
|
1374d78acc33624e9ba1d2b3d20e7ae198fe178f
|
[
"MIT"
] | null | null | null |
class MethodDoesNotExist(Exception):
pass
class InvalidControllerMethod(Exception):
pass
class MissingClause(Exception):
pass
class MissingTable(Exception):
pass
class InvalidOperator(Exception):
pass
class ChunkOutOfRange(Exception):
pass
| 11.913043
| 41
| 0.744526
| 24
| 274
| 8.5
| 0.375
| 0.382353
| 0.441176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189781
| 274
| 22
| 42
| 12.454545
| 0.918919
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7355853d745585c161b3f3e4c10660e7d2240a17
| 932
|
py
|
Python
|
Lib/encodings/iso8859_1.py
|
marcosptf/cpython-2.0.1
|
73c739a764e8b1dc84640e73b880bc66e1916bca
|
[
"PSF-2.0"
] | 5
|
2022-03-26T21:53:36.000Z
|
2022-03-30T21:47:20.000Z
|
Lib/encodings/iso8859_1.py
|
marcosptf/cpython-2.0.1
|
73c739a764e8b1dc84640e73b880bc66e1916bca
|
[
"PSF-2.0"
] | 6
|
2020-11-18T15:48:14.000Z
|
2021-05-03T21:20:50.000Z
|
Lib/encodings/iso8859_1.py
|
marcosptf/cpython-2.0.1
|
73c739a764e8b1dc84640e73b880bc66e1916bca
|
[
"PSF-2.0"
] | 2
|
2015-07-16T08:14:13.000Z
|
2022-03-27T01:55:17.000Z
|
""" Python Character Mapping Codec generated from '8859-1.TXT'.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict'):
codecs.StreamWriter.__init__(self,strict,errors)
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = {
}
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
| 18.64
| 68
| 0.701717
| 115
| 932
| 5.547826
| 0.469565
| 0.068966
| 0.047022
| 0.065831
| 0.125392
| 0.125392
| 0.125392
| 0
| 0
| 0
| 0
| 0.006477
| 0.171674
| 932
| 49
| 69
| 19.020408
| 0.819948
| 0.241416
| 0
| 0
| 1
| 0
| 0.026239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.055556
| 0.055556
| 0.166667
| 0.611111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
b4031ee6527d9c715b9e985201515c539d532fb3
| 518
|
py
|
Python
|
fc_sql/ISQLHandler.py
|
fangqk1991/py-sql
|
61acf9c4ca8fe3544a9d6b79fd338f3548f2f838
|
[
"MIT"
] | null | null | null |
fc_sql/ISQLHandler.py
|
fangqk1991/py-sql
|
61acf9c4ca8fe3544a9d6b79fd338f3548f2f838
|
[
"MIT"
] | null | null | null |
fc_sql/ISQLHandler.py
|
fangqk1991/py-sql
|
61acf9c4ca8fe3544a9d6b79fd338f3548f2f838
|
[
"MIT"
] | null | null | null |
import abc
from abc import ABCMeta
class ISQLHandler(metaclass=ABCMeta):
    """Abstract interface describing how a record type maps onto a SQL table.

    Concrete subclasses supply the engine handle, the table name, the column
    list and the primary key; the two ``*_cols`` conveniences below derive
    their defaults from :meth:`sql_cols`.
    """

    def __init__(self):
        # No state of its own; everything comes from the subclass overrides.
        pass

    @abc.abstractmethod
    def sql_instance(self):
        """Return the object used to execute SQL (presumably a connection
        or engine wrapper — see concrete implementers)."""

    @abc.abstractmethod
    def sql_table(self):
        """Return the name of the backing SQL table."""

    @abc.abstractmethod
    def sql_cols(self):
        """Return all columns of the table."""

    def sql_insertable_cols(self):
        """Columns accepted on INSERT; defaults to every column."""
        return self.sql_cols()

    def sql_modifiable_cols(self):
        """Columns accepted on UPDATE; defaults to the insertable set."""
        return self.sql_insertable_cols()

    @abc.abstractmethod
    def sql_primary_key(self):
        """Return the primary-key column."""
| 16.709677
| 41
| 0.646718
| 62
| 518
| 5.145161
| 0.33871
| 0.112853
| 0.250784
| 0.288401
| 0.423197
| 0.291536
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277992
| 518
| 30
| 42
| 17.266667
| 0.852941
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.238095
| 0.095238
| 0.095238
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
b4074b4c6e9f478c3f944b260cde76d2795314b4
| 138
|
py
|
Python
|
keras/objectives.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 300
|
2018-04-04T05:01:21.000Z
|
2022-02-25T18:56:04.000Z
|
keras/objectives.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 163
|
2018-04-03T17:41:22.000Z
|
2021-09-03T16:44:04.000Z
|
keras/objectives.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 94
|
2016-02-17T20:59:27.000Z
|
2021-04-19T08:18:16.000Z
|
"""Legacy objectives module.
Only kept for backwards API compatibility.
"""
from __future__ import absolute_import
from .losses import *
| 19.714286
| 42
| 0.789855
| 17
| 138
| 6.117647
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137681
| 138
| 6
| 43
| 23
| 0.87395
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b40d12650e58191184f6ae48d0e0559ba5348022
| 117
|
py
|
Python
|
toai/inpute/__init__.py
|
KipperPipper/Library_test
|
bdc35c2f1a6f2deadb4d0b00e3f4d5c8a597efdf
|
[
"MIT"
] | 1
|
2019-11-22T13:12:11.000Z
|
2019-11-22T13:12:11.000Z
|
toai/inpute/__init__.py
|
KipperPipper/Library_test
|
bdc35c2f1a6f2deadb4d0b00e3f4d5c8a597efdf
|
[
"MIT"
] | null | null | null |
toai/inpute/__init__.py
|
KipperPipper/Library_test
|
bdc35c2f1a6f2deadb4d0b00e3f4d5c8a597efdf
|
[
"MIT"
] | null | null | null |
# pylama:ignore=W0611
from .CategoricalInputer import CategoricalInputer
from .NumericInputer import NumericInputer
| 23.4
| 50
| 0.863248
| 11
| 117
| 9.181818
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 0.094017
| 117
| 4
| 51
| 29.25
| 0.915094
| 0.162393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b4321a50c7a068ab190c2ec2906462146523c22f
| 50,095
|
py
|
Python
|
ryu/tests/unit/packet/test_sctp.py
|
xiaolanxia/ryu-gui
|
b47fa61dd307e180ed261a43ad85c7b73b982f84
|
[
"Apache-2.0"
] | 11
|
2015-06-19T03:46:20.000Z
|
2020-08-21T02:22:30.000Z
|
ryu/tests/unit/packet/test_sctp.py
|
horms/ryu
|
8728c11e1280b9c6bd4b7a68c5d2fbe4ec5f2ad3
|
[
"Apache-2.0"
] | null | null | null |
ryu/tests/unit/packet/test_sctp.py
|
horms/ryu
|
8728c11e1280b9c6bd4b7a68c5d2fbe4ec5f2ad3
|
[
"Apache-2.0"
] | 9
|
2015-05-22T09:00:08.000Z
|
2021-01-24T02:46:36.000Z
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import struct
import unittest
from nose.tools import eq_
from nose.tools import ok_
from ryu.lib import addrconv
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import sctp
from ryu.ofproto import ether
from ryu.ofproto import inet
LOG = logging.getLogger(__name__)
class Test_sctp(unittest.TestCase):
def setUp(self):
    """Build a minimal SCTP packet: common header only, no chunks."""
    self.chunks = []
    # Checksum left at 0 in the fixture; test_parser/serialize skip it.
    self.csum = 0
    self.dst_port = 1234
    self.src_port = 5678
    self.vtag = 98765432
    self.sc = sctp.sctp(
        self.src_port, self.dst_port, self.vtag, self.csum,
        self.chunks)
    # Expected wire image of the header above (Python-2 byte-string):
    # src_port 0x162e=5678 | dst_port 0x04d2=1234 | vtag 0x05e30a78=98765432
    # | csum 0.
    self.buf = '\x16\x2e\x04\xd2\x05\xe3\x0a\x78\x00\x00\x00\x00'
def setUp_with_data(self):
    """Extend the base fixture with a single DATA chunk.

    Relies on setUp() having run first: reads self.src_port etc. and
    appends the chunk's wire image to self.buf.
    """
    self.unordered = 1
    self.begin = 1
    self.end = 1
    # Fixed DATA chunk header (16 bytes) + payload (10 bytes).
    self.length = 16 + 10
    self.tsn = 12345
    self.sid = 1
    self.seq = 0
    self.payload_id = 0
    self.payload_data = '\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a'
    self.data = sctp.chunk_data(
        unordered=self.unordered, begin=self.begin, end=self.end,
        tsn=self.tsn, sid=self.sid, payload_data=self.payload_data)
    self.chunks = [self.data]
    self.sc = sctp.sctp(
        self.src_port, self.dst_port, self.vtag, self.csum,
        self.chunks)
    # DATA chunk wire image: type 0, flags 0x07 (U|B|E), length 0x1a=26,
    # tsn 0x3039=12345, sid 1, seq 0, payload id 0, then the payload.
    self.buf += '\x00\x07\x00\x1a\x00\x00\x30\x39\x00\x01\x00\x00' + \
        '\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a'
def setUp_with_init(self):
self.flags = 0
self.length = 20 + 8 + 20 + 8 + 4 + 16 + 16
self.init_tag = 123456
self.a_rwnd = 9876
self.os = 3
self.mis = 3
self.i_tsn = 123456
self.p_ipv4 = sctp.param_ipv4('192.168.1.1')
self.p_ipv6 = sctp.param_ipv6('fe80::647e:1aff:fec4:8284')
self.p_cookie_preserve = sctp.param_cookie_preserve(5000)
self.p_ecn = sctp.param_ecn()
self.p_host_addr = sctp.param_host_addr('test host\x00')
self.p_support_type = sctp.param_supported_addr(
[sctp.PTYPE_IPV4, sctp.PTYPE_IPV6, sctp.PTYPE_COOKIE_PRESERVE,
sctp.PTYPE_ECN, sctp.PTYPE_HOST_ADDR])
self.params = [
self.p_ipv4, self.p_ipv6, self.p_cookie_preserve,
self.p_ecn, self.p_host_addr, self.p_support_type]
self.init = sctp.chunk_init(
init_tag=self.init_tag, a_rwnd=self.a_rwnd, os=self.os,
mis=self.mis, i_tsn=self.i_tsn, params=self.params)
self.chunks = [self.init]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x01\x00\x00\x5c\x00\x01\xe2\x40\x00\x00\x26\x94' + \
'\x00\x03\x00\x03\x00\x01\xe2\x40' + \
'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
'\x00\x06\x00\x14' + \
'\xfe\x80\x00\x00\x00\x00\x00\x00' + \
'\x64\x7e\x1a\xff\xfe\xc4\x82\x84' + \
'\x00\x09\x00\x08\x00\x00\x13\x88' + \
'\x80\x00\x00\x04' + \
'\x00\x0b\x00\x0e' + \
'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00' + \
'\x00\x0c\x00\x0e\x00\x05\x00\x06\x00\x09\x80\x00' + \
'\x00\x0b\x00\x00'
def setUp_with_init_ack(self):
self.flags = 0
self.length = 20 + 8 + 8 + 20 + 8 + 4 + 16
self.init_tag = 123456
self.a_rwnd = 9876
self.os = 3
self.mis = 3
self.i_tsn = 123456
self.p_state_cookie = sctp.param_state_cookie('\x01\x02\x03')
self.p_ipv4 = sctp.param_ipv4('192.168.1.1')
self.p_ipv6 = sctp.param_ipv6('fe80::647e:1aff:fec4:8284')
self.p_unrecognized_param = sctp.param_unrecognized_param(
'\xff\xff\x00\x04')
self.p_ecn = sctp.param_ecn()
self.p_host_addr = sctp.param_host_addr('test host\x00')
self.params = [
self.p_state_cookie, self.p_ipv4, self.p_ipv6,
self.p_unrecognized_param, self.p_ecn, self.p_host_addr]
self.init_ack = sctp.chunk_init_ack(
init_tag=self.init_tag, a_rwnd=self.a_rwnd, os=self.os,
mis=self.mis, i_tsn=self.i_tsn, params=self.params)
self.chunks = [self.init_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x02\x00\x00\x54\x00\x01\xe2\x40\x00\x00\x26\x94' + \
'\x00\x03\x00\x03\x00\x01\xe2\x40' + \
'\x00\x07\x00\x07\x01\x02\x03\x00' + \
'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
'\x00\x06\x00\x14' + \
'\xfe\x80\x00\x00\x00\x00\x00\x00' + \
'\x64\x7e\x1a\xff\xfe\xc4\x82\x84' + \
'\x00\x08\x00\x08\xff\xff\x00\x04' + \
'\x80\x00\x00\x04' + \
'\x00\x0b\x00\x0e' + \
'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00'
def setUp_with_sack(self):
self.flags = 0
self.length = 16 + 2 * 2 * 5 + 4 * 5
self.tsn_ack = 123456
self.a_rwnd = 9876
self.gapack_num = 5
self.duptsn_num = 5
self.gapacks = [[2, 3], [10, 12], [20, 24], [51, 52], [62, 63]]
self.duptsns = [123458, 123466, 123476, 123507, 123518]
self.sack = sctp.chunk_sack(
tsn_ack=self.tsn_ack, a_rwnd=self.a_rwnd,
gapack_num=self.gapack_num, duptsn_num=self.duptsn_num,
gapacks=self.gapacks, duptsns=self.duptsns)
self.chunks = [self.sack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x03\x00\x00\x38\x00\x01\xe2\x40' + \
'\x00\x00\x26\x94\x00\x05\x00\x05' + \
'\x00\x02\x00\x03\x00\x0a\x00\x0c\x00\x14\x00\x18' + \
'\x00\x33\x00\x34\x00\x3e\x00\x3f' + \
'\x00\x01\xe2\x42\x00\x01\xe2\x4a\x00\x01\xe2\x54' + \
'\x00\x01\xe2\x73\x00\x01\xe2\x7e'
def setUp_with_heartbeat(self):
self.flags = 0
self.length = 4 + 8
self.p_heartbeat = sctp.param_heartbeat('\x01\x02\x03\x04')
self.heartbeat = sctp.chunk_heartbeat(info=self.p_heartbeat)
self.chunks = [self.heartbeat]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x04\x00\x00\x0c' + \
'\x00\x01\x00\x08' + \
'\x01\x02\x03\x04'
def setUp_with_heartbeat_ack(self):
self.flags = 0
self.length = 4 + 12
self.p_heartbeat = sctp.param_heartbeat(
'\xff\xee\xdd\xcc\xbb\xaa\x99\x88')
self.heartbeat_ack = sctp.chunk_heartbeat_ack(info=self.p_heartbeat)
self.chunks = [self.heartbeat_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x05\x00\x00\x10' + \
'\x00\x01\x00\x0c' + \
'\xff\xee\xdd\xcc\xbb\xaa\x99\x88'
def setUp_with_abort(self):
self.tflag = 0
self.length = 4 + 8 + 16 + 8 + 4 + 20 + 8 + 4 + 8 + 8 + 4 + 12 \
+ 20 + 20
self.c_invalid_stream_id = sctp.cause_invalid_stream_id(4096)
self.c_missing_param = sctp.cause_missing_param(
[sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR])
self.c_stale_cookie = sctp.cause_stale_cookie('\x00\x00\x13\x88')
self.c_out_of_resource = sctp.cause_out_of_resource()
self.c_unresolvable_addr = sctp.cause_unresolvable_addr(
sctp.param_host_addr('test host\x00'))
self.c_unrecognized_chunk = sctp.cause_unrecognized_chunk(
'\xff\x00\x00\x04')
self.c_invalid_param = sctp.cause_invalid_param()
self.c_unrecognized_param = sctp.cause_unrecognized_param(
'\xff\xff\x00\x04')
self.c_no_userdata = sctp.cause_no_userdata('\x00\x01\xe2\x40')
self.c_cookie_while_shutdown = sctp.cause_cookie_while_shutdown()
self.c_restart_with_new_addr = sctp.cause_restart_with_new_addr(
sctp.param_ipv4('192.168.1.1'))
self.c_user_initiated_abort = sctp.cause_user_initiated_abort(
'Key Interrupt.\x00')
self.c_protocol_violation = sctp.cause_protocol_violation(
'Unknown reason.\x00')
self.causes = [
self.c_invalid_stream_id, self.c_missing_param,
self.c_stale_cookie, self.c_out_of_resource,
self.c_unresolvable_addr, self.c_unrecognized_chunk,
self.c_invalid_param, self.c_unrecognized_param,
self.c_no_userdata, self.c_cookie_while_shutdown,
self.c_restart_with_new_addr, self.c_user_initiated_abort,
self.c_protocol_violation]
self.abort = sctp.chunk_abort(causes=self.causes)
self.chunks = [self.abort]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x06\x00\x00\x90' + \
'\x00\x01\x00\x08\x10\x00\x00\x00' + \
'\x00\x02\x00\x10\x00\x00\x00\x04' + \
'\x00\x05\x00\x06\x00\x09\x00\x0b' + \
'\x00\x03\x00\x08\x00\x00\x13\x88' + \
'\x00\x04\x00\x04' + \
'\x00\x05\x00\x14' + \
'\x00\x0b\x00\x0e' + \
'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00' + \
'\x00\x06\x00\x08\xff\x00\x00\x04' + \
'\x00\x07\x00\x04' + \
'\x00\x08\x00\x08\xff\xff\x00\x04' + \
'\x00\x09\x00\x08\x00\x01\xe2\x40' + \
'\x00\x0a\x00\x04' + \
'\x00\x0b\x00\x0c' + \
'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
'\x00\x0c\x00\x13' + \
'\x4b\x65\x79\x20\x49\x6e\x74\x65' + \
'\x72\x72\x75\x70\x74\x2e\x00\x00' + \
'\x00\x0d\x00\x14' + \
'\x55\x6e\x6b\x6e\x6f\x77\x6e\x20' + \
'\x72\x65\x61\x73\x6f\x6e\x2e\x00'
def setUp_with_shutdown(self):
self.flags = 0
self.length = 8
self.tsn_ack = 123456
self.shutdown = sctp.chunk_shutdown(tsn_ack=self.tsn_ack)
self.chunks = [self.shutdown]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x07\x00\x00\x08\x00\x01\xe2\x40'
def setUp_with_shutdown_ack(self):
self.flags = 0
self.length = 4
self.shutdown_ack = sctp.chunk_shutdown_ack()
self.chunks = [self.shutdown_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x08\x00\x00\x04'
def setUp_with_error(self):
self.flags = 0
self.length = 4 + 8 + 16 + 8 + 4 + 20 + 8 + 4 + 8 + 8 + 4 + 12 \
+ 20 + 20
self.c_invalid_stream_id = sctp.cause_invalid_stream_id(4096)
self.c_missing_param = sctp.cause_missing_param(
[sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR])
self.c_stale_cookie = sctp.cause_stale_cookie('\x00\x00\x13\x88')
self.c_out_of_resource = sctp.cause_out_of_resource()
self.c_unresolvable_addr = sctp.cause_unresolvable_addr(
sctp.param_host_addr('test host\x00'))
self.c_unrecognized_chunk = sctp.cause_unrecognized_chunk(
'\xff\x00\x00\x04')
self.c_invalid_param = sctp.cause_invalid_param()
self.c_unrecognized_param = sctp.cause_unrecognized_param(
'\xff\xff\x00\x04')
self.c_no_userdata = sctp.cause_no_userdata('\x00\x01\xe2\x40')
self.c_cookie_while_shutdown = sctp.cause_cookie_while_shutdown()
self.c_restart_with_new_addr = sctp.cause_restart_with_new_addr(
sctp.param_ipv4('192.168.1.1'))
self.c_user_initiated_abort = sctp.cause_user_initiated_abort(
'Key Interrupt.\x00')
self.c_protocol_violation = sctp.cause_protocol_violation(
'Unknown reason.\x00')
self.causes = [
self.c_invalid_stream_id, self.c_missing_param,
self.c_stale_cookie, self.c_out_of_resource,
self.c_unresolvable_addr, self.c_unrecognized_chunk,
self.c_invalid_param, self.c_unrecognized_param,
self.c_no_userdata, self.c_cookie_while_shutdown,
self.c_restart_with_new_addr, self.c_user_initiated_abort,
self.c_protocol_violation]
self.error = sctp.chunk_error(causes=self.causes)
self.chunks = [self.error]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x09\x00\x00\x90' + \
'\x00\x01\x00\x08\x10\x00\x00\x00' + \
'\x00\x02\x00\x10\x00\x00\x00\x04' + \
'\x00\x05\x00\x06\x00\x09\x00\x0b' + \
'\x00\x03\x00\x08\x00\x00\x13\x88' + \
'\x00\x04\x00\x04' + \
'\x00\x05\x00\x14' + \
'\x00\x0b\x00\x0e' + \
'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00' + \
'\x00\x06\x00\x08\xff\x00\x00\x04' + \
'\x00\x07\x00\x04' + \
'\x00\x08\x00\x08\xff\xff\x00\x04' + \
'\x00\x09\x00\x08\x00\x01\xe2\x40' + \
'\x00\x0a\x00\x04' + \
'\x00\x0b\x00\x0c' + \
'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
'\x00\x0c\x00\x13' + \
'\x4b\x65\x79\x20\x49\x6e\x74\x65' + \
'\x72\x72\x75\x70\x74\x2e\x00\x00' + \
'\x00\x0d\x00\x14' + \
'\x55\x6e\x6b\x6e\x6f\x77\x6e\x20' + \
'\x72\x65\x61\x73\x6f\x6e\x2e\x00'
def setUp_with_cookie_echo(self):
self.flags = 0
self.length = 8
self.cookie = '\x12\x34\x56\x78'
self.cookie_echo = sctp.chunk_cookie_echo(cookie=self.cookie)
self.chunks = [self.cookie_echo]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x0a\x00\x00\x08\x12\x34\x56\x78'
def setUp_with_cookie_ack(self):
self.flags = 0
self.length = 4
self.cookie_ack = sctp.chunk_cookie_ack()
self.chunks = [self.cookie_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x0b\x00\x00\x04'
def setUp_with_ecn_echo(self):
self.flags = 0
self.length = 8
self.low_tsn = 123456
self.ecn_echo = sctp.chunk_ecn_echo(low_tsn=self.low_tsn)
self.chunks = [self.ecn_echo]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x0c\x00\x00\x08\x00\x01\xe2\x40'
def setUp_with_cwr(self):
self.flags = 0
self.length = 8
self.low_tsn = 123456
self.cwr = sctp.chunk_cwr(low_tsn=self.low_tsn)
self.chunks = [self.cwr]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x0d\x00\x00\x08\x00\x01\xe2\x40'
def setUp_with_shutdown_complete(self):
self.tflag = 0
self.length = 4
self.shutdown_complete = sctp.chunk_shutdown_complete()
self.chunks = [self.shutdown_complete]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x0e\x00\x00\x04'
def setUp_with_multi_chunks(self):
self.s_flags = 0
self.s_length = 16
self.s_tsn_ack = 123456
self.s_a_rwnd = 9876
self.s_gapack_num = 0
self.s_duptsn_num = 0
self.s_gapacks = None
self.s_duptsns = None
self.sack = sctp.chunk_sack(
tsn_ack=self.s_tsn_ack, a_rwnd=self.s_a_rwnd)
self.d1_unordered = 0
self.d1_begin = 1
self.d1_end = 0
self.d1_length = 16 + 10
self.d1_tsn = 12345
self.d1_sid = 1
self.d1_seq = 0
self.d1_payload_id = 0
self.d1_payload_data = '\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a'
self.data1 = sctp.chunk_data(
begin=self.d1_begin, tsn=self.d1_tsn, sid=self.d1_sid,
payload_data=self.d1_payload_data)
self.d2_unordered = 0
self.d2_begin = 0
self.d2_end = 1
self.d2_length = 16 + 10
self.d2_tsn = 12346
self.d2_sid = 1
self.d2_seq = 1
self.d2_payload_id = 0
self.d2_payload_data = '\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a'
self.data2 = sctp.chunk_data(
end=self.d2_end, tsn=self.d2_tsn, sid=self.d2_sid,
seq=self.d2_seq, payload_data=self.d2_payload_data)
self.chunks = [self.sack, self.data1, self.data2]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += '\x03\x00\x00\x10\x00\x01\xe2\x40' + \
'\x00\x00\x26\x94\x00\x00\x00\x00' + \
'\x00\x02\x00\x1a\x00\x00\x30\x39\x00\x01\x00\x00' + \
'\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a' + \
'\x00\x01\x00\x1a\x00\x00\x30\x3a\x00\x01\x00\x01' + \
'\x00\x00\x00\x00\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a'
def tearDown(self):
    # Nothing to release; fixtures are plain attributes.
    pass
def test_init(self):
    """The sctp constructor stores each field unmodified."""
    eq_(self.src_port, self.sc.src_port)
    eq_(self.dst_port, self.sc.dst_port)
    eq_(self.vtag, self.sc.vtag)
    eq_(self.csum, self.sc.csum)
    eq_(self.chunks, self.sc.chunks)
# Constructor variants: each builds the corresponding chunk fixture and
# re-runs test_init() against it.
def test_init_with_data(self):
    self.setUp_with_data()
    self.test_init()

def test_init_with_init(self):
    self.setUp_with_init()
    self.test_init()

def test_init_with_init_ack(self):
    self.setUp_with_init_ack()
    self.test_init()

def test_init_with_sack(self):
    self.setUp_with_sack()
    self.test_init()

def test_init_with_heartbeat(self):
    self.setUp_with_heartbeat()
    self.test_init()

def test_init_with_heartbeat_ack(self):
    self.setUp_with_heartbeat_ack()
    self.test_init()

def test_init_with_abort(self):
    self.setUp_with_abort()
    self.test_init()

def test_init_with_shutdown(self):
    self.setUp_with_shutdown()
    self.test_init()

def test_init_with_shutdown_ack(self):
    self.setUp_with_shutdown_ack()
    self.test_init()

def test_init_with_error(self):
    self.setUp_with_error()
    self.test_init()

def test_init_with_cookie_echo(self):
    self.setUp_with_cookie_echo()
    self.test_init()

def test_init_with_cookie_ack(self):
    self.setUp_with_cookie_ack()
    self.test_init()

def test_init_with_ecn_echo(self):
    self.setUp_with_ecn_echo()
    self.test_init()

def test_init_with_cwr(self):
    self.setUp_with_cwr()
    self.test_init()

def test_init_with_shutdown_complete(self):
    self.setUp_with_shutdown_complete()
    self.test_init()

def test_init_with_multi_chunks(self):
    self.setUp_with_multi_chunks()
    self.test_init()
def test_parser(self):
    """Parsing the expected wire image reproduces the fixture packet."""
    _res = self.sc.parser(str(self.buf))
    # parser() may return either a tuple or a bare packet; normalize.
    if type(_res) is tuple:
        res = _res[0]
    else:
        res = _res
    # to calculate the lengths of parameters.
    self.sc.serialize(None, None)
    eq_(self.src_port, res.src_port)
    eq_(self.dst_port, res.dst_port)
    eq_(self.vtag, res.vtag)
    eq_(self.csum, res.csum)
    # NOTE(review): chunks are compared via str() — presumably the chunk
    # classes lack a usable __eq__; confirm before tightening.
    eq_(str(self.chunks), str(res.chunks))
# Parser variants: each builds the corresponding chunk fixture and re-runs
# test_parser() against it.
def test_parser_with_data(self):
    self.setUp_with_data()
    self.test_parser()

def test_parser_with_init(self):
    self.setUp_with_init()
    self.test_parser()

def test_parser_with_init_ack(self):
    self.setUp_with_init_ack()
    self.test_parser()

def test_parser_with_sack(self):
    self.setUp_with_sack()
    self.test_parser()

def test_parser_with_heartbeat(self):
    self.setUp_with_heartbeat()
    self.test_parser()

def test_parser_with_heartbeat_ack(self):
    self.setUp_with_heartbeat_ack()
    self.test_parser()

def test_parser_with_abort(self):
    self.setUp_with_abort()
    self.test_parser()

def test_parser_with_shutdown(self):
    self.setUp_with_shutdown()
    self.test_parser()

def test_parser_with_shutdown_ack(self):
    self.setUp_with_shutdown_ack()
    self.test_parser()

def test_parser_with_error(self):
    self.setUp_with_error()
    self.test_parser()

def test_parser_with_cookie_echo(self):
    self.setUp_with_cookie_echo()
    self.test_parser()

def test_parser_with_cookie_ack(self):
    self.setUp_with_cookie_ack()
    self.test_parser()

def test_parser_with_ecn_echo(self):
    self.setUp_with_ecn_echo()
    self.test_parser()

def test_parser_with_cwr(self):
    self.setUp_with_cwr()
    self.test_parser()

def test_parser_with_shutdown_complete(self):
    self.setUp_with_shutdown_complete()
    self.test_parser()

def test_parser_with_multi_chunks(self):
    self.setUp_with_multi_chunks()
    self.test_parser()
def _test_serialize(self):
    """Serialize self.sc, check the common header, return the chunk bytes.

    Shared helper for the test_serialize_with_* methods; the returned
    slice starts right after the fixed-size SCTP header.
    """
    buf = self.sc.serialize(bytearray(), None)
    res = struct.unpack_from(sctp.sctp._PACK_STR, buf)
    eq_(self.src_port, res[0])
    eq_(self.dst_port, res[1])
    eq_(self.vtag, res[2])
    # skip compare checksum
    #eq_(self.csum, res[3])
    return buf[sctp.sctp._MIN_LEN:]
def test_serialize(self):
    # Header-only packet; the chunk section is exercised by the
    # test_serialize_with_* variants.
    self._test_serialize()
def test_serialize_with_data(self):
    """A serialized DATA chunk carries type, flags and every field."""
    self.setUp_with_data()
    buf = self._test_serialize()
    res = struct.unpack_from(sctp.chunk_data._PACK_STR, buf)
    eq_(sctp.chunk_data.chunk_type(), res[0])
    # Flags byte packs unordered|begin|end into bits 2..0.
    flags = (
        (self.unordered << 2) |
        (self.begin << 1) |
        (self.end << 0))
    eq_(flags, res[1])
    eq_(self.length, res[2])
    eq_(self.tsn, res[3])
    eq_(self.sid, res[4])
    eq_(self.seq, res[5])
    eq_(self.payload_id, res[6])
    # Payload follows the fixed chunk header.
    eq_(self.payload_data, buf[sctp.chunk_data._MIN_LEN:])
def test_serialize_with_init(self):
self.setUp_with_init()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_init._PACK_STR, buf)
eq_(sctp.chunk_init.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.init_tag, res[3])
eq_(self.a_rwnd, res[4])
eq_(self.os, res[5])
eq_(self.mis, res[6])
eq_(self.i_tsn, res[7])
buf = buf[sctp.chunk_init._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_ipv4._PACK_STR, buf)
eq_(sctp.param_ipv4.param_type(), res1[0])
eq_(8, res1[1])
eq_('192.168.1.1', addrconv.ipv4.bin_to_text(
buf[sctp.param_ipv4._MIN_LEN:sctp.param_ipv4._MIN_LEN + 4]))
buf = buf[8:]
res2 = struct.unpack_from(sctp.param_ipv6._PACK_STR, buf)
eq_(sctp.param_ipv6.param_type(), res2[0])
eq_(20, res2[1])
eq_('fe80::647e:1aff:fec4:8284', addrconv.ipv6.bin_to_text(
buf[sctp.param_ipv6._MIN_LEN:sctp.param_ipv6._MIN_LEN + 16]))
buf = buf[20:]
res3 = struct.unpack_from(sctp.param_cookie_preserve._PACK_STR,
buf)
eq_(sctp.param_cookie_preserve.param_type(), res3[0])
eq_(8, res3[1])
eq_(5000, res3[2])
buf = buf[8:]
res4 = struct.unpack_from(sctp.param_ecn._PACK_STR, buf)
eq_(sctp.param_ecn.param_type(), res4[0])
eq_(4, res4[1])
buf = buf[4:]
res5 = struct.unpack_from(sctp.param_host_addr._PACK_STR, buf)
eq_(sctp.param_host_addr.param_type(), res5[0])
eq_(14, res5[1])
eq_('test host\x00',
buf[sctp.param_host_addr._MIN_LEN:
sctp.param_host_addr._MIN_LEN + 10])
buf = buf[16:]
res6 = struct.unpack_from(sctp.param_supported_addr._PACK_STR, buf)
res6 = list(res6)
eq_(sctp.param_supported_addr.param_type(), res6[0])
eq_(14, res6[1])
buf = buf[sctp.param_supported_addr._MIN_LEN:]
offset = 0
tmplist = []
while offset < len(buf):
(tmp, ) = struct.unpack_from('!H', buf, offset)
tmplist.append(tmp)
offset += struct.calcsize('!H')
res6.extend(tmplist)
eq_(sctp.PTYPE_IPV4, res6[2])
eq_(sctp.PTYPE_IPV6, res6[3])
eq_(sctp.PTYPE_COOKIE_PRESERVE, res6[4])
eq_(sctp.PTYPE_ECN, res6[5])
eq_(sctp.PTYPE_HOST_ADDR, res6[6])
def test_serialize_with_init_ack(self):
self.setUp_with_init_ack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_init_ack._PACK_STR, buf)
eq_(sctp.chunk_init_ack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.init_tag, res[3])
eq_(self.a_rwnd, res[4])
eq_(self.os, res[5])
eq_(self.mis, res[6])
eq_(self.i_tsn, res[7])
buf = buf[sctp.chunk_init_ack._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_state_cookie._PACK_STR, buf)
eq_(sctp.param_state_cookie.param_type(), res1[0])
eq_(7, res1[1])
eq_('\x01\x02\x03',
buf[sctp.param_state_cookie._MIN_LEN:
sctp.param_state_cookie._MIN_LEN + 3])
buf = buf[8:]
res2 = struct.unpack_from(sctp.param_ipv4._PACK_STR, buf)
eq_(sctp.param_ipv4.param_type(), res2[0])
eq_(8, res2[1])
eq_('192.168.1.1', addrconv.ipv4.bin_to_text(
buf[sctp.param_ipv4._MIN_LEN:sctp.param_ipv4._MIN_LEN + 4]))
buf = buf[8:]
res3 = struct.unpack_from(sctp.param_ipv6._PACK_STR, buf)
eq_(sctp.param_ipv6.param_type(), res3[0])
eq_(20, res3[1])
eq_('fe80::647e:1aff:fec4:8284', addrconv.ipv6.bin_to_text(
buf[sctp.param_ipv6._MIN_LEN:sctp.param_ipv6._MIN_LEN + 16]))
buf = buf[20:]
res4 = struct.unpack_from(
sctp.param_unrecognized_param._PACK_STR, buf)
eq_(sctp.param_unrecognized_param.param_type(), res4[0])
eq_(8, res4[1])
eq_('\xff\xff\x00\x04',
buf[sctp.param_unrecognized_param._MIN_LEN:
sctp.param_unrecognized_param._MIN_LEN + 4])
buf = buf[8:]
res5 = struct.unpack_from(sctp.param_ecn._PACK_STR, buf)
eq_(sctp.param_ecn.param_type(), res5[0])
eq_(4, res5[1])
buf = buf[4:]
res6 = struct.unpack_from(sctp.param_host_addr._PACK_STR, buf)
eq_(sctp.param_host_addr.param_type(), res6[0])
eq_(14, res6[1])
eq_('test host\x00',
buf[sctp.param_host_addr._MIN_LEN:
sctp.param_host_addr._MIN_LEN + 10])
def test_serialize_with_sack(self):
self.setUp_with_sack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_sack._PACK_STR, buf)
eq_(sctp.chunk_sack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.tsn_ack, res[3])
eq_(self.a_rwnd, res[4])
eq_(self.gapack_num, res[5])
eq_(self.duptsn_num, res[6])
buf = buf[sctp.chunk_sack._MIN_LEN:]
gapacks = []
for _ in range(self.gapack_num):
(gap_s, gap_e) = struct.unpack_from(
sctp.chunk_sack._GAPACK_STR, buf)
one = [gap_s, gap_e]
gapacks.append(one)
buf = buf[sctp.chunk_sack._GAPACK_LEN:]
duptsns = []
for _ in range(self.duptsn_num):
(duptsn, ) = struct.unpack_from(
sctp.chunk_sack._DUPTSN_STR, buf)
duptsns.append(duptsn)
buf = buf[sctp.chunk_sack._DUPTSN_LEN:]
eq_(self.gapacks, gapacks)
eq_(self.duptsns, duptsns)
def test_serialize_with_heartbeat(self):
self.setUp_with_heartbeat()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_heartbeat._PACK_STR, buf)
eq_(sctp.chunk_heartbeat.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
buf = buf[sctp.chunk_heartbeat._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_heartbeat._PACK_STR, buf)
eq_(sctp.param_heartbeat.param_type(), res1[0])
eq_(8, res1[1])
eq_('\x01\x02\x03\x04',
buf[sctp.param_heartbeat._MIN_LEN:
sctp.param_heartbeat._MIN_LEN + 4])
def test_serialize_with_heartbeat_ack(self):
self.setUp_with_heartbeat_ack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_heartbeat_ack._PACK_STR, buf)
eq_(sctp.chunk_heartbeat_ack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
buf = buf[sctp.chunk_heartbeat_ack._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_heartbeat._PACK_STR, buf)
eq_(sctp.param_heartbeat.param_type(), res1[0])
eq_(12, res1[1])
eq_('\xff\xee\xdd\xcc\xbb\xaa\x99\x88',
buf[sctp.param_heartbeat._MIN_LEN:
sctp.param_heartbeat._MIN_LEN + 8])
def test_serialize_with_abort(self):
self.setUp_with_abort()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_abort._PACK_STR, buf)
eq_(sctp.chunk_abort.chunk_type(), res[0])
flags = self.tflag << 0
eq_(flags, res[1])
eq_(self.length, res[2])
buf = buf[sctp.chunk_abort._MIN_LEN:]
res1 = struct.unpack_from(sctp.cause_invalid_stream_id._PACK_STR, buf)
eq_(sctp.cause_invalid_stream_id.cause_code(), res1[0])
eq_(8, res1[1])
eq_(4096, res1[2])
buf = buf[8:]
res2 = struct.unpack_from(sctp.cause_missing_param._PACK_STR, buf)
eq_(sctp.cause_missing_param.cause_code(), res2[0])
eq_(16, res2[1])
eq_(4, res2[2])
types = []
for count in range(4):
(tmp, ) = struct.unpack_from(
'!H', buf, sctp.cause_missing_param._MIN_LEN + 2 * count)
types.append(tmp)
eq_(str([sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR]),
str(types))
buf = buf[16:]
res3 = struct.unpack_from(sctp.cause_stale_cookie._PACK_STR, buf)
eq_(sctp.cause_stale_cookie.cause_code(), res3[0])
eq_(8, res3[1])
eq_('\x00\x00\x13\x88',
buf[sctp.cause_stale_cookie._MIN_LEN:
sctp.cause_stale_cookie._MIN_LEN + 4])
buf = buf[8:]
res4 = struct.unpack_from(sctp.cause_out_of_resource._PACK_STR, buf)
eq_(sctp.cause_out_of_resource.cause_code(), res4[0])
eq_(4, res4[1])
buf = buf[4:]
res5 = struct.unpack_from(
sctp.cause_unresolvable_addr._PACK_STR, buf)
eq_(sctp.cause_unresolvable_addr.cause_code(), res5[0])
eq_(20, res5[1])
eq_('\x00\x0b\x00\x0e\x74\x65\x73\x74' +
'\x20\x68\x6f\x73\x74\x00\x00\x00',
buf[sctp.cause_unresolvable_addr._MIN_LEN:
sctp.cause_unresolvable_addr._MIN_LEN + 16])
buf = buf[20:]
res6 = struct.unpack_from(
sctp.cause_unrecognized_chunk._PACK_STR, buf)
eq_(sctp.cause_unrecognized_chunk.cause_code(), res6[0])
eq_(8, res6[1])
eq_('\xff\x00\x00\x04',
buf[sctp.cause_unrecognized_chunk._MIN_LEN:
sctp.cause_unrecognized_chunk._MIN_LEN + 4])
buf = buf[8:]
res7 = struct.unpack_from(sctp.cause_invalid_param._PACK_STR, buf)
eq_(sctp.cause_invalid_param.cause_code(), res7[0])
eq_(4, res7[1])
buf = buf[4:]
res8 = struct.unpack_from(
sctp.cause_unrecognized_param._PACK_STR, buf)
eq_(sctp.cause_unrecognized_param.cause_code(), res8[0])
eq_(8, res8[1])
eq_('\xff\xff\x00\x04',
buf[sctp.cause_unrecognized_param._MIN_LEN:
sctp.cause_unrecognized_param._MIN_LEN + 4])
buf = buf[8:]
res9 = struct.unpack_from(sctp.cause_no_userdata._PACK_STR, buf)
eq_(sctp.cause_no_userdata.cause_code(), res9[0])
eq_(8, res9[1])
eq_('\x00\x01\xe2\x40',
buf[sctp.cause_no_userdata._MIN_LEN:
sctp.cause_no_userdata._MIN_LEN + 4])
buf = buf[8:]
res10 = struct.unpack_from(
sctp.cause_cookie_while_shutdown._PACK_STR, buf)
eq_(sctp.cause_cookie_while_shutdown.cause_code(), res10[0])
eq_(4, res10[1])
buf = buf[4:]
res11 = struct.unpack_from(
sctp.cause_restart_with_new_addr._PACK_STR, buf)
eq_(sctp.cause_restart_with_new_addr.cause_code(), res11[0])
eq_(12, res11[1])
eq_('\x00\x05\x00\x08\xc0\xa8\x01\x01',
buf[sctp.cause_restart_with_new_addr._MIN_LEN:
sctp.cause_restart_with_new_addr._MIN_LEN + 8])
buf = buf[12:]
res12 = struct.unpack_from(
sctp.cause_user_initiated_abort._PACK_STR, buf)
eq_(sctp.cause_user_initiated_abort.cause_code(), res12[0])
eq_(19, res12[1])
eq_('Key Interrupt.\x00',
buf[sctp.cause_user_initiated_abort._MIN_LEN:
sctp.cause_user_initiated_abort._MIN_LEN + 15])
buf = buf[20:]
res13 = struct.unpack_from(
sctp.cause_protocol_violation._PACK_STR, buf)
eq_(sctp.cause_protocol_violation.cause_code(), res13[0])
eq_(20, res13[1])
eq_('Unknown reason.\x00',
buf[sctp.cause_protocol_violation._MIN_LEN:
sctp.cause_protocol_violation._MIN_LEN + 16])
def test_serialize_with_shutdown(self):
self.setUp_with_shutdown()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_shutdown._PACK_STR, buf)
eq_(sctp.chunk_shutdown.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.tsn_ack, res[3])
def test_serialize_with_shutdown_ack(self):
self.setUp_with_shutdown_ack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_shutdown_ack._PACK_STR, buf)
eq_(sctp.chunk_shutdown_ack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
def test_serialize_with_error(self):
    """Serialize an ERROR chunk carrying one of every cause and verify
    the wire format cause by cause.

    Walks ``buf`` forward: each cause is unpacked at offset 0 and the
    buffer is then advanced past that cause (using its padded length).
    """
    self.setUp_with_error()
    buf = self._test_serialize()
    # Chunk header: type / flags / length.
    res = struct.unpack_from(sctp.chunk_error._PACK_STR, buf)
    eq_(sctp.chunk_error.chunk_type(), res[0])
    eq_(self.flags, res[1])
    eq_(self.length, res[2])
    buf = buf[sctp.chunk_error._MIN_LEN:]
    # Cause 1: Invalid Stream Identifier (stream id 4096).
    res1 = struct.unpack_from(sctp.cause_invalid_stream_id._PACK_STR, buf)
    eq_(sctp.cause_invalid_stream_id.cause_code(), res1[0])
    eq_(8, res1[1])
    eq_(4096, res1[2])
    buf = buf[8:]
    # Cause 2: Missing Mandatory Parameter carrying 4 parameter types.
    res2 = struct.unpack_from(sctp.cause_missing_param._PACK_STR, buf)
    eq_(sctp.cause_missing_param.cause_code(), res2[0])
    eq_(16, res2[1])
    eq_(4, res2[2])
    # Pull the four 16-bit parameter types out one by one.
    types = []
    for count in range(4):
        (tmp, ) = struct.unpack_from(
            '!H', buf, sctp.cause_missing_param._MIN_LEN + 2 * count)
        types.append(tmp)
    eq_(str([sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
             sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR]),
        str(types))
    buf = buf[16:]
    # Cause 3: Stale Cookie Error (4-byte staleness value follows).
    res3 = struct.unpack_from(sctp.cause_stale_cookie._PACK_STR, buf)
    eq_(sctp.cause_stale_cookie.cause_code(), res3[0])
    eq_(8, res3[1])
    eq_('\x00\x00\x13\x88',
        buf[sctp.cause_stale_cookie._MIN_LEN:
            sctp.cause_stale_cookie._MIN_LEN + 4])
    buf = buf[8:]
    # Cause 4: Out of Resource (header only, no value).
    res4 = struct.unpack_from(sctp.cause_out_of_resource._PACK_STR, buf)
    eq_(sctp.cause_out_of_resource.cause_code(), res4[0])
    eq_(4, res4[1])
    buf = buf[4:]
    # Cause 5: Unresolvable Address (embedded host-name parameter).
    res5 = struct.unpack_from(
        sctp.cause_unresolvable_addr._PACK_STR, buf)
    eq_(sctp.cause_unresolvable_addr.cause_code(), res5[0])
    eq_(20, res5[1])
    eq_('\x00\x0b\x00\x0e\x74\x65\x73\x74' +
        '\x20\x68\x6f\x73\x74\x00\x00\x00',
        buf[sctp.cause_unresolvable_addr._MIN_LEN:
            sctp.cause_unresolvable_addr._MIN_LEN + 16])
    buf = buf[20:]
    # Cause 6: Unrecognized Chunk Type (offending chunk echoed back).
    res6 = struct.unpack_from(
        sctp.cause_unrecognized_chunk._PACK_STR, buf)
    eq_(sctp.cause_unrecognized_chunk.cause_code(), res6[0])
    eq_(8, res6[1])
    eq_('\xff\x00\x00\x04',
        buf[sctp.cause_unrecognized_chunk._MIN_LEN:
            sctp.cause_unrecognized_chunk._MIN_LEN + 4])
    buf = buf[8:]
    # Cause 7: Invalid Mandatory Parameter (header only).
    res7 = struct.unpack_from(sctp.cause_invalid_param._PACK_STR, buf)
    eq_(sctp.cause_invalid_param.cause_code(), res7[0])
    eq_(4, res7[1])
    buf = buf[4:]
    # Cause 8: Unrecognized Parameters (offending parameter echoed).
    res8 = struct.unpack_from(
        sctp.cause_unrecognized_param._PACK_STR, buf)
    eq_(sctp.cause_unrecognized_param.cause_code(), res8[0])
    eq_(8, res8[1])
    eq_('\xff\xff\x00\x04',
        buf[sctp.cause_unrecognized_param._MIN_LEN:
            sctp.cause_unrecognized_param._MIN_LEN + 4])
    buf = buf[8:]
    # Cause 9: No User Data (TSN of the offending DATA chunk).
    res9 = struct.unpack_from(sctp.cause_no_userdata._PACK_STR, buf)
    eq_(sctp.cause_no_userdata.cause_code(), res9[0])
    eq_(8, res9[1])
    eq_('\x00\x01\xe2\x40',
        buf[sctp.cause_no_userdata._MIN_LEN:
            sctp.cause_no_userdata._MIN_LEN + 4])
    buf = buf[8:]
    # Cause 10: Cookie Received While Shutting Down (header only).
    res10 = struct.unpack_from(
        sctp.cause_cookie_while_shutdown._PACK_STR, buf)
    eq_(sctp.cause_cookie_while_shutdown.cause_code(), res10[0])
    eq_(4, res10[1])
    buf = buf[4:]
    # Cause 11: Restart of an Association with New Addresses.
    res11 = struct.unpack_from(
        sctp.cause_restart_with_new_addr._PACK_STR, buf)
    eq_(sctp.cause_restart_with_new_addr.cause_code(), res11[0])
    eq_(12, res11[1])
    eq_('\x00\x05\x00\x08\xc0\xa8\x01\x01',
        buf[sctp.cause_restart_with_new_addr._MIN_LEN:
            sctp.cause_restart_with_new_addr._MIN_LEN + 8])
    buf = buf[12:]
    # Cause 12: User-Initiated Abort. Declared length is 19 but the
    # buffer advances 20 bytes: causes are padded to 4-byte multiples.
    res12 = struct.unpack_from(
        sctp.cause_user_initiated_abort._PACK_STR, buf)
    eq_(sctp.cause_user_initiated_abort.cause_code(), res12[0])
    eq_(19, res12[1])
    eq_('Key Interrupt.\x00',
        buf[sctp.cause_user_initiated_abort._MIN_LEN:
            sctp.cause_user_initiated_abort._MIN_LEN + 15])
    buf = buf[20:]
    # Cause 13: Protocol Violation (final cause; no further advance).
    res13 = struct.unpack_from(
        sctp.cause_protocol_violation._PACK_STR, buf)
    eq_(sctp.cause_protocol_violation.cause_code(), res13[0])
    eq_(20, res13[1])
    eq_('Unknown reason.\x00',
        buf[sctp.cause_protocol_violation._MIN_LEN:
            sctp.cause_protocol_violation._MIN_LEN + 16])
def test_serialize_with_cookie_echo(self):
    """Serialize a COOKIE ECHO chunk; check header plus cookie bytes."""
    self.setUp_with_cookie_echo()
    buf = self._test_serialize()
    header = struct.unpack_from(sctp.chunk_cookie_echo._PACK_STR, buf)
    expected = (sctp.chunk_cookie_echo.chunk_type(),
                self.flags, self.length)
    for want, got in zip(expected, header):
        eq_(want, got)
    start = sctp.chunk_cookie_echo._MIN_LEN
    eq_(self.cookie, buf[start:start + 4])
def test_serialize_with_cookie_ack(self):
    """Serialize a COOKIE ACK chunk and verify its header."""
    self.setUp_with_cookie_ack()
    buf = self._test_serialize()
    header = struct.unpack_from(sctp.chunk_cookie_ack._PACK_STR, buf)
    expected = (sctp.chunk_cookie_ack.chunk_type(),
                self.flags, self.length)
    for want, got in zip(expected, header):
        eq_(want, got)
def test_serialize_with_ecn_echo(self):
    """Serialize an ECN ECHO chunk; header fields plus lowest TSN."""
    self.setUp_with_ecn_echo()
    buf = self._test_serialize()
    header = struct.unpack_from(sctp.chunk_ecn_echo._PACK_STR, buf)
    expected = (sctp.chunk_ecn_echo.chunk_type(),
                self.flags, self.length, self.low_tsn)
    for want, got in zip(expected, header):
        eq_(want, got)
def test_serialize_with_cwr(self):
    """Serialize a CWR chunk; header fields plus lowest TSN."""
    self.setUp_with_cwr()
    buf = self._test_serialize()
    header = struct.unpack_from(sctp.chunk_cwr._PACK_STR, buf)
    expected = (sctp.chunk_cwr.chunk_type(),
                self.flags, self.length, self.low_tsn)
    for want, got in zip(expected, header):
        eq_(want, got)
def test_serialize_with_shutdown_complete(self):
    """Serialize a SHUTDOWN COMPLETE chunk and verify its header."""
    self.setUp_with_shutdown_complete()
    buf = self._test_serialize()
    header = struct.unpack_from(
        sctp.chunk_shutdown_complete._PACK_STR, buf)
    # The T flag occupies bit 0 of the flags byte.
    expected = (sctp.chunk_shutdown_complete.chunk_type(),
                self.tflag << 0, self.length)
    for want, got in zip(expected, header):
        eq_(want, got)
def test_serialize_with_multi_chunks(self):
    """Serialize a packet holding SACK + two DATA chunks; verify each.

    The buffer is advanced by each chunk's own length field so the
    next chunk always sits at offset 0.
    """
    self.setUp_with_multi_chunks()
    buf = self._test_serialize()
    # First chunk: SACK.
    res = struct.unpack_from(sctp.chunk_sack._PACK_STR, buf)
    eq_(sctp.chunk_sack.chunk_type(), res[0])
    eq_(self.s_flags, res[1])
    eq_(self.s_length, res[2])
    eq_(self.s_tsn_ack, res[3])
    eq_(self.s_a_rwnd, res[4])
    eq_(self.s_gapack_num, res[5])
    eq_(self.s_duptsn_num, res[6])
    buf = buf[self.s_length:]
    # Second chunk: first DATA chunk.
    res = struct.unpack_from(sctp.chunk_data._PACK_STR, buf)
    eq_(sctp.chunk_data.chunk_type(), res[0])
    # Unordered / beginning / ending bits packed into the flags byte.
    d1_flags = (
        (self.d1_unordered << 2) |
        (self.d1_begin << 1) |
        (self.d1_end << 0))
    eq_(d1_flags, res[1])
    eq_(self.d1_length, res[2])
    eq_(self.d1_tsn, res[3])
    eq_(self.d1_sid, res[4])
    eq_(self.d1_seq, res[5])
    eq_(self.d1_payload_id, res[6])
    eq_(self.d1_payload_data,
        buf[sctp.chunk_data._MIN_LEN:
            sctp.chunk_data._MIN_LEN + 10])
    buf = buf[self.d1_length:]
    # Third chunk: second DATA chunk (same layout as the first).
    res = struct.unpack_from(sctp.chunk_data._PACK_STR, buf)
    eq_(sctp.chunk_data.chunk_type(), res[0])
    d2_flags = (
        (self.d2_unordered << 2) |
        (self.d2_begin << 1) |
        (self.d2_end << 0))
    eq_(d2_flags, res[1])
    eq_(self.d2_length, res[2])
    eq_(self.d2_tsn, res[3])
    eq_(self.d2_sid, res[4])
    eq_(self.d2_seq, res[5])
    eq_(self.d2_payload_id, res[6])
    eq_(self.d2_payload_data,
        buf[sctp.chunk_data._MIN_LEN:
            sctp.chunk_data._MIN_LEN + 10])
def test_build_sctp(self):
    """Stack eth/ip4/sctp and confirm each protocol parses back out."""
    pkt = (ethernet.ethernet('00:aa:aa:aa:aa:aa', '00:bb:bb:bb:bb:bb',
                             ether.ETH_TYPE_IP)
           / ipv4.ipv4(4, 5, 16, 0, 0, 2, 0, 64, inet.IPPROTO_SCTP, 0,
                       '192.168.1.1', '10.144.1.1')
           / self.sc)
    eth = pkt.get_protocol(ethernet.ethernet)
    ok_(eth)
    eq_(eth.ethertype, ether.ETH_TYPE_IP)
    ip4 = pkt.get_protocol(ipv4.ipv4)
    ok_(ip4)
    eq_(ip4.proto, inet.IPPROTO_SCTP)
    sc = pkt.get_protocol(sctp.sctp)
    ok_(sc)
    eq_(sc, self.sc)
# Each test below swaps in a different chunk fixture via its
# setUp_with_* helper and then delegates to test_build_sctp for the
# actual build/parse round-trip assertions.
def test_build_sctp_with_data(self):
    self.setUp_with_data()
    self.test_build_sctp()

def test_build_sctp_with_init(self):
    self.setUp_with_init()
    self.test_build_sctp()

def test_build_sctp_with_init_ack(self):
    self.setUp_with_init_ack()
    self.test_build_sctp()

def test_build_sctp_with_sack(self):
    self.setUp_with_sack()
    self.test_build_sctp()

def test_build_sctp_with_heartbeat(self):
    self.setUp_with_heartbeat()
    self.test_build_sctp()

def test_build_sctp_with_heartbeat_ack(self):
    self.setUp_with_heartbeat_ack()
    self.test_build_sctp()

def test_build_sctp_with_abort(self):
    self.setUp_with_abort()
    self.test_build_sctp()

def test_build_sctp_with_shutdown(self):
    self.setUp_with_shutdown()
    self.test_build_sctp()

def test_build_sctp_with_shutdown_ack(self):
    self.setUp_with_shutdown_ack()
    self.test_build_sctp()

def test_build_sctp_with_error(self):
    self.setUp_with_error()
    self.test_build_sctp()

def test_build_sctp_with_cookie_echo(self):
    self.setUp_with_cookie_echo()
    self.test_build_sctp()

def test_build_sctp_with_cookie_ack(self):
    self.setUp_with_cookie_ack()
    self.test_build_sctp()

def test_build_sctp_with_ecn_echo(self):
    self.setUp_with_ecn_echo()
    self.test_build_sctp()

def test_build_sctp_with_cwr(self):
    self.setUp_with_cwr()
    self.test_build_sctp()

def test_build_sctp_with_shutdown_complete(self):
    self.setUp_with_shutdown_complete()
    self.test_build_sctp()
def test_build_sctp_with_multi_chunks(self):
    """Round-trip build test for the multi-chunk fixture.

    Bug fix: the method was originally named
    ``tset_build_sctp_with_multi_chunks`` — the ``tset_`` typo meant
    test runners never collected it, so this case silently never ran.
    """
    self.setUp_with_multi_chunks()
    self.test_build_sctp()

# Backward-compatible alias under the original (misspelled) name, in
# case anything referenced it directly.
tset_build_sctp_with_multi_chunks = test_build_sctp_with_multi_chunks
def test_to_string(self):
    """str()/repr() must render every field, in getmembers order."""
    sctp_values = {'src_port': self.src_port,
                   'dst_port': self.dst_port,
                   'vtag': self.vtag,
                   'csum': self.csum,
                   'chunks': self.chunks}
    # inspect.getmembers yields members sorted by name; keep that order.
    fields = ['%s=%s' % (name, sctp_values[name])
              for name, _ in inspect.getmembers(self.sc)
              if name in sctp_values]
    expected = '%s(%s)' % (sctp.sctp.__name__, ','.join(fields))
    eq_(str(self.sc), expected)
    eq_(repr(self.sc), expected)
# Each test below swaps in a different chunk fixture via its
# setUp_with_* helper and then delegates to test_to_string for the
# actual str()/repr() assertions.
def test_to_string_with_data(self):
    self.setUp_with_data()
    self.test_to_string()

def test_to_string_with_init(self):
    self.setUp_with_init()
    self.test_to_string()

def test_to_string_with_init_ack(self):
    self.setUp_with_init_ack()
    self.test_to_string()

def test_to_string_with_sack(self):
    self.setUp_with_sack()
    self.test_to_string()

def test_to_string_with_heartbeat(self):
    self.setUp_with_heartbeat()
    self.test_to_string()

def test_to_string_with_heartbeat_ack(self):
    self.setUp_with_heartbeat_ack()
    self.test_to_string()

def test_to_string_with_abort(self):
    self.setUp_with_abort()
    self.test_to_string()

def test_to_string_with_shutdown(self):
    self.setUp_with_shutdown()
    self.test_to_string()

def test_to_string_with_shutdown_ack(self):
    self.setUp_with_shutdown_ack()
    self.test_to_string()

def test_to_string_with_error(self):
    self.setUp_with_error()
    self.test_to_string()

def test_to_string_with_cookie_echo(self):
    self.setUp_with_cookie_echo()
    self.test_to_string()

def test_to_string_with_cookie_ack(self):
    self.setUp_with_cookie_ack()
    self.test_to_string()

def test_to_string_with_ecn_echo(self):
    self.setUp_with_ecn_echo()
    self.test_to_string()

def test_to_string_with_cwr(self):
    self.setUp_with_cwr()
    self.test_to_string()

def test_to_string_with_shutdown_complete(self):
    self.setUp_with_shutdown_complete()
    self.test_to_string()

def test_to_string_with_multi_chunks(self):
    self.setUp_with_multi_chunks()
    self.test_to_string()
def test_json(self):
    """to_jsondict/from_jsondict round trip must preserve str()."""
    rebuilt = sctp.sctp.from_jsondict(self.sc.to_jsondict()['sctp'])
    eq_(str(self.sc), str(rebuilt))
# Each test below swaps in a different chunk fixture via its
# setUp_with_* helper and then delegates to test_json for the
# actual JSON round-trip assertion.
def test_json_with_data(self):
    self.setUp_with_data()
    self.test_json()

def test_json_with_init(self):
    self.setUp_with_init()
    self.test_json()

def test_json_with_init_ack(self):
    self.setUp_with_init_ack()
    self.test_json()

def test_json_with_sack(self):
    self.setUp_with_sack()
    self.test_json()

def test_json_with_heartbeat(self):
    self.setUp_with_heartbeat()
    self.test_json()

def test_json_with_heartbeat_ack(self):
    self.setUp_with_heartbeat_ack()
    self.test_json()

def test_json_with_abort(self):
    self.setUp_with_abort()
    self.test_json()

def test_json_with_shutdown(self):
    self.setUp_with_shutdown()
    self.test_json()

def test_json_with_shutdown_ack(self):
    self.setUp_with_shutdown_ack()
    self.test_json()

def test_json_with_error(self):
    self.setUp_with_error()
    self.test_json()

def test_json_with_cookie_echo(self):
    self.setUp_with_cookie_echo()
    self.test_json()

def test_json_with_cookie_ack(self):
    self.setUp_with_cookie_ack()
    self.test_json()

def test_json_with_ecn_echo(self):
    self.setUp_with_ecn_echo()
    self.test_json()

def test_json_with_cwr(self):
    self.setUp_with_cwr()
    self.test_json()

def test_json_with_shutdown_complete(self):
    self.setUp_with_shutdown_complete()
    self.test_json()

def test_json_with_multi_chunks(self):
    self.setUp_with_multi_chunks()
    self.test_json()
| 34.453232
| 78
| 0.605689
| 7,182
| 50,095
| 3.895712
| 0.052632
| 0.032596
| 0.044605
| 0.058329
| 0.832553
| 0.792523
| 0.754673
| 0.727689
| 0.678366
| 0.608814
| 0
| 0.071563
| 0.268889
| 50,095
| 1,453
| 79
| 34.476944
| 0.692369
| 0.013255
| 0
| 0.592718
| 0
| 0.013548
| 0.081537
| 0.055451
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102456
| false
| 0.000847
| 0.011008
| 0
| 0.115157
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b4336d3ef323c89a0fff7cb7303fcc2deb60c1ca
| 103
|
py
|
Python
|
script.service.hue/resources/lib/qhue/__init__.py
|
toupeira/script.service.hue
|
c2811ef792b67084ab2e82c91aaa0947d9cb90d3
|
[
"MIT"
] | null | null | null |
script.service.hue/resources/lib/qhue/__init__.py
|
toupeira/script.service.hue
|
c2811ef792b67084ab2e82c91aaa0947d9cb90d3
|
[
"MIT"
] | null | null | null |
script.service.hue/resources/lib/qhue/__init__.py
|
toupeira/script.service.hue
|
c2811ef792b67084ab2e82c91aaa0947d9cb90d3
|
[
"MIT"
] | null | null | null |
from .qhue import Bridge, QhueException#, create_new_username
#from .qhue_remote import RemoteBridge
| 34.333333
| 62
| 0.825243
| 13
| 103
| 6.307692
| 0.769231
| 0.195122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116505
| 103
| 2
| 63
| 51.5
| 0.901099
| 0.563107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b44eacd7725398b4038249159a8891c620fa37bf
| 1,095
|
py
|
Python
|
Config/ConfigGetter.py
|
lkeme/NeteaseMusicLottery
|
5bae0eb78a786acad0b9fa2c7df1d51eed0796e9
|
[
"MIT"
] | 27
|
2019-03-11T10:03:46.000Z
|
2022-02-18T06:47:20.000Z
|
Config/ConfigGetter.py
|
lkeme/NeteaseMusicLottery
|
5bae0eb78a786acad0b9fa2c7df1d51eed0796e9
|
[
"MIT"
] | 3
|
2019-08-18T02:47:46.000Z
|
2022-02-18T08:14:29.000Z
|
Config/ConfigGetter.py
|
lkeme/NeteaseMusicLottery
|
5bae0eb78a786acad0b9fa2c7df1d51eed0796e9
|
[
"MIT"
] | 4
|
2019-07-29T04:43:02.000Z
|
2020-04-16T04:44:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
- author: Lkeme
- contact: Useri@live.cn
- file: ConfigGetter
- time: 2019/10/21 18:01
- desc:
"""
from Util import LazyProperty
from Config.setting import *
class ConfigGetter(object):
    """Expose settings from Config.setting as lazily-resolved attributes."""

    def __init__(self):
        pass

    def _db_setting(self, key, fallback):
        # All DB options live under DATABASES["default"]; fall back to
        # the given default when the key (or the section) is absent.
        return DATABASES.get("default", {}).get(key, fallback)

    @LazyProperty
    def db_name(self):
        return self._db_setting("DATABASE", "netease")

    @LazyProperty
    def db_host(self):
        return self._db_setting("HOST", "localhost")

    @LazyProperty
    def db_port(self):
        return self._db_setting("PORT", 3306)

    @LazyProperty
    def db_user(self):
        return self._db_setting("USERNAME", "root")

    @LazyProperty
    def db_password(self):
        return self._db_setting("PASSWORD", "123456")

    @LazyProperty
    def user_accounts(self):
        return ACCOUNTS

    @LazyProperty
    def notification(self):
        return NOTIFICATION
# Module-level singleton: importers use this rather than instantiating
# ConfigGetter themselves.
config = ConfigGetter()
if __name__ == '__main__':
    # Smoke check: print the configured accounts when run directly.
    print(config.user_accounts)
| 19.909091
| 70
| 0.621918
| 121
| 1,095
| 5.471074
| 0.46281
| 0.15861
| 0.128399
| 0.166163
| 0.241692
| 0.241692
| 0
| 0
| 0
| 0
| 0
| 0.027155
| 0.226484
| 1,095
| 54
| 71
| 20.277778
| 0.754427
| 0.136073
| 0
| 0.241379
| 0
| 0
| 0.109544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.275862
| false
| 0.103448
| 0.068966
| 0.241379
| 0.62069
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
b45b3fcf1047d1cf8502fe6f6a4bf3d750f5030d
| 117
|
py
|
Python
|
budget_tool/admin.py
|
joyliao07/budget_tool
|
a20974f47d5bfa8ef2ef285f57c7e1aafde42f29
|
[
"MIT"
] | null | null | null |
budget_tool/admin.py
|
joyliao07/budget_tool
|
a20974f47d5bfa8ef2ef285f57c7e1aafde42f29
|
[
"MIT"
] | 6
|
2019-01-22T03:54:53.000Z
|
2019-01-25T04:49:18.000Z
|
budget_tool/admin.py
|
joyliao07/budget_tool
|
a20974f47d5bfa8ef2ef285f57c7e1aafde42f29
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Budget, Transaction
# Register both models with the default admin site in a single call
# (register accepts an iterable of model classes).
admin.site.register((Budget, Transaction))
| 23.4
| 42
| 0.811966
| 15
| 117
| 6.333333
| 0.666667
| 0.357895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 117
| 4
| 43
| 29.25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
81fee41469c604b6f06973ce5c097a0bb370faa2
| 45
|
py
|
Python
|
psdet/models/point_detector/__init__.py
|
Jiaolong/gcn-parking-slot
|
f8c3b445b186e3a7fd13af1f17fa5ba0336027c7
|
[
"MIT"
] | 56
|
2021-03-24T08:24:27.000Z
|
2022-03-26T13:56:36.000Z
|
psdet/models/point_detector/__init__.py
|
Jiaolong/gcn-parking-slot
|
f8c3b445b186e3a7fd13af1f17fa5ba0336027c7
|
[
"MIT"
] | 7
|
2021-04-05T03:55:05.000Z
|
2022-03-08T03:12:20.000Z
|
psdet/models/point_detector/__init__.py
|
Jiaolong/gcn-parking-slot
|
f8c3b445b186e3a7fd13af1f17fa5ba0336027c7
|
[
"MIT"
] | 17
|
2021-04-04T02:42:09.000Z
|
2022-03-31T01:48:06.000Z
|
from .detector_base import PointDetectorBase
| 22.5
| 44
| 0.888889
| 5
| 45
| 7.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
81ff8f45d490cb1946bc863a3f74861b5ad5028e
| 170
|
py
|
Python
|
tests/web_platform/css_flexbox_1/test_flex_direction_row_vertical.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flex_direction_row_vertical.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flex_direction_row_vertical.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T01:56:41.000Z
|
2020-01-16T01:56:41.000Z
|
from tests.utils import W3CTestCase
class TestFlexDirectionRowVertical(W3CTestCase):
    # W3CTestCase.find_tests generates one test method per fixture file
    # matching 'flex-direction-row-vertical'; vars().update injects them
    # into this class's namespace so the runner can discover them.
    vars().update(W3CTestCase.find_tests(__file__, 'flex-direction-row-vertical'))
| 28.333333
| 82
| 0.811765
| 18
| 170
| 7.388889
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.082353
| 170
| 5
| 83
| 34
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.159763
| 0.159763
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c342784c81187a7c1a03f7efe6a3750b08a88a0e
| 34,517
|
py
|
Python
|
billiards/billiards/management/commands/importcoupons.py
|
zxkane/billiards
|
809a37b111a0fdbf7a2b1176149256b93c43045f
|
[
"Apache-1.1"
] | null | null | null |
billiards/billiards/management/commands/importcoupons.py
|
zxkane/billiards
|
809a37b111a0fdbf7a2b1176149256b93c43045f
|
[
"Apache-1.1"
] | null | null | null |
billiards/billiards/management/commands/importcoupons.py
|
zxkane/billiards
|
809a37b111a0fdbf7a2b1176149256b93c43045f
|
[
"Apache-1.1"
] | 1
|
2021-02-08T13:19:34.000Z
|
2021-02-08T13:19:34.000Z
|
# -*- coding: utf-8 -*-
# encoding: utf-8
'''
Created on 2014年2月19日
@author: kane
'''
from django.core.management.base import NoArgsCommand
from billiards.models import Coupon
import re
from datetime import datetime
class Command(NoArgsCommand):
# One-off Django management command (legacy NoArgsCommand base, pre-Django-1.8
# era, Python 2 syntax elsewhere in this file) that bulk-loads a hardcoded
# list of Beijing billiards-hall group-buy coupon deals (Meituan / Nuomi /
# 55tuan / Dianping links) into the Coupon table.
help = 'Import coupons from hardcode to DB'
def handle(self, *args, **options):
# Each record carries: pk (passed as poolroom_id below, so it appears to be
# the related poolroom's id rather than the coupon's own pk — TODO confirm
# against the Coupon model), title, description, a validity-period string in
# one of several Chinese/ISO-ish date formats, Baidu-map coordinates, phone,
# address, and the deal URL.  Only pk, title, description, time and link are
# consumed by this command; baidu_lat/baidu_lng/tel/address are unused here.
coupons = [
{"pk":"25", "title":"团购:北京迈8赫台球会所, 17.8元,畅打两小时", "description":"17.8元,畅打两小时", "time":"2013年9月7日 至 2014年3月6日", "baidu_lat":"40.0135572", "baidu_lng":"116.4147791", "tel":"010-84802532", "address":"朝阳区安立路九台2000家园地下一层", "link":"http://bj.meituan.com/deal/9340453.html"},
{"pk":"70", "title":"团购:北京隆轩台球俱乐部(望京), 19.9元,畅打两小时", "description":"19.9元,畅打两小时", "time":"2013.10.19 至 2014.7.17", "baidu_lat":"39.9849635", "baidu_lng":"116.4750495", "tel":"010-64728646", "address":"北京望京花家地南里5号", "link":"http://bj.meituan.com/deal/7191716.html"},
{"pk":"95", "title":"团购:北京堂棒棒台球(朝外大街), 30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013.11.19 至 2014.2.18", "baidu_lat":"39.9257578", "baidu_lng":"116.4492575", "tel":"010-85694103", "address":"北京朝阳区日坛北路17号日坛国际贸易中心地下一层商业街星光大道北B088", "link":"http://bj.meituan.com/deal/8060768.html"},
{"pk":"54", "title":"团购:北京七星岛8号台球俱乐部(牡丹园/北太平庄),38元,畅打三小时", "description":"38元,畅打三小时", "time":"2014.1.6 至 2014.7.5", "baidu_lat":"39.9836187", "baidu_lng":"116.3743070", "tel":"15701004091", "address":"北京海淀区牡丹园翠微商场院内", "link":"http://bj.meituan.com/deal/9362028.html"},
{"pk":"92", "title":"团购:北京远望谷台球会所(紫竹桥),22元,畅打三小时", "description":"22元,畅打三小时", "time":"2013.11.13 至 2014.11.12", "baidu_lat":"39.9454621", "baidu_lng":"116.3203192", "tel":"010-62651088", "address":"北京海淀区西三环北路50号豪柏大厦C2座1-103室(紫竹桥东南角)", "link":"http://bj.meituan.com/deal/3654399.html"},
{"pk":"55", "title":"团购:北京奥亨台球(草桥/公益西桥),18元,畅打三小时", "description":"18元,畅打三小时", "time":"2013.8.20 至 2014.2.19", "baidu_lat":"39.8438451", "baidu_lng":"116.3769232", "tel":"010-67529905", "address":"北京丰台区马家堡西路星河苑2号院22号楼地下一层", "link":"http://bj.meituan.com/deal/7737048.html"},
{"pk":"30", "title":"团购:北京海格台球俱乐部(宣武门),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2013.9.5 至 2014.3.31", "baidu_lat":"39.9009997", "baidu_lng":"116.3816211", "tel":"010-63183990", "address":"北京西城区宣武门外大街20号海格国际酒店地下2层", "link":"http://bj.meituan.com/deal/4211247.html"},
{"pk":"28", "title":"团购:北京潘晓婷台球俱乐部(潘家园),19.8元,畅打两小时", "description":"19.8元,畅打两小时", "time":"2013年10月22日-2014年03月12日", "baidu_lat":"39.8752954", "baidu_lng":"116.4658044", "tel":"010-65305655 & 010-67628288", "address":"北京朝阳区东三环南路辅路联合国际大厦地下一层", "link":"http://bj.nuomi.com/deal/obyoqflp.html"},
{"pk":"40", "title":"团购:北京球动力台球连锁俱乐部(立水桥),28.8元,畅打三小时", "description":"28.8元,畅打三小时", "time":"2013年11月20日至2014年02月20日", "baidu_lat":"40.0639378", "baidu_lng":"116.4217211", "tel":"010-57733777", "address":"北京昌平区立水桥明珠奥特莱斯中心广场地下一层 ", "link":"http://bj.nuomi.com/deal/fez5n2em.html"},
{"pk":"34", "title":"团购:北京领航者台球俱乐部(菜市口),17.9元,畅打一小时", "description":"17.9元,畅打一小时", "time":"2013年10月22日-2014年03月03日", "baidu_lat":"39.8885423", "baidu_lng":"116.3994264", "tel":"010-63013337", "address":"北京西城区东经路禄长街2条2号(速8天桥店B一层)", "link":"http://bj.nuomi.com/deal/7xqwdcl7.html"},
{"pk":"48", "title":"团购:北京昊天台球俱乐部(虎坊桥),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2013年10月14日-2014年06月30日 ", "baidu_lat":"39.8896118", "baidu_lng":"116.3893745", "tel":"010-56155999", "address":"北京西城区虎坊路陶然北岸160-4号(近清华池)", "link":"http://bj.nuomi.com/deal/wt357lru.html"},
{"pk":"42", "title":"团购:北京寻梦港台球俱乐部(回龙观),35元,畅打两小时", "description":"35元,畅打两小时", "time":"2013年10月29日至2014年03月05日", "baidu_lat":"40.0847300", "baidu_lng":"116.3392966", "tel":"13601248756", "address":"北京昌平区回龙观镇回龙观西大街18号2段1层", "link":"http://bj.nuomi.com/deal/esp4nfrt.html"},
{"pk":"58", "title":"团购:北京猫头鹰台球俱乐部(酒仙桥),18.8元,畅打两小时", "description":"18.8元,畅打两小时", "time":"2013年10月18日-2014年04月15日", "baidu_lat":"39.9722142", "baidu_lng":"116.4973278", "tel":"010-51306858", "address":"北京朝阳区酒仙桥路26号晶都国际酒店B1楼", "link":"http://beijing.55tuan.com/goods-6a17a8a990c5df4d.html"},
{"pk":"33", "title":"团购:北京博睿夜时尚台球俱乐部(西城区),7.9元,畅打一小时", "description":"7.9元,畅打一小时", "time":"2014.1.9 至 2014.4.8", "baidu_lat":"39.8893789", "baidu_lng":"116.3578907", "tel":"010-52885044", "address":"北京西城区枣林前街145号(白纸坊桥向北200米辅路东)易尚诺林大酒店B1层", "link":"http://bj.meituan.com/deal/8829545.html"},
{"pk":"117", "title":"团购:北京高谷台球俱乐部(4店通用),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2014.1.10 至 2014.4.9", "baidu_lat":"39.9738183", "baidu_lng":"116.4473156", "tel":"010-64220811", "address":"北京朝阳区西坝河北里7号院(国美电器院内)", "link":"http://bj.meituan.com/deal/6420513.html"},
{"pk":"116", "title":"团购:北京高谷台球俱乐部(4店通用),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2014.1.10 至 2014.4.9", "baidu_lat":"39.9598277", "baidu_lng":"116.3877299", "tel":"010-82066296", "address":"北京市西城区德胜门外教场口街9号B1楼", "link":"http://bj.meituan.com/deal/6420513.html"},
{"pk":"115", "title":"团购:北京高谷台球俱乐部(4店通用),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2014.1.10 至 2014.4.9", "baidu_lat":"39.9046925", "baidu_lng":"116.3848733", "tel":"010-63028058", "address":"北京西城区香炉营头条33号B1楼(庄胜崇光百货东侧)", "link":"http://bj.meituan.com/deal/6420513.html"},
{"pk":"80", "title":"团购:北京高谷台球俱乐部(4店通用),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2014.1.10 至 2014.4.9", "baidu_lat":"39.8981556", "baidu_lng":"116.3481725", "tel":"010-63456958", "address":"北京宣武区广安门外小马厂路1号院2号楼3层", "link":"http://bj.meituan.com/deal/6420513.html"},
{"pk":"118", "title":"团购:北京东方球动力台球俱乐部(劲松),17.9元,畅打两小时", "description":"17.9元,畅打两小时", "time":"2013.6.14 至 2014.3.28", "baidu_lat":"39.8908983", "baidu_lng":"116.4687451", "tel":"010-58672882", "address":"北京朝阳区东三环南路58号院3号楼(劲松桥东北角富顿中心院内C座地下一层)", "link":"http://bj.meituan.com/deal/8338874.html"},
{"pk":"119", "title":"团购:北京天瞳世家桌球会所(建外大街),36元,畅打三小时+可乐雪碧任选两瓶", "description":"36元,畅打3小时+可乐雪碧任选2瓶", "time":"2014.2.8 至 2014.8.7", "baidu_lat":"39.906204", "baidu_lng":"116.443198", "tel":"010-85677899", "address":"北京朝阳区建外大街16号东方瑞景B1层", "link":"http://bj.meituan.com/deal/6732500.html"},
{"pk":"67", "title":"团购:北京海岸桌球俱乐部(中关村),9.9元,畅打一小时", "description":"9.9元,畅打一小时", "time":"2013.4.26 至 2014.6.30", "baidu_lat":"39.9877953", "baidu_lng":"116.3119912", "tel":"010-82569505", "address":"北京海淀区苏州街3号大河庄苑4号楼B1楼(近银科大厦)", "link":"http://bj.meituan.com/deal/9676285.html"},
{"pk":"120", "title":"团购:北京夜时尚台球俱乐部北土城店(安贞),22元,畅打三小时", "description":"22元,畅打三小时", "time":"2014.1.11 至 2014.4.10", "baidu_lat":"39.9792718", "baidu_lng":"116.3942505", "tel":"010-56126947", "address":"北京朝阳区裕民东里甲1号", "link":"http://bj.meituan.com/deal/3700379.html"},
{"pk":"121", "title":"团购:北京深度撞击台球俱乐部(紫竹桥),28元,畅打三小时", "description":"28元,畅打三小时", "time":"2013.4.10 至 2014.4.10", "baidu_lat":"39.9498499", "baidu_lng":"116.3101027", "tel":"010-88553318", "address":"北京海淀区紫竹院路88号紫竹花园F座B1楼", "link":"http://bj.meituan.com/deal/4901769.html"},
{"pk":"122", "title":"团购:北京球斯卡台球俱乐部朝外旗舰店(朝阳区),18元,畅打三小时", "description":"18元,畅打三小时", "time":"2013.11.27 至 2014.6.26", "baidu_lat":"39.9305889", "baidu_lng":"116.4447598", "tel":"010-65801148", "address":"北京朝阳区朝外大街19号华普大厦B1层", "link":"http://bj.meituan.com/deal/1938412.html"},
{"pk":"123", "title":"团购:北京夜时尚台球对外经贸店(对外经贸),38元,畅打三小时", "description":"38元,畅打三小时", "time":"2013.11.28 至 2014.2.27", "baidu_lat":"39.9828042", "baidu_lng":"116.4333529", "tel":"010-84639810", "address":"北京朝阳区芍药居元大都对外经贸大学南门对面", "link":"http://bj.meituan.com/deal/5776951.html"},
{"pk":"124", "title":"团购:北京8号炫酷台球俱乐部(上地),35.9元,畅打两小时", "description":"35.9元,畅打两小时", "time":"2013.10.28 至 2014.2.27", "baidu_lat":"40.0389396", "baidu_lng":"116.3255305", "tel":"010-82783058", "address":"北京海淀区上地佳园底商36号(上地城铁站对面)", "link":"http://bj.meituan.com/deal/7356822.html"},
{"pk":"125", "title":"团购:北京悠悠台球俱乐部(回龙观),19.9元,畅打两小时", "description":"19.9元,畅打两小时", "time":"2013.4.18 至 2014.3.15", "baidu_lat":"40.0781526", "baidu_lng":"116.3248362", "tel":"010-80779687", "address":"北京昌平区回龙观龙泽苑综合楼北配楼", "link":"http://bj.meituan.com/deal/4037338.html"},
{"pk":"126", "title":"团购:北京夜时尚台球俱乐部立水桥店(立水桥),38元,畅打两小时", "description":"38元,畅打两小时", "time":"2013.6.20 至 2014.4.8", "baidu_lat":"40.0539800", "baidu_lng":"116.4170132", "tel":"010-84671325", "address":"北京朝阳区立清路8号明天第一城7号院蓝黛时空汇B1楼(近立军路)", "link":"http://bj.meituan.com/deal/4322861.html"},
{"pk":"127", "title":"团购:北京技艺台球俱乐部(东城区),25元,畅打三小时", "description":"25元,畅打三小时", "time":"2013.5.8 至 2014.5.5", "baidu_lat":"39.9036978", "baidu_lng":"116.4399315", "tel":"18612883001", "address":"北京东城区东花市大街35号旁", "link":"http://bj.meituan.com/deal/4828388.html"},
{"pk":"128", "title":"团购:北京晟世台球俱乐部(和平里),8.8元,畅打一小时", "description":"8.8元,畅打一小时", "time":"2014.1.29 至 2014.4.28", "baidu_lat":"39.9636304", "baidu_lng":"116.4259469", "tel":"010-84217770", "address":"北京东城区和平里中街19号天元和平商业大厦B1楼", "link":"http://bj.meituan.com/deal/1664990.html"},
{"pk":"129", "title":"团购:北京酷塞台球俱乐部(黄村),29.9元,畅打两小时", "description":"29.9元,畅打两小时", "time":"2014.1.8 至 2014.6.7", "baidu_lat":"39.7592469", "baidu_lng":"116.3405489", "tel":"010-69248188", "address":"北京大兴区小营路北(聚莎苑酒店院内)", "link":"http://bj.meituan.com/deal/6145824.html"},
{"pk":"130", "title":"团购:北京万赢亿胜台球俱乐部(昌平),24元,畅打三小时", "description":"24元,畅打三小时", "time":"2013.9.7 至 2014.2.28", "baidu_lat":"40.2189029", "baidu_lng":"116.2549190", "tel":"010-80107398", "address":"北京昌平区南环东路32-5号(东关南里小区南门对面)", "link":"http://bj.meituan.com/deal/2937309.html"},
{"pk":"131", "title":"团购:北京天陆台球俱乐部(大钟寺),16元,畅打两小时", "description":"16元,畅打两小时", "time":"2013.8.6 至 2014.8.6", "baidu_lat":"39.9682577", "baidu_lng":"116.3490755", "tel":"010-62139909", "address":"北京海淀区四道口路甲5号文林大厦B2楼", "link":"http://bj.meituan.com/deal/5481053.html"},
{"pk":"132", "title":"团购:北京奥亨黑八台球俱乐部劲松店(劲松),8.8元,畅打八小时", "description":"8.8元,畅打八小时", "time":"2014.1.8 至 2014.4.7", "baidu_lat":"39.8885129", "baidu_lng":"116.4672264", "tel":"010-67739348", "address":"北京朝阳区劲松三区328楼地下一层(劲松地铁站D口加油站旁)", "link":"http://bj.meituan.com/deal/5381708.html"},
{"pk":"133", "title":"团购:北京球动力总部样板店(小营),19.8元,畅打两小时", "description":"19.8元,畅打两小时", "time":"2013.9.26 至 2014.4.24", "baidu_lat":"40.0081107", "baidu_lng":"116.3795949", "tel":"010-58236022", "address":"北京朝阳区大屯路甲166号欧陆经典风格派(近欧陆经典)", "link":"http://bj.meituan.com/deal/1874810.html"},
{"pk":"134", "title":"团购:北京卓凡台球俱乐部(建国门/北京站),17.9元,畅打三小时", "description":"17.9元,畅打三小时", "time":"2014.1.17 至 2014.7.16", "baidu_lat":"39.9050354", "baidu_lng":"116.4453862", "tel":"010-51236791", "address":"北京东城区白桥大街2号(如家酒店北门地下二层卓凡台球俱乐部)", "link":"http://bj.meituan.com/deal/9539588.html"},
{"pk":"135", "title":"团购:北京亚星台球俱乐部(望京),14.9元,畅打两小时", "description":"14.9元,畅打两小时", "time":"2013.10.1 至 2014.2.28", "baidu_lat":"40.0078049", "baidu_lng":"116.4691712", "tel":"010-84723343", "address":"北京朝阳区南湖中园130号B1楼", "link":"http://bj.meituan.com/deal/3862360.html"},
{"pk":"136", "title":"团购:北京夜时尚台球俱乐部万柳桥首经贸店(夏家胡同/纪家庙),29.8元,畅打三小时", "description":"29.8元,畅打三小时", "time":"2013.6.25 至 2014.3.24", "baidu_lat":"39.8542831", "baidu_lng":"116.3265186", "tel":"010-83615939", "address":"北京丰台区丰台东路育芳园19号新时特购物广场B1(距地铁10号线首经贸站约450米)", "link":"http://bj.meituan.com/deal/6041788.html"},
{"pk":"51", "title":"团购:北京忘忧地带台球俱乐部(紫竹桥),29.9元,畅打三小时", "description":"29.9元,畅打三小时", "time":"2013.10.25 至 2014.10.24", "baidu_lat":"39.9492522", "baidu_lng":"116.3184638", "tel":"010-68726737", "address":"北京海淀区紫竹桥东北角广源大厦东侧口内B1层", "link":"http://bj.meituan.com/deal/3800478.html"},
{"pk":"137", "title":"团购:北京98台球俱乐部(回龙观),12元,畅打一小时", "description":"12元,畅打一小时", "time":"2014.1.24 至 2014.3.31", "baidu_lat":"40.0881677", "baidu_lng":"116.3647790", "tel":"010-80752885", "address":"北京昌平区回龙观和谐家园二区西门", "link":"http://bj.meituan.com/deal/5129093.html"},
{"pk":"138", "title":"团购:北京益嘉盈点台球俱乐部(北下关),9.9元,畅打一小时", "description":"9.9元,畅打一小时", "time":"2013.8.22 至 2014.5.31", "baidu_lat":"39.9567882", "baidu_lng":"116.3613402", "tel":"010-62247477", "address":"北京海淀区西直门北大街甲43号金运大厦B座B1楼(中信银行金运大厦支行地下)", "link":"http://bj.meituan.com/deal/1356165.html"},
{"pk":"139", "title":"团购:北京翔天畅海台球俱乐部(牡丹园/北太平庄),19.9元,畅打两小时", "description":"19.9元,畅打两小时", "time":"2013.8.1 至 2014.8.1", "baidu_lat":"39.9733928", "baidu_lng":"116.3743438", "tel":"010-62050784", "address":"北京海淀区北三环中路32号二层(北太平桥西路南,超市发西侧2楼)", "link":"http://bj.meituan.com/deal/4786373.html"},
{"pk":"140", "title":"团购:北京后海银锭台球俱乐部(后海/什刹海),19.9元,畅打两小时", "description":"19.9元,畅打两小时", "time":"2013.10.14 至 2014.4.13", "baidu_lat":"39.9448192", "baidu_lng":"116.4015269", "tel":"010-64020227", "address":"北京西城区地安门外大街31号(后海天堂慢摇吧对面)", "link":"http://bj.meituan.com/deal/4281059.html"},
{"pk":"141", "title":"团购:北京和平8号台球俱乐部(安贞),35元,畅打四小时", "description":"35元,畅打四小时", "time":"2013.9.12 至 2014.6.9", "baidu_lat":"39.9680593", "baidu_lng":"116.4376271", "tel":"010-52182575", "address":"北京朝阳区和平街青年沟东路8号B1楼", "link":"http://bj.meituan.com/deal/2098095.html"},
{"pk":"142", "title":"团购:北京当代之光台球俱乐部(亚运村),18.8元,畅打两小时", "description":"18.8元,畅打两小时", "time":"2013.3.26 至 2014.7.23", "baidu_lat":"40.0053313", "baidu_lng":"116.4197569", "tel":"010-64969588", "address":"北京朝阳区慧中北路安慧北里逸园28号楼", "link":"http://bj.meituan.com/deal/9676931.html"},
{"pk":"143", "title":"团购:北京博登台球俱乐部(万柳),19元,畅打一小时", "description":"19元,畅打一小时", "time":"2013.11.23 至 2014.11.22", "baidu_lat":"39.9665855", "baidu_lng":"116.3089033", "tel":"010-58815361", "address":"北京海淀区长春桥路11号万柳亿城中心C座B1楼(近浏阳河大酒店)", "link":"http://bj.meituan.com/deal/6777317.html"},
{"pk":"144", "title":"团购:北京朋海园台球俱乐部(黄村),22元,畅打两小时", "description":"22元,畅打两小时", "time":"2014.1.15 至 2014.4.14", "baidu_lat":"39.7371972", "baidu_lng":"116.3416940", "tel":"010-69294327", "address":"北京大兴区西大街大兴医院9号楼地下一层", "link":"http://bj.meituan.com/deal/5386176.html"},
{"pk":"145", "title":"团购:北京520台球俱乐部(劲松),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2014.1.18 至 2015.1.17", "baidu_lat":"39.8858748", "baidu_lng":"116.4741308", "tel":"010-87379688", "address":"北京朝阳区武圣东里农光里市场斜对面206号楼(木屋烧烤楼下)地下一层", "link":"http://bj.meituan.com/deal/9895972.html"},
{"pk":"146", "title":"团购:北京星期8台球俱乐部(回龙观),12元,畅打一小时", "description":"12元,畅打一小时", "time":"2013.11.26 至 2014.2.25", "baidu_lat":"40.0968995", "baidu_lng":"116.3120102", "tel":"010-61779510", "address":"北京昌平区回龙观北农酒店西侧华北电力大学北门", "link":"http://bj.meituan.com/deal/6654307.html"},
{"pk":"148", "title":"团购:北京黑桃8撞球馆上坡家园店(3店通用),18元,畅打一小时", "description":"18元,畅打一小时", "time":"2013.10.31 至 2014.10.28", "baidu_lat":"40.0899531", "baidu_lng":"116.3702907", "tel":"18911796160", "address":"北京昌平区济远街与龙锦3街交叉路口东行20米路南B1层", "link":"http://bj.meituan.com/deal/8637559.html"},
{"pk":"147", "title":"团购:北京黑桃8撞球馆旺龙花园店(3店通用),18元,畅打一小时", "description":"18元,畅打一小时", "time":"2013.10.31 至 2014.10.28", "baidu_lat":"40.0869599", "baidu_lng":"116.3729226", "tel":"18911706160", "address":"北京昌平区东小口镇霍营旺龙花园底商11号", "link":"http://bj.meituan.com/deal/8637559.html"},
{"pk":"149", "title":"团购:北京黑桃8撞球馆紫金新干线小区店(3店通用),18元,畅打一小时", "description":"18元,畅打一小时", "time":"2013.10.31 至 2014.10.28", "baidu_lat":"40.0856005", "baidu_lng":"116.3859510", "tel":"18911706160", "address":"北京昌平区紫金新干线小区底商", "link":"http://bj.meituan.com/deal/8637559.html"},
{"pk":"150", "title":"团购:北京璟点台球俱乐部昌平鼓楼西街店(昌平镇),22元,畅打三小时", "description":"22元,畅打三小时", "time":"2013.10.24 至 2014.10.23", "baidu_lat":"40.2297910", "baidu_lng":"116.2369225", "tel":"010-80101023", "address":"北京昌平区鼓楼西街12号地下(工商银行对面)", "link":"http://bj.meituan.com/deal/5739483.html"},
{"pk":"151", "title":"团购:北京金福德台球俱乐部(双井),20元,畅打两小时", "description":"20元,畅打两小时", "time":"2014年01月08日至2014年07月31日", "baidu_lat":"39.8999641", "baidu_lng":"116.4526116", "tel":"010-67735898", "address":"北京朝阳区忠实里南街甲6乙6号楼负一层", "link":"http://bj.nuomi.com/deal/5ecuu4m4.html"},
{"pk":"152", "title":"团购:北京夜时尚台球俱乐部通州北苑店(通州北苑),9.9元,畅打两小时", "description":"9.9元,畅打两小时", "time":"2013年11月12日至2014年03月16日", "baidu_lat":"39.9048952", "baidu_lng":"116.6457678", "tel":"010-52898803", "address":"北京通州区北苑南路鑫苑小区B1层", "link":"http://bj.nuomi.com/deal/iww3xjpc.html"},
{"pk":"153", "title":"团购:北京速D台球俱乐部(传媒大学),19.8元,畅打两小时", "description":"19.8元,畅打两小时", "time":"2013年12月25日至2014年03月31日", "baidu_lat":"39.9293697", "baidu_lng":"116.5566050", "tel":"010-65775398", "address":"北京朝阳区朝阳北路白家楼桥东58号楼B1层", "link":"http://bj.nuomi.com/deal/yxxjbcvo.html"},
{"pk":"154", "title":"团购:北京星期5台球俱乐部(洋桥),22元,畅打两小时", "description":"22元,畅打两小时", "time":"2014年01月29日至2014年06月30日", "baidu_lat":"39.8561241", "baidu_lng":"116.3947374", "tel":"010-51215593", "address":"北京丰台区马家堡东口洋桥大厦B1层", "link":"http://bj.nuomi.com/deal/et5kwleo.html"},
{"pk":"155", "title":"团购:北京金色年华台球俱乐部(劲松),18元,畅打两小时", "description":"18元,畅打两小时", "time":"2013年06月09日-2014年06月08日", "baidu_lat":"39.8951752", "baidu_lng":"116.4659855", "tel":"010-67751145", "address":"北京朝阳区劲松垂杨柳东里38号", "link":"http://bj.nuomi.com/deal/by8s1uy4.html"},
{"pk":"27", "title":"团购:北京凯乐台球俱乐部(苹果园),29.9元,畅打三小时", "description":"29.9元,畅打三小时", "time":"2013年09月03日-2014年04月01日", "baidu_lat":"39.9350493", "baidu_lng":"116.2191068", "tel":"010-88705828 ", "address":"石景山西黄新村雍景四季东门(北方工业大学北门) ", "link":"http://bj.nuomi.com/deal/krhcnvzo.html"},
{"pk":"79", "title":"团购:北京球动力大郊亭店(北京欢乐谷),38.8元,畅打三小时", "description":"38.8元,畅打三小时", "time":"2013年12月14日至2014年03月02日", "baidu_lat":"39.8986729", "baidu_lng":"116.5003570", "tel":"010-85788252", "address":"北京市朝阳区东四环大郊亭桥东200米(7天连锁酒店一层)", "link":"http://bj.nuomi.com/deal/hvkwctio.html"},
{"pk":"46", "title":"团购:北京黑湖台球俱乐部顺义站前街店(顺义),18元,畅打一小时", "description":"18元,畅打一小时", "time":"2013年11月21日至2014年02月28日", "baidu_lat":"40.1311662", "baidu_lng":"116.6553110", "tel":"010-69425177", "address":"北京市顺义区站前街2号", "link":"http://bj.nuomi.com/deal/rrcoauih.html"},
{"pk":"156", "title":"团购:北京龙辉台球俱乐部(黄村),21.8元,畅打两小时", "description":"21.8元,畅打两小时", "time":"2014年01月10日至2014年06月16日", "baidu_lat":"39.7635692", "baidu_lng":"116.3433008", "tel":"13611332193", "address":"北京大兴区康庄路28号,水晶广场写字楼6层", "link":"http://bj.nuomi.com/deal/br8kd0kn.html"},
{"pk":"157", "title":"团购:北京蓝旗星台球俱乐部(北京大学),68元,畅打九小时", "description":"68元,畅打九小时", "time":"2013年12月30日至2014年05月02日", "baidu_lat":"39.9985814", "baidu_lng":"116.3294837", "tel":"010-62769808", "address":"北京海淀区成府路125号蓝旗营5号楼", "link":"http://bj.nuomi.com/deal/eyiqgptj.html"},
{"pk":"158", "title":"团购:北京天成台球俱乐部(回龙观),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2014年01月27日至2014年04月25日", "baidu_lat":"40.0932582", "baidu_lng":"116.3726076", "tel":"010-57240538", "address":"北京昌平区回龙观龙锦苑东一区99连锁旅馆地下一层", "link":"http://bj.nuomi.com/deal/jshsj6f0.html"},
{"pk":"159", "title":"团购:北京金盛世纪台球俱乐部(房山),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2014年01月16日至2014年04月21日", "baidu_lat":"39.7744745", "baidu_lng":"116.1728625", "tel":"010-80327799", "address":"北京房山区加州水郡商业广场D座地下1层", "link":"http://bj.nuomi.com/deal/gclacp3t.html"},
{"pk":"160", "title":"团购:北京撞8台球俱乐部(顺义),15元,畅打一小时", "description":"15元,畅打一小时", "time":"2013年12月23日至2014年05月07日", "baidu_lat":"40.1278487", "baidu_lng":"116.6526506", "tel":"13716685868", "address":"北京顺义区怡馨家园29号楼", "link":"http://bj.nuomi.com/deal/ztpeahfa.html"},
{"pk":"161", "title":"团购:北京七度台球俱乐部(昌平镇),12元,畅打一小时", "description":"12元,畅打一小时", "time":"2014年01月20日至2014年04月23日", "baidu_lat":"40.2274343", "baidu_lng":"116.2630334", "tel":"13901295581", "address":"北京昌平区府学路7号福地家园", "link":"http://bj.nuomi.com/deal/tczpxqkc.html"},
{"pk":"162", "title":"团购:北京628台球俱乐部(什刹海),28元,畅打两小时", "description":"28元,畅打两小时", "time":"2013年12月27日至2014年03月27日", "baidu_lat":"39.9392861", "baidu_lng":"116.3876966", "tel":"010-83288628", "address":"北京西城区地安门西大街143号 北大医院正对面", "link":"http://bj.nuomi.com/deal/5aicdi2s.html"},
{"pk":"163", "title":"团购:北京国兰棋牌台球俱乐部(广渠门),18元,畅打一小时", "description":"18元,畅打一小时", "time":"2013年08月19日至2014年02月22日", "baidu_lat":"39.9027377", "baidu_lng":"116.4376252", "tel":"010-67158216", "address":"北京东城区东花市大街南小市口6号", "link":"http://bj.nuomi.com/deal/bs4r1yff.html"},
{"pk":"164", "title":"团购:北京西上园台球俱乐部(新华大街),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2014年01月17日至2014年04月18日", "baidu_lat":"39.9097275", "baidu_lng":"116.6832917", "tel":"010-80570304", "address":"北京通州区西上园小区西门", "link":"http://bj.nuomi.com/deal/ob5gwwxk.html"},
{"pk":"165", "title":"团购:北京雅君台球俱乐部朝外店(朝外大街),36元,畅打两小时", "description":"36元,畅打两小时", "time":"2013年11月10日至2014年03月08日", "baidu_lat":"39.9284716", "baidu_lng":"116.4486272", "tel":"010-58790918", "address":"北京朝阳区朝外大街乙12号", "link":"http://bj.nuomi.com/deal/ugz52rgn.html"},
{"pk":"166", "title":"团购:北京君辉台球俱乐部(管庄),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年11月19日至2014年02月17日 ", "baidu_lat":"39.9177441", "baidu_lng":"116.6017418", "tel":"010-53665198", "address":"北京朝阳区杨闸环岛西plus365比格披萨地下一层", "link":"http://bj.nuomi.com/deal/ffdznqoj.html"},
{"pk":"167", "title":"团购:北京梦幻台球俱乐部(管庄),45元,畅打两小时", "description":"45元,畅打两小时", "time":"2013年12月23日至2014年05月31日", "baidu_lat":"39.9200769", "baidu_lng":"116.5871127", "tel":"13910813970", "address":"北京朝阳区管庄西里11号楼", "link":"http://bj.nuomi.com/deal/qiytfbwk.html"},
{"pk":"168", "title":"团购:北京K8台球俱乐部(沙河),10元,畅打一小时", "description":"10元,畅打一小时", "time":"2013年11月30日至2014年05月30日", "baidu_lat":"40.1570599", "baidu_lng":"116.2692001", "tel":"13521844695", "address":"北京昌平区沙河地铁站亿旺商场旁", "link":"http://bj.nuomi.com/deal/dewlhx1n.html"},
{"pk":"169", "title":"团购:北京奥辉启航台球俱乐部(传媒大学),19元,畅打两小时", "description":"19元,畅打两小时", "time":"2013年10月08日至2014年03月02日", "baidu_lat":"39.9220917", "baidu_lng":"116.5529168", "tel":"13810687710", "address":"北京朝阳区朝阳路传媒大学北门对面", "link":"http://bj.nuomi.com/deal/39cki6qe.html"},
{"pk":"170", "title":"团购:北京东绅台球俱乐部(酒仙桥),38元,畅打三小时", "description":"38元,畅打三小时", "time":"2013年10月20日-2014年02月22日", "baidu_lat":"39.9687972", "baidu_lng":"116.4970572", "tel":"010-64310217", "address":"北京朝阳区酒仙桥十一街区1楼", "link":"http://bj.nuomi.com/deal/dyvthllh.html"},
{"pk":"107", "title":"团购:北京云川台球俱乐部志新桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9903301", "baidu_lng":"116.3757489", "tel":"010-62018887 ", "address":"北京海淀区北四环志新桥向南200米路西 ", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"171", "title":"团购:北京云川台球俱乐部方庄店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8721039", "baidu_lng":"116.4421229", "tel":"010-67622828", "address":"北京市丰台区蒲方路1号", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"113", "title":"团购:北京云川台球俱乐部三里屯店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9335159", "baidu_lng":"116.4625773", "tel":"010-68085558", "address":"北京市朝阳区三里屯南路16号泰悦豪庭B1楼", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"105", "title":"团购:北京云川台球俱乐部永定门店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8709788", "baidu_lng":"116.4130107", "tel":"010-87893336 ", "address":"北京永定门外安乐林路天天家园小区内东侧 ", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"109", "title":"团购:北京云川台球俱乐部右安门店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8658325", "baidu_lng":"116.3708853", "tel":"010-83527770 ", "address":"北京丰台区右安门外大街99号", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"172", "title":"团购:北京云川台球俱乐部西四店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9406734", "baidu_lng":"116.3858051", "tel":"010-66166490", "address":"北京市西城区西四北大街乙158号地下一层", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"173", "title":"团购:北京云川台球俱乐部保福寺店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9861512", "baidu_lng":"116.3339893", "tel":"010-62566598", "address":"北京市海淀区中关村南三街文化体育中心2层", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"174", "title":"团购:北京云川台球俱乐部马甸店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9881982", "baidu_lng":"116.3871993", "tel":"010-62379990", "address":"北京市朝阳区华严北里甲一号", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"175", "title":"团购:北京云川台球俱乐部五道口店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9991614", "baidu_lng":"116.3465476", "tel":"010-82386906", "address":"北京市海淀区成府路23号五道口宾馆", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"176", "title":"团购:北京云川台球俱乐部定慧桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9352214", "baidu_lng":"116.2687430", "tel":"010-58970485", "address":"北京市海淀区永定路乙1号院14楼2门地下1层(乐府江南小区门口)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"177", "title":"团购:北京云川台球俱乐部五棵松店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9068005", "baidu_lng":"116.2879723", "tel":"010-52126635", "address":"北京市海淀区今日家园8号地下一层", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"178", "title":"团购:北京云川台球俱乐部朝阳路店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9230932", "baidu_lng":"116.5330900", "tel":"010-65104646", "address":"北京市朝阳区朝阳路世纪天乐潮青汇商场5层", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"179", "title":"团购:北京云川台球俱乐部六里桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8880627", "baidu_lng":"116.3228218", "tel":"010-63333884", "address":"北京市丰台区太平桥西路华源1街4号楼(青年餐厅地下1层)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"180", "title":"团购:北京云川台球俱乐部酒仙桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9810040", "baidu_lng":"116.4969695", "tel":"010-64369489", "address":"北京市朝阳区酒仙桥路甲12号电子城科技大厦地下2层(临近比格餐厅)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"181", "title":"团购:北京云川台球俱乐部远洋山水店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9095537", "baidu_lng":"116.2467660", "tel":"010-88697183", "address":"北京市石景山区鲁谷东大街(远洋山水小区西门)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"182", "title":"团购:北京云川台球俱乐部岳各庄店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8669009", "baidu_lng":"116.2657588", "tel":"010-63871652", "address":"北京市丰台区五里店卢沟桥路和光里小区2号楼地下1层(临近岳各庄检测场)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"183", "title":"团购:北京云川台球俱乐部将台路店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9721626", "baidu_lng":"116.5093368", "tel":"010-84598422", "address":"北京市朝阳区酒仙桥驼房营西里甲5号2楼云顶时尚台球俱乐部(临近乐食尚餐厅)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"184", "title":"团购:北京云川台球俱乐部良乡店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.7415050", "baidu_lng":"116.1481470", "tel":"010-69365789", "address":"北京市房山区良乡拱辰大街49号(科豪大厦4层)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"185", "title":"团购:北京云川台球俱乐部学知桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9796657", "baidu_lng":"116.3547898", "tel":"010-82050295", "address":"北京市海淀区知春路太月园3号楼地下一层", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"186", "title":"团购:北京云川台球俱乐部草桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8515247", "baidu_lng":"116.3702949", "tel":"010-51471999", "address":"北京市丰台区北甲地路10号院三层", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
# NOTE(review): the record below looks field-shifted one slot left —
# baidu_lat is empty, baidu_lng holds a latitude (40.00…), and tel holds a
# longitude (116.41…).  Harmless for this importer (those keys are unused
# here) but worth fixing at the data source.
{"pk":"187", "title":"团购:北京云川台球俱乐部大屯店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"", "baidu_lng":"40.0093974", "tel":"116.4153867", "address":"北京市朝阳区亚运村安立路66号安立花园1号楼", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"188", "title":"团购:北京云川台球俱乐部新街口店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9444717", "baidu_lng":"116.3737303", "tel":"010-66537177", "address":"北京市西城区西直门内赵登禹路冠英园西区20楼B1楼(近地铁4号线新街口站D口)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"189", "title":"团购:北京云川台球俱乐部交大店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9557359", "baidu_lng":"116.3543208", "tel":"010-82164788", "address":"北京市海淀区交大东路25号", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"190", "title":"团购:北京云川台球俱乐部白纸坊店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8828696", "baidu_lng":"116.3533838", "tel":"010-63388918", "address":"北京市丰台区鸭子桥路信德园小区5-7(临近农业银行)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"191", "title":"团购:北京云川台球俱乐部花园桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9381604", "baidu_lng":"116.2958428", "tel":"010-88138880", "address":"北京市海淀区八里庄北里23号楼2层(临近碧水云天洗浴中心)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"192", "title":"团购:北京云川台球俱乐部丰体店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8580647", "baidu_lng":"116.2928752", "tel":"010-63865550", "address":"北京市丰台区文体路58号(丰体工人俱乐部地下2层)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"193", "title":"团购:北京云川台球俱乐部鲁谷店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9078076", "baidu_lng":"116.2467837", "tel":"010-88697770", "address":"北京市石景山区雕塑园南街29号楼远洋山水小区东门(远洋山水售楼处地下1层)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"194", "title":"团购:北京云川台球俱乐部成寿寺店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8596740", "baidu_lng":"116.4446929", "tel":"010-51228316", "address":"北京市丰台区方庄南路9号院方庄桥南300米(谱田大厦B1层)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"195", "title":"团购:北京云川台球俱乐部石佛营店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9351938", "baidu_lng":"116.5114810", "tel":"010-85819589", "address":"北京市朝阳区石佛营炫特区西门商业楼3层(临近卜蜂莲花超市)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"196", "title":"团购:北京云川台球俱乐部正阳桥店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8559464", "baidu_lng":"116.2941854", "tel":"010-63833680转0", "address":"北京市丰台区正阳大街正阳北里18号楼底商1层(临近北京国阳医院)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"197", "title":"团购:北京云川台球俱乐部四惠店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9176263", "baidu_lng":"116.5012654", "tel":"010-85865147", "address":"北京市朝阳区八里庄西里75号楼远洋天地小区南门(临近四惠地铁站D口)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"198", "title":"团购:北京云川台球俱乐部广外店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8923402", "baidu_lng":"116.3462058", "tel":"010-63334440", "address":"北京市西城区宣武门广安门外红居街5号楼", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"199", "title":"团购:北京云川台球俱乐部丰台东路店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8543703", "baidu_lng":"116.3225823", "tel":"010-83619585", "address":"北京市丰台区丰台东路樊家村甲3号", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"200", "title":"团购:北京云川台球俱乐部马家堡店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.8437473", "baidu_lng":"116.3711004", "tel":"010-67570773", "address":"北京市丰台区马家堡嘉园路星河苑小区西门(安太妇产医院对面)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"201", "title":"团购:北京云川台球俱乐部九州店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9568581", "baidu_lng":"116.2797742", "tel":"010-88498575", "address":"北京市海淀区西四环北路71号郦城A区3号楼地下一层", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"202", "title":"团购:北京云川台球俱乐部望京店(36店通用),30元,畅打两小时", "description":"30元,畅打两小时", "time":"2013年12月07日至2014年03月31日", "baidu_lat":"39.9925667", "baidu_lng":"116.4852965", "tel":"010-64773066", "address":"北京市朝阳区广顺大街19号院会所(临近新世界百货)", "link":"http://bj.nuomi.com/deal/cpiv9rzm.html"},
{"pk":"203", "title":"团购:北京奕承龙台球俱乐部,28元,畅打两小时", "description":"28元,畅打两小时", "time":"2014-01-01至2014-04-01", "baidu_lat":"39.8570611", "baidu_lng":"116.3160968", "tel":"010-84001818", "address":"北京丰台区丰桥路8号院甲1号楼1-09号三环新城底商", "link":"http://t.dianping.com/deal/2102963"},
]
couponobj = []
# strptime formats tried in order by getDate(): Chinese '2014年1月8日',
# dash-separated '2014-01-01', and dotted '2014.1.8' (strptime accepts
# non-zero-padded month/day fields).
dateformat = ['%Y年%m月%d日', '%Y-%m-%d', '%Y.%m.%d']
for coupon in coupons:
period = coupon['time']
# The validity period is "<start> 至 <end>"; fall back to a plain '-'
# separator when '至' is absent (e.g. '2013年10月22日-2014年03月12日').
# NOTE(review): re.split on a fixed literal could be plain str.split.
timearray = re.split('至',period)
if len(timearray) != 2:
timearray = re.split('-',period)
# Build unsaved Coupon rows.  discount is hardcoded to 20, and type/status
# to 1 — presumably "group-buy" and "active"; confirm against the Coupon
# model's choices before reuse.
obj = Coupon(poolroom_id=coupon['pk'], title=coupon['title'], description=coupon['description'], discount=20, startdate=getDate(dateformat, timearray[0].strip()),
enddate=getDate(dateformat, timearray[1].strip()), url=coupon['link'], type=1, status=1)
couponobj.append(obj)
# Single batched INSERT for all rows.
Coupon.objects.bulk_create(couponobj)
self.stdout.write('Successfully import "%s" coupon records.\n' % len(coupons))
def getDate(dateformat, datestr):
    """Parse `datestr` against each format in `dateformat`, first match wins.

    Args:
        dateformat: list of strptime format strings to try, in order.
        datestr: the date string to parse.

    Returns:
        datetime parsed with the first format that matches.

    Raises:
        Exception: if no format in `dateformat` matches `datestr`.
    """
    for formatstr in dateformat:
        try:
            return datetime.strptime(datestr, formatstr)
        except ValueError:
            # strptime raises ValueError on a format mismatch; this string
            # simply isn't in this format — try the next one silently.
            continue
    raise Exception('invalid date string: "%s"' % (datestr,))
| 225.601307
| 324
| 0.653098
| 4,387
| 34,517
| 5.08662
| 0.242079
| 0.035447
| 0.05521
| 0.04302
| 0.341205
| 0.339682
| 0.23715
| 0.222989
| 0.212637
| 0.1365
| 0
| 0.213788
| 0.092737
| 34,517
| 153
| 325
| 225.601307
| 0.498787
| 0.001072
| 0
| 0
| 0
| 0
| 0.703723
| 0.192136
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.007042
| 0.042254
| null | null | 0.007042
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c35919806f42d2e0ef34a35a61767a0c490bc25e
| 245
|
py
|
Python
|
saleor/__init__.py
|
taedori81/stylishclothing
|
5ef8a978a9f7636ed0f1c840d4926e76d46c4c1a
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/__init__.py
|
taedori81/stylishclothing
|
5ef8a978a9f7636ed0f1c840d4926e76d46c4c1a
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/__init__.py
|
taedori81/stylishclothing
|
5ef8a978a9f7636ed0f1c840d4926e76d46c4c1a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
def manage():
    """Run Django's command-line machinery for this project.

    Points DJANGO_SETTINGS_MODULE at "stylish.settings" unless the
    environment already defines it, then hands sys.argv to Django.
    """
    import os
    import sys

    from django.core.management import execute_from_command_line

    # setdefault-equivalent: never clobber an explicit environment setting.
    if "DJANGO_SETTINGS_MODULE" not in os.environ:
        os.environ["DJANGO_SETTINGS_MODULE"] = "stylish.settings"
    execute_from_command_line(sys.argv)
| 22.272727
| 71
| 0.75102
| 33
| 245
| 5.333333
| 0.666667
| 0.125
| 0.204545
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155102
| 245
| 10
| 72
| 24.5
| 0.850242
| 0.081633
| 0
| 0
| 0
| 0
| 0.169643
| 0.098214
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5edb26a90b8097fbd404fa7aaf0f1d09490bc35e
| 365
|
py
|
Python
|
colleague.py
|
AmineAsli/tkinter-notepad
|
c2f254bb9d9c9bb28c69753949e5127a6124af85
|
[
"BSD-3-Clause"
] | null | null | null |
colleague.py
|
AmineAsli/tkinter-notepad
|
c2f254bb9d9c9bb28c69753949e5127a6124af85
|
[
"BSD-3-Clause"
] | null | null | null |
colleague.py
|
AmineAsli/tkinter-notepad
|
c2f254bb9d9c9bb28c69753949e5127a6124af85
|
[
"BSD-3-Clause"
] | null | null | null |
class Colleague:
    """Base for mediator-pattern components.

    Holds a reference to the mediator so concrete colleagues can forward
    requests through it. The mediator may be absent (None) and can be
    attached or swapped after construction via the `mediator` property.
    """

    def __init__(self, mediator=None):
        # Backing store read/written through the property below.
        self._mediator = mediator

    @property
    def mediator(self):
        """The mediator this colleague talks through (None if unset)."""
        return self._mediator

    @mediator.setter
    def mediator(self, value):
        self._mediator = value
| 22.8125
| 60
| 0.657534
| 39
| 365
| 5.974359
| 0.564103
| 0.257511
| 0.257511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263014
| 365
| 15
| 61
| 24.333333
| 0.866171
| 0.249315
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.111111
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5eeb8445dcde3208ce36b3be568e72662d245f17
| 13
|
py
|
Python
|
Uche Clare/Phase 1/Python Basic 1/Day 11/Task 93.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Uche Clare/Phase 1/Python Basic 1/Day 11/Task 93.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Uche Clare/Phase 1/Python Basic 1/Day 11/Task 93.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
# Print the identity of the int object 70.
# NOTE(review): in CPython id() is the memory address, and small ints are
# cached — but the printed value is implementation- and run-specific.
print(id(70))
| 13
| 13
| 0.692308
| 3
| 13
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 13
| 1
| 13
| 13
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5eee5cd9768e97d5de194eb5521697ff2a558bad
| 50
|
py
|
Python
|
pycrybittrex/__init__.py
|
namiazad/pycrybittrex
|
fce4dbfde112fd31b5ede0ff1432a4c3afeb3668
|
[
"Apache-2.0"
] | null | null | null |
pycrybittrex/__init__.py
|
namiazad/pycrybittrex
|
fce4dbfde112fd31b5ede0ff1432a4c3afeb3668
|
[
"Apache-2.0"
] | null | null | null |
pycrybittrex/__init__.py
|
namiazad/pycrybittrex
|
fce4dbfde112fd31b5ede0ff1432a4c3afeb3668
|
[
"Apache-2.0"
] | null | null | null |
from .bittrex import *
from .definitions import *
| 16.666667
| 26
| 0.76
| 6
| 50
| 6.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 27
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
48e6c3a411f7bdf8756b33bce7aaf6c1c2d5cf8e
| 44
|
py
|
Python
|
MLPApproximatorTest/__init__.py
|
HalfInner/MLPApproximator
|
9bc08e14d8f50f323a0453f02d2230c4e4195bee
|
[
"MIT"
] | null | null | null |
MLPApproximatorTest/__init__.py
|
HalfInner/MLPApproximator
|
9bc08e14d8f50f323a0453f02d2230c4e4195bee
|
[
"MIT"
] | null | null | null |
MLPApproximatorTest/__init__.py
|
HalfInner/MLPApproximator
|
9bc08e14d8f50f323a0453f02d2230c4e4195bee
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020
# Kajetan Brzuszczak
| 14.666667
| 21
| 0.704545
| 5
| 44
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 0.204545
| 44
| 2
| 22
| 22
| 0.771429
| 0.863636
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
48fd65e842615852a1e210389bfe1194479bfe59
| 104
|
py
|
Python
|
enthought/persistence/project_loader.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/persistence/project_loader.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/persistence/project_loader.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from apptools.persistence.project_loader import *
| 26
| 49
| 0.855769
| 13
| 104
| 6.384615
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105769
| 104
| 3
| 50
| 34.666667
| 0.892473
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d28de0bc832ef6788586f3b167e2af785223ed74
| 427
|
py
|
Python
|
{{cookiecutter.project_slug}}/backend/app/app/crud/__init__.py
|
PythonWongXb/full_stack_sqlite3
|
71f2ca21354152be2857536c04f362c6d843a9a6
|
[
"MIT"
] | 1
|
2021-03-17T14:12:28.000Z
|
2021-03-17T14:12:28.000Z
|
{{cookiecutter.project_slug}}/backend/app/app/crud/__init__.py
|
PythonWongXb/full_stack_sqlite3
|
71f2ca21354152be2857536c04f362c6d843a9a6
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/backend/app/app/crud/__init__.py
|
PythonWongXb/full_stack_sqlite3
|
71f2ca21354152be2857536c04f362c6d843a9a6
|
[
"MIT"
] | 1
|
2021-02-11T23:39:09.000Z
|
2021-02-11T23:39:09.000Z
|
import os
# HACK: absolute, machine-specific Windows path appended to sys.path so the
# package resolves when run from that workstation — breaks portability.
# TODO(review): replace with a path derived from __file__.
os.sys.path.append(r"C:\Users\Administrator\Desktop\full-stack-fastapi-postgresql\{{cookiecutter.project_slug}}\backend\app")
# Re-export the CRUD singletons so callers can do `from app.crud import item`.
from .crud_item import item
from .crud_user import user
# For a new basic set of CRUD operations you could just do
# from .base import CRUDBase
# from app.models.item import Item
# from app.schemas.item import ItemCreate, ItemUpdate
# item = CRUDBase[Item, ItemCreate, ItemUpdate](Item)
| 30.5
| 125
| 0.782201
| 65
| 427
| 5.092308
| 0.615385
| 0.090634
| 0.084592
| 0.108761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117096
| 427
| 13
| 126
| 32.846154
| 0.877984
| 0.515222
| 0
| 0
| 0
| 0.25
| 0.507463
| 0.507463
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d2948af5ff4207b0156eb6f786c345303fb860c2
| 225
|
py
|
Python
|
main.py
|
CatalinCaldararu/Selenium-Wordpress-Article-Upload
|
a600a818febfdcc7ccb6595032214ad1ad2b9d3c
|
[
"MIT"
] | null | null | null |
main.py
|
CatalinCaldararu/Selenium-Wordpress-Article-Upload
|
a600a818febfdcc7ccb6595032214ad1ad2b9d3c
|
[
"MIT"
] | null | null | null |
main.py
|
CatalinCaldararu/Selenium-Wordpress-Article-Upload
|
a600a818febfdcc7ccb6595032214ad1ad2b9d3c
|
[
"MIT"
] | null | null | null |
from Utils.recursiveDeleteArticles import recursiveDeleteArticles
# BUG FIX: original read "fromt", a SyntaxError that prevented the script
# from running at all.
from Utils.countFilesInSubDirs import txtFileCountInSubdirectories

# Report per-directory .txt counts, run the recursive delete, then report
# again to show the effect.
# NOTE(review): 600 presumably caps articles per directory — confirm
# against Utils.recursiveDeleteArticles.
txtFileCountInSubdirectories()
recursiveDeleteArticles(600)
txtFileCountInSubdirectories()
| 37.5
| 67
| 0.915556
| 14
| 225
| 14.714286
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014019
| 0.048889
| 225
| 6
| 68
| 37.5
| 0.948598
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.4
| null | null | 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d2c53bc053bb85404ab6ed46f0f0f53180f27460
| 120
|
py
|
Python
|
game/rl/dqn/utils/layer_params/linear.py
|
DenisioMytnysiano/pacman
|
34ceddc8492b6dbb8b56306ec0702dc01bf3475e
|
[
"MIT"
] | null | null | null |
game/rl/dqn/utils/layer_params/linear.py
|
DenisioMytnysiano/pacman
|
34ceddc8492b6dbb8b56306ec0702dc01bf3475e
|
[
"MIT"
] | null | null | null |
game/rl/dqn/utils/layer_params/linear.py
|
DenisioMytnysiano/pacman
|
34ceddc8492b6dbb8b56306ec0702dc01bf3475e
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass


# eq=False keeps the default identity-based __eq__/__hash__, so two params
# objects with identical fields still compare (and hash) as distinct.
@dataclass(eq=False)
class LinearParams:
    """Input/output sizes describing a fully-connected (linear) layer."""

    # Width of the feature vector the layer accepts.
    in_features: int
    # Width of the feature vector the layer produces.
    out_features: int
| 15
| 33
| 0.766667
| 15
| 120
| 6
| 0.8
| 0.244444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 120
| 7
| 34
| 17.142857
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
d2db3fa7c93ba7a8c3466db506162e31965cdbd4
| 360
|
py
|
Python
|
scicopia_tools/exceptions.py
|
pikatech/Scicopia-tools
|
0e19d694adeae862e3db92779d204e4944cc47bc
|
[
"MIT"
] | null | null | null |
scicopia_tools/exceptions.py
|
pikatech/Scicopia-tools
|
0e19d694adeae862e3db92779d204e4944cc47bc
|
[
"MIT"
] | null | null | null |
scicopia_tools/exceptions.py
|
pikatech/Scicopia-tools
|
0e19d694adeae862e3db92779d204e4944cc47bc
|
[
"MIT"
] | 1
|
2021-06-18T16:00:35.000Z
|
2021-06-18T16:00:35.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 20:45:13 2021
@author: tech
"""
class ScicopiaException(Exception):
    """Root of the Scicopia exception hierarchy; all package errors derive from it."""
class DBError(ScicopiaException):
    """Raised for ArangoDB-related failures."""
class ConfigError(ScicopiaException):
    """Raised for configuration-related failures."""
| 22.5
| 54
| 0.7
| 42
| 360
| 6
| 0.785714
| 0.214286
| 0.269841
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045603
| 0.147222
| 360
| 16
| 55
| 22.5
| 0.775244
| 0.577778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
d2e72d32682a275bd87ba66e81c742a3e7ff6526
| 71
|
py
|
Python
|
journey11/src/main/simple/srcsinkwithtimestamp.py
|
parrisma/AI-intuition
|
3b081696b1d226815e029cbb536fac5e4d3de9a7
|
[
"MIT"
] | null | null | null |
journey11/src/main/simple/srcsinkwithtimestamp.py
|
parrisma/AI-intuition
|
3b081696b1d226815e029cbb536fac5e4d3de9a7
|
[
"MIT"
] | 4
|
2020-04-26T18:18:22.000Z
|
2020-05-16T14:47:32.000Z
|
journey11/src/main/simple/srcsinkwithtimestamp.py
|
parrisma/AI-intuition
|
3b081696b1d226815e029cbb536fac5e4d3de9a7
|
[
"MIT"
] | null | null | null |
import datetime
from journey11.src.interface.srcsink import SrcSink
| 11.833333
| 51
| 0.830986
| 9
| 71
| 6.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.126761
| 71
| 5
| 52
| 14.2
| 0.919355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
825003d99a049847a1b2076031d127f174ecad0e
| 694
|
py
|
Python
|
opentmi_client/api/__init__.py
|
OpenTMI/opentmi-pyclient
|
034c539d36fe13a2d6538ea421e4c01f00f5687d
|
[
"MIT"
] | null | null | null |
opentmi_client/api/__init__.py
|
OpenTMI/opentmi-pyclient
|
034c539d36fe13a2d6538ea421e4c01f00f5687d
|
[
"MIT"
] | 36
|
2018-06-18T10:03:58.000Z
|
2022-03-30T00:16:31.000Z
|
opentmi_client/api/__init__.py
|
OpenTMI/opentmi-pyclient
|
034c539d36fe13a2d6538ea421e4c01f00f5687d
|
[
"MIT"
] | 1
|
2019-04-17T08:49:24.000Z
|
2019-04-17T08:49:24.000Z
|
"""
Collect all public opentmi API's
"""
from opentmi_client.api.client import create, OpenTmiClient
from opentmi_client.api.build import Build, Vcs, Ci, Target, Hardware
from opentmi_client.api.result import Result
from opentmi_client.api.result import Job
from opentmi_client.api.result import Environment
from opentmi_client.api.result import Sut
from opentmi_client.api.result import Dut
from opentmi_client.api.result import Execution
from opentmi_client.api.result import Framework
from opentmi_client.api.result import File
from opentmi_client.api.result import Provider
from opentmi_client.api.event import Event
from opentmi_client.api.testcase import Testcase
Client = OpenTmiClient
| 36.526316
| 69
| 0.845821
| 104
| 694
| 5.519231
| 0.259615
| 0.249129
| 0.385017
| 0.452962
| 0.501742
| 0.501742
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097983
| 694
| 18
| 70
| 38.555556
| 0.916933
| 0.04611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.928571
| 0
| 0.928571
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
82594aded8889b8ea0c3c69c1e390173191f6061
| 225
|
py
|
Python
|
frictionless/program/__init__.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | 247
|
2020-08-04T16:42:09.000Z
|
2022-03-30T11:54:54.000Z
|
frictionless/program/__init__.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | 444
|
2020-07-29T09:13:59.000Z
|
2022-03-31T14:54:57.000Z
|
frictionless/program/__init__.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | 60
|
2020-09-04T11:39:34.000Z
|
2022-03-23T18:59:51.000Z
|
from .api import program_api
from .describe import program_describe
from .extract import program_extract
from .main import program, program_main
from .transform import program_transform
from .validate import program_validate
| 32.142857
| 40
| 0.857778
| 31
| 225
| 6.032258
| 0.290323
| 0.417112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 225
| 6
| 41
| 37.5
| 0.935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
825ef735dfbea37dab959172b5783c6540d539b4
| 35,663
|
py
|
Python
|
test/old_tests/_test_put.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 105
|
2015-01-07T09:51:13.000Z
|
2022-03-24T04:23:54.000Z
|
test/old_tests/_test_put.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 180
|
2015-01-01T19:29:50.000Z
|
2022-03-19T14:14:06.000Z
|
test/old_tests/_test_put.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 94
|
2015-01-21T19:17:48.000Z
|
2022-01-31T07:17:47.000Z
|
# -*- coding: utf-8 -*-
import pytest
import sys
import marshal
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
class TestPut(TestBaseClass):
def setup_class(cls):
    """Connect the shared clients and detect old servers, once per class.

    Creates one normal client and one with strict type checking disabled,
    authenticating only when credentials are configured, then inspects the
    server build version to decide whether version-gated tests must skip.
    """
    hostlist, user, password = TestBaseClass.get_hosts()
    config = {"hosts": hostlist}
    config_strict_types = {"hosts": hostlist, "strict_types": False}
    if user is None and password is None:
        TestPut.client = aerospike.client(config).connect()
        TestPut.client_strict_types = aerospike.client(
            config_strict_types).connect()
    else:
        TestPut.client = aerospike.client(config).connect(user, password)
        TestPut.client_strict_types = aerospike.client(
            config_strict_types).connect(user, password)
    # Assume an old server until the build version proves otherwise.
    TestPut.skip_old_server = True
    versioninfo = TestPut.client.info('version')
    for keys in versioninfo:
        for value in versioninfo[keys]:
            if value is not None:
                # Slice out the "x.y..." build number that follows the
                # literal "build" token ("build" + space = 6 chars) up to
                # the newline, then split on dots.
                versionlist = value[
                    value.find("build") + 6:value.find("\n")].split(".")
                # Server 3.6+ is treated as "new".
                # NOTE(review): `and` on the minor version wrongly keeps
                # skip_old_server True for e.g. 4.0 — likely intended
                # (major > 3) or (major == 3 and minor >= 6). Confirm.
                if int(versionlist[0]) >= 3 and int(versionlist[1]) >= 6:
                    TestPut.skip_old_server = False
def teardown_class(cls):
    """Close both client connections opened by setup_class."""
    for open_client in (TestPut.client, TestPut.client_strict_types):
        open_client.close()
def setup_method(self, method):
    """Start each test with a fresh, empty cleanup list."""
    # Keys appended here are removed again in teardown_method.
    self.delete_keys = []
def teardown_method(self, method):
    """Delete every record the finished test registered for cleanup."""
    remove_record = TestPut.client.remove
    for pending_key in self.delete_keys:
        remove_record(pending_key)
def test_put_with_string_record(self):
    """
    Invoke put() for a record with string data.
    """
    key = ('test', 'demo', 1)
    bins = {"name": "John"}
    # put() returns 0 on success.
    assert 0 == TestPut.client.put(key, bins)
    # get() returns (key, meta, bins); read the record back and verify.
    (key, _, bins) = TestPut.client.get(key)
    assert {"name": "John"} == bins
    # Register the key so teardown_method removes the record.
    self.delete_keys.append(key)
def test_put_with_multiple_bins(self):
"""
Invoke put() with multiple bins and multiple types of data.
Covers list, map, bytearray, integer.
"""
key = ('test', 'demo', 1)
bins = {
'i': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
's': {"key": "asd';q;'1';"},
'b': 1234,
'l': '!@#@#$QSDAsd;as'
}
assert 0 == TestPut.client.put(key, bins)
(key, _, bins) = TestPut.client.get(key)
assert {
'i': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
's': {"key": "asd';q;'1';"},
'b': 1234,
'l': '!@#@#$QSDAsd;as'
} == bins
self.delete_keys.append(key)
def test_put_with_no_parameters(self):
"""
Invoke put() without any parameters.
"""
with pytest.raises(TypeError) as typeError:
TestPut.client.put()
assert "argument 'key' (pos 1)" in str(
typeError.value)
def test_put_without_record(self):
"""
Invoke put() without any record data.
"""
key = ('test', 'demo', 1)
with pytest.raises(TypeError) as typeError:
TestPut.client.put(key)
assert "Required argument 'bins' (pos 2) not found" in str(
typeError.value)
def test_put_with_none_key(self):
"""
Invoke put() with None as key.
"""
bins = {"name": "John"}
try:
TestPut.client.put(None, bins)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == 'key is invalid'
def test_put_with_none_namespace_in_key(self):
"""
Invoke put() with None namespace in key.
"""
key = (None, "demo", 1)
bins = {"name": "Steve"}
try:
TestPut.client.put(key, bins)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "namespace must be a string"
def test_put_and_get_with_none_set_in_key(self):
"""
Invoke put() with None set in key.
"""
key = ("test", None, 1)
bins = {"name": "John"}
assert 0 == TestPut.client.put(key, bins)
_, _, bins = TestPut.client.get(key)
assert {"name": "John"} == bins
self.delete_keys.append(key)
def test_put_with_none_primary_key_in_key(self):
"""
Invoke put() with None primary key in key.
"""
key = ("test", "demo", None)
bins = {"name": "John"}
try:
TestPut.client.put(key, bins)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "either key or digest is required"
def test_put_with_bytearray_primary_key(self):
"""
Invoke put() with bytearray primary key in key.
"""
key = ("test", "demo", bytearray("asd;as[d'as;d", "utf-8"))
bins = {"name": "John"}
TestPut.client.put(key, bins)
(key, _, bins) = TestPut.client.get(key)
assert bins == {"name": "John"}
self.delete_keys.append(key)
def test_put_with_string_type_record(self):
"""
Invoke put() with string typed record.
"""
key = ('test', 'demo', 15)
kvs = "Name : John"
try:
TestPut.client.put(key, kvs)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Record should be passed as bin-value pair"
def test_put_with_wrong_ns_and_set(self):
"""
Invoke put() with wrong ns and set.
"""
key = ('demo', 'test', 1)
bins = {'a': ['!@#!#$%#', bytearray('ASD@#$AR#$@#ERQ#', 'utf-8')]}
try:
TestPut.client.put(key, bins)
except e.NamespaceNotFound as exception:
assert exception.code == 20
assert exception.msg == 'AEROSPIKE_ERR_NAMESPACE_NOT_FOUND'
def test_put_with_nonexistent_namespace(self):
"""
Invoke put() with non-existent namespace.
"""
key = ('test1', 'demo', 1)
bins = {'i': 'asdadasd'}
try:
TestPut.client.put(key, bins)
except e.NamespaceNotFound as exception:
assert exception.code == 20
assert exception.msg == 'AEROSPIKE_ERR_NAMESPACE_NOT_FOUND'
def test_put_with_nonexistent_set(self):
"""
Invoke put() with non-existent set.
"""
key = ('test', 'unknown_set', 1)
bins = {'a': {'k': [bytearray("askluy3oijs", "utf-8")]}}
res = TestPut.client.put(key, bins)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == {'a': {'k': [bytearray(b'askluy3oijs')]}}
self.delete_keys.append(key)
def test_put_boolean_record(self):
"""
Invoke put() for boolean data record.
"""
key = ('test', 'demo', 1)
bins = {"is_present": False}
res = TestPut.client.put(key, bins)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == {"is_present": False}
self.delete_keys.append(key)
"""
def test_put_unicode_string(self):
#Invoke put() for unicode record.
key = ('test', 'demo', 1)
bins = { "unicode_string": u"\ud83d\ude04" }
res = TestPut.client.put( key, bins )
assert res == 0
(key , meta, bins) = TestPut.client.get(key)
assert bins['unicode_string'] == u"\ud83d\ude04"
self.delete_keys.append( key )
#self.client.remove(key)
def test_put_unicode_key(self):
# Invoke put() for unicode key.
key = ('test', 'demo', u"\ud83d\ude04")
rec = {
"unicode_string": u"\ud83d\ude04"
}
res = TestPut.client.put( key, rec )
assert res == 0
(key , meta, bins) = TestPut.client.get(key)
assert bins == rec
key = ('test', 'demo', u"\ud83d\ude04")
self.delete_keys.append( key )
"""
def test_put_unicode_string_in_map(self):
# Invoke put() for unicode record.
key = ('test', 'demo', 1)
rec = {'a': {u'aa': u'11'}, 'b': {u'bb': u'22'}}
res = TestPut.client.put(key, rec)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == rec
self.delete_keys.append(key)
def test_put_unicode_string_in_list(self):
# Invoke put() for unicode record.
key = ('test', 'demo', 1)
rec = {'a': [u'aa', u'bb', 1, u'bb', u'aa']}
res = TestPut.client.put(key, rec)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == rec
self.delete_keys.append(key)
def test_put_unicode_string_in_key(self):
# Invoke put() for unicode record.
key = ('test', 'demo', "bb")
rec = {'a': [u'aa', 2, u'aa', 4, u'cc', 3, 2, 1]}
res = TestPut.client.put(key, rec)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == rec
self.delete_keys.append(key)
def test_put_with_float_data(self):
# Invoke put() for float data record.
key = ('test', 'demo', 1)
rec = {"pi": 3.141}
res = TestPut.client.put(key, rec)
assert res == 0
_, _, bins = TestPut.client.get(key)
assert bins == {'pi': 3.141}
self.delete_keys.append(key)
def test_put_with_float_data_within_list(self):
# Invoke put() for float data record within list.
key = ('test', 'demo', 1)
rec = {"double_list": [3.141, 4.123, 6.285]}
res = TestPut.client.put(key, rec)
assert res == 0
_, _, bins = TestPut.client.get(key)
assert bins == {'double_list': [3.141, 4.123, 6.285]}
self.delete_keys.append(key)
def test_put_with_float_data_within_map(self):
# Invoke put() for float data record within map.
key = ('test', 'demo', 1)
rec = {"double_map": {"1": 3.141, "2": 4.123, "3": 6.285}}
res = TestPut.client.put(key, rec)
assert res == 0
_, _, bins = TestPut.client.get(key)
assert bins == {'double_map': {"1": 3.141, "2": 4.123, "3": 6.285}}
self.delete_keys.append(key)
def test_put_with_string_meta_and_string_policies(self):
"""
Invoke put() for metadata and policies.
"""
key = ('test', 'demo', 1)
rec = {'i': 12}
try:
TestPut.client.put(key, rec, "meta", "policies")
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "policy must be a dict"
def test_put_with_string_record_generation(self):
"""
Invoke put() for a record with string data, metadata and ttl
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 3, 'ttl': 25000}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
assert meta['gen'] != None
self.delete_keys.append(key)
def test_put_with_generation_string(self):
"""
Invoke put() for a record with generation as string
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': "wrong", 'ttl': 25000}
policy = {'timeout': 1000}
try:
TestPut.client.put(key, rec, meta, policy)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Generation should be an int or long"
# self.delete_keys.append( key )
def test_put_with_ttl_string(self):
"""
Invoke put() for a record with ttl as string
"""
key = ('test', 'demo', 1)
rec = {
"name": "John"
}
meta = {
'gen': 3,
'ttl': "25000"
}
policy = {
'timeout': 1000
}
try:
TestPut.client.put(key, rec, meta, policy)
except e.ParamError as exception:
assert exception.code == -2
# assert exception.msg == "TTL should be an int or long"
# self.delete_keys.append( key )
def test_put_with_generation_bool(self):
"""
Invoke put() for a record with generation as boolean.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': True, 'ttl': 25000}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
assert meta['gen'] != None
self.delete_keys.append(key)
def test_put_with_ttl_boolean(self):
"""
Invoke put() for a record with ttl as boolean.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 3, 'ttl': True}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
assert meta['gen'] != None
self.delete_keys.append(key)
def test_put_with_policy_timeout_string(self):
"""
Invoke put() for a record with policy timeout as string
"""
key = ('test', 'demo', 1)
rec = {
"name": "John"
}
meta = {
'gen': 3,
'ttl': 25000
}
policy = {
'timeout': "1000"
}
try:
TestPut.client.put(key, rec, meta, policy)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == 'timeout is invalid'
def test_put_with_policy_gen_EQ_positive(self):
"""
Invoke put() for a record with generation as EQ positive
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
gen = meta['gen']
rec = {"name": "Smith"}
policy = {
'timeout': 1000,
'gen': aerospike.POLICY_GEN_EQ,
'commit_level': aerospike.POLICY_COMMIT_LEVEL_ALL
}
meta = {'gen': gen}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_policy_gen_EQ_less(self):
"""
Invoke put() for a record with generation as EQ less
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
rec = {"name": "Smith"}
policy = {'timeout': 1000, 'gen': aerospike.POLICY_GEN_EQ}
meta = {'gen': 10}
try:
TestPut.client.put(key, rec, meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == 'AEROSPIKE_ERR_RECORD_GENERATION'
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
self.delete_keys.append(key)
def test_put_with_policy_exists_create_negative(self):
"""
Invoke put() for a record with all policies.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
rec = {"name": "Smith"}
policy = {'timeout': 1000, 'exists': aerospike.POLICY_EXISTS_CREATE}
meta = {'gen': 2}
try:
TestPut.client.put(key, rec, meta, policy)
except e.RecordExistsError as exception:
assert exception.code == 5
assert exception.msg == 'AEROSPIKE_ERR_RECORD_EXISTS'
assert exception.bin == {'name': 'Smith'}
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
self.delete_keys.append(key)
def test_put_with_policy_exists_create_positive(self):
"""
Invoke put() for a record with all policies.
"""
key = ('test', 'demo', 1)
rec = {"name": "Smith"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'exists': aerospike.POLICY_EXISTS_CREATE,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER
}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_policy_exists_replace_negative(self):
"""
Invoke put() for a record with replace policy negative.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'exists': aerospike.POLICY_EXISTS_REPLACE,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND
}
try:
assert 0 == TestPut.client.put(key, rec, meta, policy)
except e.RecordNotFound as exception:
assert exception.code == 2
assert exception.msg == 'AEROSPIKE_ERR_RECORD_NOT_FOUND'
# self.delete_keys.append( key )
def test_put_with_policy_exists_create_or_replace_positive(self):
"""
Invoke put() for a record with create or replace policy positive.
"""
key = ('test', 'demo', 1)
rec = {"name": "Smith"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'exists': aerospike.POLICY_EXISTS_CREATE_OR_REPLACE,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND
}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_policy_exists_ignore(self):
"""
Invoke put() for a record with ignore.
"""
key = ('test', 'demo', 1)
rec = {"name": "Smith"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'exists': aerospike.POLICY_EXISTS_IGNORE,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND
}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_policy_replace_positive(self):
"""
Invoke put() for a record with replace positive.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
rec = {"name": "Smith"}
meta = {'gen': 2, 'ttl': 25000}
policy = {'timeout': 1000, 'exists': aerospike.POLICY_EXISTS_REPLACE}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_policy_exists_update_positive(self):
"""
Invoke put() for a record with all policies.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
rec = {"name": "Smith"}
meta = {'gen': 2, 'ttl': 25000}
policy = {'timeout': 1000, 'exists': aerospike.POLICY_EXISTS_UPDATE}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_policy_exists_update_negative(self):
"""
Invoke put() for a record with update policy negative.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {
'timeout': 1000,
'exists': aerospike.POLICY_EXISTS_UPDATE,
'gen': aerospike.POLICY_GEN_IGNORE,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND
}
try:
assert 0 == TestPut.client.put(key, rec, meta, policy)
except e.RecordNotFound as exception:
assert exception.code == 2
assert exception.msg == 'AEROSPIKE_ERR_RECORD_NOT_FOUND'
# self.delete_keys.append( key )
def test_put_with_policy_gen_GT_lesser(self):
"""
Invoke put() for a record with generation as GT lesser
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
gen = meta['gen']
rec = {"name": "Smith"}
policy = {'timeout': 1000, 'gen': aerospike.POLICY_GEN_GT}
meta = {'gen': gen}
try:
TestPut.client.put(key, rec, meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == 'AEROSPIKE_ERR_RECORD_GENERATION'
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
self.delete_keys.append(key)
def test_put_with_policy_gen_GT_positive(self):
"""
Invoke put() for a record with generation as GT positive
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
gen = meta['gen']
assert gen == 1
rec = {"name": "Smith"}
policy = {'timeout': 1000, 'gen': aerospike.POLICY_GEN_GT}
meta = {'gen': gen + 5}
TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_policy_gen_ignore(self):
"""
Invoke put() for a record with generation as gen_ignore
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
meta = {'gen': 2, 'ttl': 25000}
policy = {'timeout': 1000}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
gen = meta['gen']
rec = {"name": "Smith"}
policy = {'timeout': 1000, 'gen': aerospike.POLICY_GEN_IGNORE}
meta = {'gen': gen}
assert 0 == TestPut.client.put(key, rec, meta, policy)
(key, meta, bins) = TestPut.client.get(key)
assert {"name": "Smith"} == bins
self.delete_keys.append(key)
def test_put_with_set_unicode_string(self):
"""
Invoke put() with set is unicode string.
"""
key = ('test', 'demo', 1)
rec = {"name": "John"}
assert 0 == TestPut.client.put(key, rec)
(key, _, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
self.delete_keys.append(key)
def test_put_with_unicode_bin(self):
"""
Invoke put() with unicode bin.
"""
key = ('test', 'demo', 1)
rec = {
u'i': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
's': {"key": "asd';q;'1';"},
'b': 1234,
'l': '!@#@#$QSDAsd;as'
}
assert 0 == TestPut.client.put(key, rec)
(key, _, bins) = TestPut.client.get(key)
assert {
'i': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
's': {"key": "asd';q;'1';"},
'b': 1234,
'l': '!@#@#$QSDAsd;as'
} == bins
self.delete_keys.append(key)
def test_put_set(self):
"""
Invoke put() set.
"""
key = ('test', 'demo', 1)
rec = {"is_present": set([1, 2])}
res = TestPut.client.put(key, rec)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == {"is_present": set([1, 2])}
self.delete_keys.append(key)
def test_put_frozenset(self):
"""
Invoke put() frozenSet.
"""
key = ('test', 'demo', 1)
cities = frozenset(["Frankfurt", "Basel", "Freiburg"])
rec = {'fSet': cities}
res = TestPut.client.put(key, rec)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == {'fSet': frozenset(["Frankfurt", "Basel", "Freiburg"])}
self.delete_keys.append(key)
def test_put_tuple(self):
"""
Invoke put() tuple.
"""
key = ('test', 'demo', 1)
rec = {'seq': tuple('abc')}
res = TestPut.client.put(key, rec)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == {'seq': ('a', 'b', 'c')}
self.delete_keys.append(key)
def test_put_none_data(self):
"""
Invoke put() None.
"""
key = ('test', 'demo', 1)
rec_none = {"is_present": None}
res = TestPut.client.put(key, rec_none)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == {"is_present": None}
self.delete_keys.append(key)
def test_put_map_containing_tuple(self):
"""
Invoke put() maap containing tuple.
"""
key = ('test', 'demo', 1)
rec = {'seq': {'bb': tuple('abc')}}
res = TestPut.client.put(key, rec)
assert res == 0
(key, _, bins) = TestPut.client.get(key)
assert bins == {'seq': {u'bb': ('a', 'b', 'c')}}
self.delete_keys.append(key)
    def test_put_serializer_default(self):
        """
        Invoke put() with mixed data record with no class or instance
        serializer or deserializer. Python option should get called by default
        """
        key = ('test', 'demo', 1)
        # Mixed payload: nested maps/lists, floats, and bytearrays, to
        # exercise the default (Python) serialization path end-to-end.
        rec = {
            'map': {"key": "asd';q;'1';",
                    "pi": 3.14},
            'normal': 1234,
            'special': '!@#@#$QSDAsd;as',
            'list': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
            'bytes': bytearray("asd;as[d'as;d", "utf-8"),
            'nestedlist': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8"),
                           [1, bytearray("asd;as[d'as;d", "utf-8")]],
            'nestedmap': {
                "key": "asd';q;'1';",
                "pi": 3.14,
                "nest": {"pi1": 3.12,
                         "t": 1}
            },
        }
        # Empty meta and policy dicts: rely entirely on defaults.
        res = TestPut.client.put(key, rec, {}, {})
        assert res == 0
        _, _, bins = TestPut.client.get(key)
        # Everything must come back structurally identical.
        assert bins == {
            'map': {"key": "asd';q;'1';",
                    "pi": 3.14},
            'normal': 1234,
            'special': '!@#@#$QSDAsd;as',
            'list': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
            'bytes': bytearray("asd;as[d'as;d", "utf-8"),
            'nestedlist': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8"),
                           [1, bytearray("asd;as[d'as;d", "utf-8")]],
            'nestedmap':
                {"key": "asd';q;'1';",
                 "pi": 3.14,
                 "nest": {"pi1": 3.12,
                          "t": 1}},
        }
        self.delete_keys.append(key)
    def test_put_user_serializer_no_deserializer(self):
        """
        Invoke put() for float data record with user serializer is
        registered, but deserializer is not registered.
        """
        key = ('test', 'demo', 1)
        rec = {"pi": 3.14}

        def serialize_function(val):
            # Serialize via marshal; no matching deserializer is registered.
            return marshal.dumps(val)

        # NOTE: this registers a process-wide serializer as a side effect.
        aerospike.set_serializer(serialize_function)
        res = TestPut.client.put(key, rec, {}, {}, aerospike.SERIALIZER_USER)
        assert res == 0
        _, _, bins = TestPut.client.get(key)
        if TestPut.skip_old_server is False:
            # Newer servers store the double natively, so it reads back as a float.
            assert bins == {'pi': 3.14}
        else:
            # Older servers return the raw marshal bytes of 3.14.
            assert bins == {'pi': bytearray(b'g\x1f\x85\xebQ\xb8\x1e\t@')}
        self.delete_keys.append(key)
def test_put_record_with_bin_name_exceeding_max_limit(self):
"""
Invoke put() with bin name exceeding the max limit of bin name.
"""
key = ('test', 'demo', 'put_rec')
put_record = {
'containers_free': [],
'containers_used': [
{'cluster_id': 'bob',
'container_id': 1,
'port': 4000}
],
'list_of_map': [{'test': 'bar'}],
'map_of_list': {'fizz': ['b', 'u', 'z', 'z']},
'ports_free': [],
'ports_unused': [4100, 4200, 4300],
'provider_id': u'i-f01fc206'
}
try:
TestPut.client.put(key, put_record)
except e.BinNameError as exception:
assert exception.code == 21
assert exception.msg == "A bin name should not exceed 14 characters limit"
def test_put_with_string_record_without_connection(self):
"""
Invoke put() for a record with string data without connection
"""
config = {"hosts": [("127.0.0.1", 3000)]}
client1 = aerospike.client(config)
key = ('test', 'demo', 1)
bins = {"name": "John"}
try:
client1.put(key, bins)
except e.ClusterError as exception:
assert exception.code == 11
assert exception.msg == 'No connection to aerospike cluster'
def test_put_with_integer_greater_than_maxisze(self):
"""
Invoke put() for a record with integer greater than max size
"""
key = ('test', 'demo', 1)
bins = {"no": 111111111111111111111111111111111111111111111}
try:
TestPut.client.put(key, bins)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == 'integer value exceeds sys.maxsize'
except SystemError as exception:
pass
def test_put_with_integer_no_exception_raised_CLIENT598(self):
"""
Invoke put() for a record with integer equal to -1. No exception
raised. Test for CLIENT-598
"""
key = ('test', 'demo', 1)
bins = {"no": -1}
TestPut.client.put(key, bins)
(key, _, bins) = TestPut.client.get(key)
assert bins == {"no": -1}
self.delete_keys.append(key)
def test_put_with_key_as_an_integer_greater_than_maxisze(self):
"""
Invoke put() for a record with integer greater than max size
"""
key = ('test', 'demo', 1111111111111111111111111111111111)
bins = {"no": 11}
try:
assert 0 == TestPut.client.put(key, bins)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == 'integer value for KEY exceeds sys.maxsize'
except SystemError as exception:
pass
def test_put_record_set_to_aerospike_null(self):
"""
Invoke put() for a record with bin set to aerospike_null
"""
key = ('test', 'demo', 1)
bins = {"name": "John", "no": 3}
assert 0 == TestPut.client.put(key, bins)
(key, _, bins) = TestPut.client.get(key)
assert {"name": "John", "no": 3} == bins
bins = {"no": aerospike.null}
assert 0 == TestPut.client.put(key, bins)
(key, _, bins) = TestPut.client.get(key)
assert {"name": "John"} == bins
self.delete_keys.append(key)
def test_put_strict_types_bin_length(self):
"""
Invoke put() for a record with strict type set to false and bin
length more than 14 characters
"""
key = ('test', 'demo', 1)
maxlength = ""
for _ in range(20):
maxlength = maxlength + "a"
bins = {"name": "John", maxlength: 3}
assert 0 == TestPut.client_strict_types.put(key, bins)
(key, _, bins) = TestPut.client_strict_types.get(key)
assert {"name": "John"} == bins
self.delete_keys.append(key)
def test_put_strict_types_empty_record(self):
"""
Invoke put() for a record with strict type set to false and record
is empty
"""
key = ('test', 'demo', 1)
bins = {}
try:
assert 0 == TestPut.client_strict_types.put(key, bins)
(key, _, bins) = TestPut.client_strict_types.get(key)
assert bins is None
except e.InvalidRequest:
pass
| 28.439394
| 86
| 0.521381
| 4,128
| 35,663
| 4.370155
| 0.07437
| 0.090078
| 0.05765
| 0.066353
| 0.806596
| 0.778714
| 0.739024
| 0.706264
| 0.671397
| 0.637029
| 0
| 0.02887
| 0.332754
| 35,663
| 1,253
| 87
| 28.462091
| 0.72924
| 0.087177
| 0
| 0.644444
| 0
| 0
| 0.112634
| 0.007962
| 0
| 0
| 0
| 0
| 0.191667
| 1
| 0.086111
| false
| 0.011111
| 0.009722
| 0.001389
| 0.098611
| 0.001389
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
82a791bd3864b3211607b85091ecc5276e9b9ad2
| 166
|
py
|
Python
|
username_and_password.py
|
maria1226/Basics_Python
|
d994a1e00f602bb478809d47a937793ea7d1f116
|
[
"MIT"
] | 1
|
2020-08-04T14:33:59.000Z
|
2020-08-04T14:33:59.000Z
|
username_and_password.py
|
maria1226/Basics_Python
|
d994a1e00f602bb478809d47a937793ea7d1f116
|
[
"MIT"
] | null | null | null |
username_and_password.py
|
maria1226/Basics_Python
|
d994a1e00f602bb478809d47a937793ea7d1f116
|
[
"MIT"
] | null | null | null |
# Read a username, the expected password, and a first attempt; keep
# prompting until the attempt matches, then greet the user.
user = input()
expected_password = input()
attempt = input()
while attempt != expected_password:
    attempt = input()
print(f'Welcome,{user}!')
| 23.714286
| 39
| 0.771084
| 20
| 166
| 6.15
| 0.4
| 0.422764
| 0.325203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090361
| 166
| 7
| 40
| 23.714286
| 0.81457
| 0
| 0
| 0.333333
| 0
| 0
| 0.113772
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.666667
| 0
| 0
| 0
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
82bf9267dab90d27e385fdc832ea6b96ca404f01
| 185
|
py
|
Python
|
curethinkdb/__init__.py
|
guyskk/curethinkdb
|
7df7d5646f387ca462f7a21ca214f4708b8578b8
|
[
"MIT"
] | null | null | null |
curethinkdb/__init__.py
|
guyskk/curethinkdb
|
7df7d5646f387ca462f7a21ca214f4708b8578b8
|
[
"MIT"
] | null | null | null |
curethinkdb/__init__.py
|
guyskk/curethinkdb
|
7df7d5646f387ca462f7a21ca214f4708b8578b8
|
[
"MIT"
] | null | null | null |
from rethinkdb import * # noqa
from .net import Connection, ConnectionPool # noqa
def set_loop_type_curio():
    """Switch the rethinkdb driver to the curio-based Connection type."""
    import rethinkdb.net
    # Monkey-patch the driver module so new connections use this
    # package's Connection implementation.
    rethinkdb.net.connection_type = Connection
| 23.125
| 51
| 0.762162
| 23
| 185
| 5.956522
| 0.521739
| 0.175182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172973
| 185
| 7
| 52
| 26.428571
| 0.895425
| 0.048649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7d79ef7aee6e27bf0f779264c3a4a8f3478493be
| 441
|
py
|
Python
|
awkward-numba/awkward/numba/array/masked.py
|
smit2k14/awkward-array
|
a2645fdaed1a6997c4677ae47cbb2cd0663e8a21
|
[
"BSD-3-Clause"
] | null | null | null |
awkward-numba/awkward/numba/array/masked.py
|
smit2k14/awkward-array
|
a2645fdaed1a6997c4677ae47cbb2cd0663e8a21
|
[
"BSD-3-Clause"
] | null | null | null |
awkward-numba/awkward/numba/array/masked.py
|
smit2k14/awkward-array
|
a2645fdaed1a6997c4677ae47cbb2cd0663e8a21
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-array/blob/master/LICENSE
import awkward.array.masked
from .base import NumbaMethods
class MaskedArrayNumba(NumbaMethods, awkward.array.masked.MaskedArray):
    """MaskedArray with the NumbaMethods mixin applied; no extra behavior."""
    pass
class BitMaskedArrayNumba(NumbaMethods, awkward.array.masked.BitMaskedArray):
    """BitMaskedArray with the NumbaMethods mixin applied; no extra behavior."""
    pass
class IndexedMaskedArrayNumba(NumbaMethods, awkward.array.masked.IndexedMaskedArray):
    """IndexedMaskedArray with the NumbaMethods mixin applied; no extra behavior."""
    pass
| 27.5625
| 91
| 0.804989
| 51
| 441
| 6.960784
| 0.607843
| 0.169014
| 0.202817
| 0.253521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002513
| 0.097506
| 441
| 15
| 92
| 29.4
| 0.889447
| 0.249433
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.375
| 0.25
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
7db6abac9197a991676a7bfc53f92c7ca6a2a663
| 1,377
|
py
|
Python
|
test/color_test.py
|
TimeExceed/draw.py
|
361dceda86a983815f48b898b3d6c75c776f6aa0
|
[
"BSD-3-Clause"
] | null | null | null |
test/color_test.py
|
TimeExceed/draw.py
|
361dceda86a983815f48b898b3d6c75c776f6aa0
|
[
"BSD-3-Clause"
] | null | null | null |
test/color_test.py
|
TimeExceed/draw.py
|
361dceda86a983815f48b898b3d6c75c776f6aa0
|
[
"BSD-3-Clause"
] | null | null | null |
import testa
from fathom import Point, ORIGIN
import fathom.tikz as tikz
import fathom.colors as colors
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\usetikzlibrary{arrows.meta,arrows}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}[>=Stealth]
\fill[color=black!50] (1.00cm,1.00cm) circle [radius=1.00cm];
\draw[color=red!50] (1.00cm,1.00cm) circle [radius=1.00cm];
\end{tikzpicture}
\end{document}
''')
def scaled_color():
    """Scaled colors (scale(0.5)) must render as TikZ '!50' suffixes."""
    canvas = tikz.Canvas()
    canvas.new_circle(center=Point(1, 1), radius=1,
                      pen_color=colors.RED.scale(0.5),
                      brush_color=colors.BLACK.scale(0.5))
    return canvas.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\usetikzlibrary{arrows.meta,arrows}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}[>=Stealth]
\fill[color=black!white] (1.00cm,1.00cm) circle [radius=1.00cm];
\draw[color=red!green] (1.00cm,1.00cm) circle [radius=1.00cm];
\end{tikzpicture}
\end{document}
''')
def mixed_color():
    """Mixed colors (mix(other)) must render as TikZ 'a!b' color blends."""
    canvas = tikz.Canvas()
    canvas.new_circle(center=Point(1, 1), radius=1,
                      pen_color=colors.RED.mix(colors.GREEN),
                      brush_color=colors.BLACK.mix(colors.WHITE))
    return canvas.draw()
if __name__ == '__main__':
    # Hand off discovery/execution of the decorated tests to testa.
    testa.main()
| 27
| 65
| 0.682643
| 188
| 1,377
| 4.904255
| 0.297872
| 0.065076
| 0.02603
| 0.043384
| 0.737527
| 0.737527
| 0.737527
| 0.737527
| 0.737527
| 0.733189
| 0
| 0.046036
| 0.148148
| 1,377
| 50
| 66
| 27.54
| 0.739983
| 0
| 0
| 0.636364
| 0
| 0.090909
| 0.501089
| 0.228758
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7dd2e3dee06d2af43e04d8fb67fa6beafa4a584a
| 343
|
py
|
Python
|
tests/test_utils.py
|
flug/gonzo
|
0f2eb189581e7833a9dec8dafa52ba8ca1765afa
|
[
"Apache-2.0"
] | 8
|
2015-01-14T10:29:41.000Z
|
2016-02-07T16:59:20.000Z
|
tests/test_utils.py
|
flug/gonzo
|
0f2eb189581e7833a9dec8dafa52ba8ca1765afa
|
[
"Apache-2.0"
] | 7
|
2015-01-14T10:29:22.000Z
|
2018-10-01T13:00:52.000Z
|
tests/test_utils.py
|
flug/gonzo
|
0f2eb189581e7833a9dec8dafa52ba8ca1765afa
|
[
"Apache-2.0"
] | 2
|
2015-01-14T10:31:39.000Z
|
2018-10-01T12:56:06.000Z
|
import pytest
from gonzo.utils import last_index
def test_last_index():
    """last_index returns the position of the final matching element."""
    for seq, expected in ([1, 1], 1), ([1, 1, 2], 1):
        assert last_index(seq, 1) == expected
def test_last_index_missing():
    """A value that never occurs in the list raises ValueError."""
    try:
        last_index([1], 2)
    except ValueError:
        pass
    else:
        raise AssertionError('ValueError not raised')
def test_last_index_empty():
    """Searching an empty list raises ValueError."""
    try:
        last_index([], 1)
    except ValueError:
        pass
    else:
        raise AssertionError('ValueError not raised')
| 18.052632
| 40
| 0.658892
| 52
| 343
| 4.096154
| 0.326923
| 0.338028
| 0.187793
| 0.225352
| 0.497653
| 0.338028
| 0.338028
| 0
| 0
| 0
| 0
| 0.04428
| 0.209913
| 343
| 18
| 41
| 19.055556
| 0.741697
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.272727
| true
| 0
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7de72b57f96b16ec53b51e75304996939a3edef1
| 42,025
|
py
|
Python
|
heatclient/tests/unit/test_template_utils.py
|
enterstudio/python-heatclient
|
954e475a6a0a12432ec325d7579460fabcf3f40a
|
[
"Apache-2.0"
] | null | null | null |
heatclient/tests/unit/test_template_utils.py
|
enterstudio/python-heatclient
|
954e475a6a0a12432ec325d7579460fabcf3f40a
|
[
"Apache-2.0"
] | null | null | null |
heatclient/tests/unit/test_template_utils.py
|
enterstudio/python-heatclient
|
954e475a6a0a12432ec325d7579460fabcf3f40a
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import tempfile
import mox
import six
from six.moves.urllib import error
from six.moves.urllib import request
import testtools
from testtools import matchers
import yaml
from heatclient.common import template_utils
from heatclient.common import utils
from heatclient import exc
class ShellEnvironmentTest(testtools.TestCase):
    """Tests for environment-file resolution in heatclient template_utils.

    Uses mox to stub urlopen so no real file or network access occurs.
    NOTE(review): leading indentation inside the embedded YAML byte-strings
    was reconstructed (uniform nesting) — confirm against upstream source.
    """

    # Minimal valid template served for every stubbed template URL.
    template_a = b'{"heat_template_version": "2013-05-23"}'

    def setUp(self):
        super(ShellEnvironmentTest, self).setUp()
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def collect_links(self, env, content, url, env_base_url=''):
        """Resolve registry URLs in *env*; when *url* is given, stub it to
        return *content* and check the fetched body lands in files[url]."""
        jenv = yaml.safe_load(env)
        files = {}
        if url:
            self.m.StubOutWithMock(request, 'urlopen')
            # Stubbed twice: the resolver fetches each template URL twice.
            request.urlopen(url).AndReturn(six.BytesIO(content))
            request.urlopen(url).AndReturn(six.BytesIO(content))
            self.m.ReplayAll()
        template_utils.resolve_environment_urls(
            jenv.get('resource_registry'), files, env_base_url)
        if url:
            self.assertEqual(content.decode('utf-8'), files[url])

    def test_ignore_env_keys(self):
        """Non-URL registry keys (hooks, restricted_actions) pass through untouched."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          resources:
            bar:
              hooks: pre_create
              restricted_actions: replace
        '''
        request.urlopen('file://%s' % env_file).AndReturn(
            six.BytesIO(env))
        self.m.ReplayAll()
        _, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {u'resource_registry': {u'resources': {
                u'bar': {u'hooks': u'pre_create',
                         u'restricted_actions': u'replace'}}}},
            env_dict)
        self.m.VerifyAll()

    def test_process_environment_file(self):
        """An absolute file:// registry entry is fetched and recorded in files."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": "file:///home/b/a.yaml"
        '''
        request.urlopen('file://%s' % env_file).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': 'file:///home/b/a.yaml'}},
            env_dict)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])

    def test_process_environment_relative_file(self):
        """A relative registry path resolves against the env file's directory."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": a.yaml
        '''
        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        self.assertEqual(
            env_url,
            utils.normalise_file_path_to_url(env_file))
        self.assertEqual(
            'file:///home/my/dir',
            utils.base_url_for_url(env_url))
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': 'file:///home/my/dir/a.yaml'}},
            env_dict)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/my/dir/a.yaml'])

    def test_process_environment_relative_file_up(self):
        """A '../' relative path resolves to the env file's parent directory."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": ../bar/a.yaml
        '''
        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/my/bar/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/my/bar/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        env_url = 'file://%s' % env_file
        self.assertEqual(
            env_url,
            utils.normalise_file_path_to_url(env_file))
        self.assertEqual(
            'file:///home/my/dir',
            utils.base_url_for_url(env_url))
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': 'file:///home/my/bar/a.yaml'}},
            env_dict)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/my/bar/a.yaml'])

    def test_process_environment_url(self):
        """A relative registry path in an http env resolves against the env URL."""
        env = b'''
        resource_registry:
          "OS::Thingy": "a.yaml"
        '''
        url = 'http://no.where/some/path/to/file.yaml'
        tmpl_url = 'http://no.where/some/path/to/a.yaml'
        self.m.StubOutWithMock(request, 'urlopen')
        request.urlopen(url).AndReturn(six.BytesIO(env))
        request.urlopen(tmpl_url).AndReturn(six.BytesIO(self.template_a))
        request.urlopen(tmpl_url).AndReturn(six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env_dict = template_utils.process_environment_and_files(
            url)
        self.assertEqual({'resource_registry': {'OS::Thingy': tmpl_url}},
                         env_dict)
        self.assertEqual(self.template_a.decode('utf-8'), files[tmpl_url])

    def test_process_environment_empty_file(self):
        """An empty env file yields empty env and files dicts."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env = b''
        request.urlopen('file://%s' % env_file).AndReturn(six.BytesIO(env))
        self.m.ReplayAll()
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual({}, env_dict)
        self.assertEqual({}, files)

    def test_no_process_environment_and_files(self):
        """Calling with no path at all yields empty env and files dicts."""
        files, env = template_utils.process_environment_and_files()
        self.assertEqual({}, env)
        self.assertEqual({}, files)

    def test_process_multiple_environments_and_files(self):
        """Two env files merge: registries and parameters are combined."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file1 = '/home/my/dir/env1.yaml'
        env_file2 = '/home/my/dir/env2.yaml'
        env1 = b'''
        parameters:
          "param1": "value1"
        resource_registry:
          "OS::Thingy1": "file:///home/b/a.yaml"
        '''
        env2 = b'''
        parameters:
          "param2": "value2"
        resource_registry:
          "OS::Thingy2": "file:///home/b/b.yaml"
        '''
        request.urlopen('file://%s' % env_file1).AndReturn(
            six.BytesIO(env1))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file://%s' % env_file2).AndReturn(
            six.BytesIO(env2))
        request.urlopen('file:///home/b/b.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file1, env_file2])
        self.assertEqual(
            {
                'resource_registry': {
                    'OS::Thingy1': 'file:///home/b/a.yaml',
                    'OS::Thingy2': 'file:///home/b/b.yaml'},
                'parameters': {
                    'param1': 'value1',
                    'param2': 'value2'}
            },
            env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/b.yaml'])

    def test_process_multiple_environments_default_resources(self):
        """Per-resource registry entries from both envs merge under each resource."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file1 = '/home/my/dir/env1.yaml'
        env_file2 = '/home/my/dir/env2.yaml'
        env1 = b'''
        resource_registry:
          resources:
            resource1:
              "OS::Thingy1": "file:///home/b/a.yaml"
            resource2:
              "OS::Thingy2": "file:///home/b/b.yaml"
        '''
        env2 = b'''
        resource_registry:
          resources:
            resource1:
              "OS::Thingy3": "file:///home/b/a.yaml"
            resource2:
              "OS::Thingy4": "file:///home/b/b.yaml"
        '''
        # InAnyOrder(): fetch order of the nested templates is not defined.
        request.urlopen('file://%s' % env_file1).InAnyOrder().AndReturn(
            six.BytesIO(env1))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file://%s' % env_file2).InAnyOrder().AndReturn(
            six.BytesIO(env2))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file1, env_file2])
        self.assertEqual(
            {
                'resource_registry': {
                    'resources': {
                        'resource1': {
                            'OS::Thingy1': 'file:///home/b/a.yaml',
                            'OS::Thingy3': 'file:///home/b/a.yaml'
                        },
                        'resource2': {
                            'OS::Thingy2': 'file:///home/b/b.yaml',
                            'OS::Thingy4': 'file:///home/b/b.yaml'
                        }
                    }
                }
            },
            env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/b.yaml'])

    def test_no_process_multiple_environments_and_files(self):
        """Calling the multi-env variant with no paths yields empty dicts."""
        files, env = template_utils.process_multiple_environments_and_files()
        self.assertEqual({}, env)
        self.assertEqual({}, files)

    def test_process_multiple_environments_and_files_from_object(self):
        """Env fetched via a caller-supplied object_request callback resolves
        relative template paths against the object URL."""
        env_object = 'http://no.where/path/to/env.yaml'
        env1 = b'''
        parameters:
          "param1": "value1"
        resource_registry:
          "OS::Thingy1": "b/a.yaml"
        '''
        self.m.ReplayAll()
        self.object_requested = False

        def env_path_is_object(object_url):
            # Treat every path as an object for this test.
            return True

        def object_request(method, object_url):
            self.object_requested = True
            self.assertEqual('GET', method)
            self.assertTrue(object_url.startswith("http://no.where/path/to/"))
            if object_url == env_object:
                return env1
            else:
                return self.template_a

        files, env = template_utils.process_multiple_environments_and_files(
            env_paths=[env_object], env_path_is_object=env_path_is_object,
            object_request=object_request)
        self.assertEqual(
            {
                'resource_registry': {
                    'OS::Thingy1': 'http://no.where/path/to/b/a.yaml'},
                'parameters': {'param1': 'value1'}
            },
            env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['http://no.where/path/to/b/a.yaml'])

    def test_process_multiple_environments_and_files_tracker(self):
        """env_list_tracker records each processed env URL and stores the
        merged env JSON under that URL in files."""
        # Setup
        self.m.StubOutWithMock(request, 'urlopen')
        env_file1 = '/home/my/dir/env1.yaml'
        env1 = b'''
        parameters:
          "param1": "value1"
        resource_registry:
          "OS::Thingy1": "file:///home/b/a.yaml"
        '''
        request.urlopen('file://%s' % env_file1).AndReturn(
            six.BytesIO(env1))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        # Test
        env_file_list = []
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file1], env_list_tracker=env_file_list)
        # Verify
        expected_env = {'parameters': {'param1': 'value1'},
                        'resource_registry':
                            {'OS::Thingy1': 'file:///home/b/a.yaml'}
                        }
        self.assertEqual(expected_env, env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])
        self.assertEqual(['file:///home/my/dir/env1.yaml'], env_file_list)
        self.assertIn('file:///home/my/dir/env1.yaml', files)
        self.assertEqual(expected_env,
                         json.loads(files['file:///home/my/dir/env1.yaml']))

    def test_process_environment_relative_file_tracker(self):
        """Tracker also works with relative registry paths in the env."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": a.yaml
        '''
        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        self.assertEqual(
            env_url,
            utils.normalise_file_path_to_url(env_file))
        self.assertEqual(
            'file:///home/my/dir',
            utils.base_url_for_url(env_url))
        env_file_list = []
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file], env_list_tracker=env_file_list)
        # Verify
        expected_env = {'resource_registry':
                            {'OS::Thingy': 'file:///home/my/dir/a.yaml'}}
        self.assertEqual(expected_env, env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/my/dir/a.yaml'])
        self.assertEqual(['file:///home/my/dir/env.yaml'], env_file_list)
        self.assertEqual(json.dumps(expected_env),
                         files['file:///home/my/dir/env.yaml'])

    def test_global_files(self):
        """A global file:// registry mapping is fetched via collect_links."""
        url = 'file:///home/b/a.yaml'
        env = '''
        resource_registry:
          "OS::Thingy": "%s"
        ''' % url
        self.collect_links(env, self.template_a, url)

    def test_nested_files(self):
        """A per-resource registry mapping is fetched via collect_links."""
        url = 'file:///home/b/a.yaml'
        env = '''
        resource_registry:
          resources:
            freddy:
              "OS::Thingy": "%s"
        ''' % url
        self.collect_links(env, self.template_a, url)

    def test_http_url(self):
        """http:// registry URLs are supported."""
        url = 'http://no.where/container/a.yaml'
        env = '''
        resource_registry:
          "OS::Thingy": "%s"
        ''' % url
        self.collect_links(env, self.template_a, url)

    def test_with_base_url(self):
        """A registry-level base_url prefixes relative template paths."""
        url = 'ftp://no.where/container/a.yaml'
        env = '''
        resource_registry:
          base_url: "ftp://no.where/container/"
          resources:
            server_for_me:
              "OS::Thingy": a.yaml
        ''' % url
        self.collect_links(env, self.template_a, url)

    def test_with_built_in_provider(self):
        """A built-in provider name (OS::Compute::Server) triggers no fetch."""
        env = '''
        resource_registry:
          resources:
            server_for_me:
              "OS::Thingy": OS::Compute::Server
        '''
        self.collect_links(env, self.template_a, None)

    def test_with_env_file_base_url_file(self):
        """env_base_url (file://) prefixes relative template paths."""
        url = 'file:///tmp/foo/a.yaml'
        env = '''
        resource_registry:
          resources:
            server_for_me:
              "OS::Thingy": a.yaml
        '''
        env_base_url = 'file:///tmp/foo'
        self.collect_links(env, self.template_a, url, env_base_url)

    def test_with_env_file_base_url_http(self):
        """env_base_url (http://) prefixes relative template paths."""
        url = 'http://no.where/path/to/a.yaml'
        env = '''
        resource_registry:
          resources:
            server_for_me:
              "OS::Thingy": to/a.yaml
        '''
        env_base_url = 'http://no.where/path'
        self.collect_links(env, self.template_a, url, env_base_url)

    def test_unsupported_protocol(self):
        """A registry URL with an unsupported scheme raises CommandError."""
        env = '''
        resource_registry:
          "OS::Thingy": "sftp://no.where/dev/null/a.yaml"
        '''
        jenv = yaml.safe_load(env)
        fields = {'files': {}}
        self.assertRaises(exc.CommandError,
                          template_utils.get_file_contents,
                          jenv['resource_registry'],
                          fields)
class TestGetTemplateContents(testtools.TestCase):
    """Tests for template_utils.get_template_contents.

    Covers loading a template from a local file, an http URL, an
    object-store callback, and the error paths (empty file, missing
    file, no source given, parse error), plus handling of non-UTF-8
    get_file payloads.
    """

    def setUp(self):
        super(TestGetTemplateContents, self).setUp()
        # mox is verified and unstubbed even when a test fails.
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_get_template_contents_file(self):
        """A JSON template file parses and yields no extra files."""
        with tempfile.NamedTemporaryFile() as tmpl_file:
            tmpl = (b'{"AWSTemplateFormatVersion" : "2010-09-09",'
                    b' "foo": "bar"}')
            tmpl_file.write(tmpl)
            tmpl_file.flush()
            files, tmpl_parsed = template_utils.get_template_contents(
                tmpl_file.name)
            self.assertEqual({"AWSTemplateFormatVersion": "2010-09-09",
                              "foo": "bar"}, tmpl_parsed)
            self.assertEqual({}, files)

    def test_get_template_contents_file_empty(self):
        """An empty template file raises CommandError."""
        with tempfile.NamedTemporaryFile() as tmpl_file:
            ex = self.assertRaises(
                exc.CommandError,
                template_utils.get_template_contents,
                tmpl_file.name)
            self.assertEqual(
                'Could not fetch template from file://%s' % tmpl_file.name,
                str(ex))

    def test_get_template_file_nonextant(self):
        """A nonexistent path surfaces the underlying URLError."""
        nonextant_file = '/template/dummy/file/path/and/name.yaml'
        ex = self.assertRaises(
            error.URLError,
            template_utils.get_template_contents,
            nonextant_file)
        self.assertEqual(
            "<urlopen error [Errno 2] No such file or directory: '%s'>"
            % nonextant_file,
            str(ex))

    def test_get_template_contents_file_none(self):
        """Calling with no source at all raises CommandError."""
        ex = self.assertRaises(
            exc.CommandError,
            template_utils.get_template_contents)
        self.assertEqual(
            ('Need to specify exactly one of --template-file, '
             '--template-url or --template-object'),
            str(ex))

    def test_get_template_contents_file_none_existing(self):
        """existing=True with no source returns empty results, not an error."""
        files, tmpl_parsed = template_utils.get_template_contents(
            existing=True)
        self.assertIsNone(tmpl_parsed)
        self.assertEqual({}, files)

    def test_get_template_contents_parse_error(self):
        """Malformed JSON raises CommandError naming the template URL."""
        with tempfile.NamedTemporaryFile() as tmpl_file:
            tmpl = b'{"foo": "bar"'
            tmpl_file.write(tmpl)
            tmpl_file.flush()
            ex = self.assertRaises(
                exc.CommandError,
                template_utils.get_template_contents,
                tmpl_file.name)
            self.assertThat(
                str(ex),
                matchers.MatchesRegex(
                    'Error parsing template file://%s ' % tmpl_file.name))

    def test_get_template_contents_url(self):
        """A template fetched via template_url parses normally."""
        tmpl = b'{"AWSTemplateFormatVersion" : "2010-09-09", "foo": "bar"}'
        url = 'http://no.where/path/to/a.yaml'
        self.m.StubOutWithMock(request, 'urlopen')
        request.urlopen(url).AndReturn(six.BytesIO(tmpl))
        self.m.ReplayAll()
        files, tmpl_parsed = template_utils.get_template_contents(
            template_url=url)
        self.assertEqual({"AWSTemplateFormatVersion": "2010-09-09",
                          "foo": "bar"}, tmpl_parsed)
        self.assertEqual({}, files)

    def test_get_template_contents_object(self):
        """template_object fetches via the supplied object_request callback."""
        tmpl = '{"AWSTemplateFormatVersion" : "2010-09-09", "foo": "bar"}'
        url = 'http://no.where/path/to/a.yaml'
        self.m.ReplayAll()
        self.object_requested = False

        def object_request(method, object_url):
            self.object_requested = True
            self.assertEqual('GET', method)
            self.assertEqual('http://no.where/path/to/a.yaml', object_url)
            return tmpl

        files, tmpl_parsed = template_utils.get_template_contents(
            template_object=url,
            object_request=object_request)
        self.assertEqual({"AWSTemplateFormatVersion": "2010-09-09",
                          "foo": "bar"}, tmpl_parsed)
        self.assertEqual({}, files)
        self.assertTrue(self.object_requested)

    def test_get_nested_stack_template_contents_object(self):
        """Nested templates referenced by type are fetched via the callback."""
        tmpl = ('{"heat_template_version": "2016-04-08",'
                '"resources": {'
                '"FooBar": {'
                '"type": "foo/bar.yaml"}}}')
        url = 'http://no.where/path/to/a.yaml'
        self.m.ReplayAll()
        self.object_requested = False

        def object_request(method, object_url):
            self.object_requested = True
            self.assertEqual('GET', method)
            self.assertTrue(object_url.startswith("http://no.where/path/to/"))
            if object_url == url:
                return tmpl
            else:
                return '{"heat_template_version": "2016-04-08"}'

        files, tmpl_parsed = template_utils.get_template_contents(
            template_object=url,
            object_request=object_request)
        self.assertEqual(files['http://no.where/path/to/foo/bar.yaml'],
                         '{"heat_template_version": "2016-04-08"}')
        self.assertTrue(self.object_requested)

    def check_non_utf8_content(self, filename, content):
        """Assert a base64 get_file payload is stored base64-encoded.

        :param filename: name used both in the URL and the write_files path
        :param content: base64-encoded bytes the mocked fetch will decode to
        """
        base_url = 'file:///tmp'
        url = '%s/%s' % (base_url, filename)
        template = {'resources':
                    {'one_init':
                     {'type': 'OS::Heat::CloudConfig',
                      'properties':
                      {'cloud_config':
                       {'write_files':
                        [{'path': '/tmp/%s' % filename,
                          'content': {'get_file': url},
                          'encoding': 'b64'}]}}}}}
        self.m.StubOutWithMock(request, 'urlopen')
        # NOTE(review): base64.decodestring is a deprecated alias removed in
        # Python 3.9; base64.decodebytes is the py3 replacement. Kept here
        # while this file still supports py2 via six — TODO confirm.
        raw_content = base64.decodestring(content)
        response = six.BytesIO(raw_content)
        request.urlopen(url).AndReturn(response)
        self.m.ReplayAll()
        files = {}
        template_utils.resolve_template_get_files(
            template, files, base_url)
        self.assertEqual({url: content}, files)

    def test_get_zip_content(self):
        """Zip payloads (containing NULs) survive as base64 in files."""
        filename = 'heat.zip'
        content = b'''\
UEsDBAoAAAAAAEZZWkRbOAuBBQAAAAUAAAAIABwAaGVhdC50eHRVVAkAAxRbDVNYh\
t9SdXgLAAEE\n6AMAAATpAwAAaGVhdApQSwECHgMKAAAAAABGWVpEWzgLgQUAAAAF\
AAAACAAYAAAAAAABAAAApIEA\nAAAAaGVhdC50eHRVVAUAAxRbDVN1eAsAAQToAwA\
ABOkDAABQSwUGAAAAAAEAAQBOAAAARwAAAAAA\n'''
        # zip has '\0' in stream
        self.assertIn(b'\0', base64.decodestring(content))
        decoded_content = base64.decodestring(content)
        if six.PY3:
            self.assertRaises(UnicodeDecodeError, decoded_content.decode)
        else:
            self.assertRaises(
                UnicodeDecodeError,
                json.dumps,
                {'content': decoded_content})
        self.check_non_utf8_content(
            filename=filename, content=content)

    def test_get_utf16_content(self):
        """UTF-16 payloads (containing NULs) survive as base64 in files."""
        filename = 'heat.utf16'
        content = b'//4tTkhTCgA=\n'
        # utf16 has '\0' in stream
        self.assertIn(b'\0', base64.decodestring(content))
        decoded_content = base64.decodestring(content)
        if six.PY3:
            self.assertRaises(UnicodeDecodeError, decoded_content.decode)
        else:
            self.assertRaises(
                UnicodeDecodeError,
                json.dumps,
                {'content': decoded_content})
        self.check_non_utf8_content(filename=filename, content=content)

    def test_get_gb18030_content(self):
        """GB18030 payloads (no NULs) still round-trip as base64."""
        filename = 'heat.gb18030'
        content = b'1tDO5wo=\n'
        # gb18030 has no '\0' in stream
        # BUG FIX: the member must be bytes when the container is bytes —
        # a str member raises TypeError on Python 3 (cf. the zip/utf16 tests).
        self.assertNotIn(b'\0', base64.decodestring(content))
        decoded_content = base64.decodestring(content)
        if six.PY3:
            self.assertRaises(UnicodeDecodeError, decoded_content.decode)
        else:
            self.assertRaises(
                UnicodeDecodeError,
                json.dumps,
                {'content': decoded_content})
        self.check_non_utf8_content(filename=filename, content=content)
class TestTemplateGetFileFunctions(testtools.TestCase):
    """Tests for get_file resolution inside HOT templates."""

    # Template exercising get_file as a property value, inside a list, and
    # with non-string arguments (list/dict/None), which are left untouched.
    hot_template = b'''heat_template_version: 2013-05-23
resources:
resource1:
type: OS::type1
properties:
foo: {get_file: foo.yaml}
bar:
get_file:
'http://localhost/bar.yaml'
resource2:
type: OS::type1
properties:
baz:
- {get_file: baz/baz1.yaml}
- {get_file: baz/baz2.yaml}
- {get_file: baz/baz3.yaml}
ignored_list: {get_file: [ignore, me]}
ignored_dict: {get_file: {ignore: me}}
ignored_none: {get_file: }
'''

    def setUp(self):
        super(TestTemplateGetFileFunctions, self).setUp()
        # mox is verified and unstubbed even when a test fails.
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_hot_template(self):
        """String get_file targets are fetched (relative ones against the
        template's directory); list/dict/None targets pass through as-is.
        """
        self.m.StubOutWithMock(request, 'urlopen')
        tmpl_file = '/home/my/dir/template.yaml'
        url = 'file:///home/my/dir/template.yaml'
        request.urlopen(url).AndReturn(
            six.BytesIO(self.hot_template))
        # The five get_file fetches may happen in any order.
        request.urlopen(
            'http://localhost/bar.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'bar contents'))
        request.urlopen(
            'file:///home/my/dir/foo.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'foo contents'))
        request.urlopen(
            'file:///home/my/dir/baz/baz1.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'baz1 contents'))
        request.urlopen(
            'file:///home/my/dir/baz/baz2.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'baz2 contents'))
        request.urlopen(
            'file:///home/my/dir/baz/baz3.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'baz3 contents'))
        self.m.ReplayAll()
        files, tmpl_parsed = template_utils.get_template_contents(
            template_file=tmpl_file)
        # All fetched contents are keyed by their resolved absolute URL.
        self.assertEqual({
            'http://localhost/bar.yaml': b'bar contents',
            'file:///home/my/dir/foo.yaml': b'foo contents',
            'file:///home/my/dir/baz/baz1.yaml': b'baz1 contents',
            'file:///home/my/dir/baz/baz2.yaml': b'baz2 contents',
            'file:///home/my/dir/baz/baz3.yaml': b'baz3 contents',
        }, files)
        # get_file arguments in the parsed template are rewritten to the
        # same absolute URLs; non-string arguments are unchanged.
        self.assertEqual({
            'heat_template_version': '2013-05-23',
            'resources': {
                'resource1': {
                    'type': 'OS::type1',
                    'properties': {
                        'bar': {'get_file': 'http://localhost/bar.yaml'},
                        'foo': {'get_file': 'file:///home/my/dir/foo.yaml'},
                    },
                },
                'resource2': {
                    'type': 'OS::type1',
                    'properties': {
                        'baz': [
                            {'get_file': 'file:///home/my/dir/baz/baz1.yaml'},
                            {'get_file': 'file:///home/my/dir/baz/baz2.yaml'},
                            {'get_file': 'file:///home/my/dir/baz/baz3.yaml'},
                        ],
                        'ignored_list': {'get_file': ['ignore', 'me']},
                        'ignored_dict': {'get_file': {'ignore': 'me'}},
                        'ignored_none': {'get_file': None},
                    },
                }
            }
        }, tmpl_parsed)

    def test_hot_template_outputs(self):
        """get_file in the outputs section is fetched too."""
        self.m.StubOutWithMock(request, 'urlopen')
        tmpl_file = '/home/my/dir/template.yaml'
        url = 'file://%s' % tmpl_file
        foo_url = 'file:///home/my/dir/foo.yaml'
        contents = b'''
heat_template_version: 2013-05-23\n\
outputs:\n\
contents:\n\
value:\n\
get_file: foo.yaml\n'''
        request.urlopen(url).AndReturn(six.BytesIO(contents))
        request.urlopen(foo_url).AndReturn(six.BytesIO(b'foo contents'))
        self.m.ReplayAll()
        files = template_utils.get_template_contents(
            template_file=tmpl_file)[0]
        self.assertEqual({foo_url: b'foo contents'}, files)

    def test_hot_template_same_file(self):
        """A file referenced twice is fetched only once."""
        self.m.StubOutWithMock(request, 'urlopen')
        tmpl_file = '/home/my/dir/template.yaml'
        url = 'file://%s' % tmpl_file
        foo_url = 'file:///home/my/dir/foo.yaml'
        contents = b'''
heat_template_version: 2013-05-23\n
outputs:\n\
contents:\n\
value:\n\
get_file: foo.yaml\n\
template:\n\
value:\n\
get_file: foo.yaml\n'''
        request.urlopen(url).AndReturn(six.BytesIO(contents))
        # asserts that is fetched only once even though it is
        # referenced in the template twice
        request.urlopen(foo_url).AndReturn(six.BytesIO(b'foo contents'))
        self.m.ReplayAll()
        files = template_utils.get_template_contents(
            template_file=tmpl_file)[0]
        self.assertEqual({foo_url: b'foo contents'}, files)
class TestTemplateTypeFunctions(testtools.TestCase):
    """Tests for resolving resource ``type:`` entries that name templates."""

    # Parent template: resource1's type is a relative template path, and
    # resource2 nests another relative path inside a ResourceGroup def.
    hot_template = b'''heat_template_version: 2013-05-23
parameters:
param1:
type: string
resources:
resource1:
type: foo.yaml
properties:
foo: bar
resource2:
type: OS::Heat::ResourceGroup
properties:
resource_def:
type: spam/egg.yaml
'''
    foo_template = b'''heat_template_version: "2013-05-23"
parameters:
foo:
type: string
'''
    egg_template = b'''heat_template_version: "2013-05-23"
parameters:
egg:
type: string
'''

    def setUp(self):
        super(TestTemplateTypeFunctions, self).setUp()
        # mox is verified and unstubbed even when a test fails.
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_hot_template(self):
        """Template-typed resources are fetched, stored as JSON in files,
        and their type entries rewritten to absolute URLs.
        """
        self.m.StubOutWithMock(request, 'urlopen')
        tmpl_file = '/home/my/dir/template.yaml'
        url = 'file:///home/my/dir/template.yaml'
        # Each nested template is expected to be fetched twice.
        request.urlopen(
            'file:///home/my/dir/foo.yaml').InAnyOrder().AndReturn(
                six.BytesIO(self.foo_template))
        request.urlopen(
            'file:///home/my/dir/foo.yaml').InAnyOrder().AndReturn(
                six.BytesIO(self.foo_template))
        request.urlopen(url).InAnyOrder().AndReturn(
            six.BytesIO(self.hot_template))
        request.urlopen(
            'file:///home/my/dir/spam/egg.yaml').InAnyOrder().AndReturn(
                six.BytesIO(self.egg_template))
        request.urlopen(
            'file:///home/my/dir/spam/egg.yaml').InAnyOrder().AndReturn(
                six.BytesIO(self.egg_template))
        self.m.ReplayAll()
        files, tmpl_parsed = template_utils.get_template_contents(
            template_file=tmpl_file)
        # Nested templates are stored in files as JSON-serialized YAML.
        self.assertEqual(yaml.safe_load(self.foo_template.decode('utf-8')),
                         json.loads(files.get('file:///home/my/dir/foo.yaml')))
        self.assertEqual(
            yaml.safe_load(self.egg_template.decode('utf-8')),
            json.loads(files.get('file:///home/my/dir/spam/egg.yaml')))
        # Relative type paths are rewritten to absolute file URLs; the
        # built-in OS::Heat::ResourceGroup type is untouched.
        self.assertEqual({
            u'heat_template_version': u'2013-05-23',
            u'parameters': {
                u'param1': {
                    u'type': u'string'
                }
            },
            u'resources': {
                u'resource1': {
                    u'type': u'file:///home/my/dir/foo.yaml',
                    u'properties': {u'foo': u'bar'}
                },
                u'resource2': {
                    u'type': u'OS::Heat::ResourceGroup',
                    u'properties': {
                        u'resource_def': {
                            u'type': u'file:///home/my/dir/spam/egg.yaml'
                        }
                    }
                }
            }
        }, tmpl_parsed)
class TestTemplateInFileFunctions(testtools.TestCase):
    """Tests that get_file recursion also runs inside fetched templates."""

    # Parent -> foo (via get_file) -> bar (via get_file inside foo).
    hot_template = b'''heat_template_version: 2013-05-23
resources:
resource1:
type: OS::Heat::Stack
properties:
template: {get_file: foo.yaml}
'''
    foo_template = b'''heat_template_version: "2013-05-23"
resources:
foo:
type: OS::Type1
properties:
config: {get_file: bar.yaml}
'''
    bar_template = b'''heat_template_version: "2013-05-23"
parameters:
bar:
type: string
'''

    def setUp(self):
        super(TestTemplateInFileFunctions, self).setUp()
        # mox is verified and unstubbed even when a test fails.
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_hot_template(self):
        """Nested get_file targets are fetched transitively and each level
        is stored in files with its get_file entries rewritten.
        """
        self.m.StubOutWithMock(request, 'urlopen')
        tmpl_file = '/home/my/dir/template.yaml'
        url = 'file:///home/my/dir/template.yaml'
        foo_url = 'file:///home/my/dir/foo.yaml'
        bar_url = 'file:///home/my/dir/bar.yaml'
        # foo and bar are each expected to be fetched twice, in any order.
        request.urlopen(url).InAnyOrder().AndReturn(
            six.BytesIO(self.hot_template))
        request.urlopen(foo_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(foo_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(bar_url).InAnyOrder().AndReturn(
            six.BytesIO(self.bar_template))
        request.urlopen(bar_url).InAnyOrder().AndReturn(
            six.BytesIO(self.bar_template))
        self.m.ReplayAll()
        files, tmpl_parsed = template_utils.get_template_contents(
            template_file=tmpl_file)
        # Innermost template stored as JSON.
        self.assertEqual(yaml.safe_load(self.bar_template.decode('utf-8')),
                         json.loads(files.get('file:///home/my/dir/bar.yaml')))
        # Middle template stored with its get_file rewritten to bar's URL.
        self.assertEqual({
            u'heat_template_version': u'2013-05-23',
            u'resources': {
                u'foo': {
                    u'type': u'OS::Type1',
                    u'properties': {
                        u'config': {
                            u'get_file': u'file:///home/my/dir/bar.yaml'
                        }
                    }
                }
            }
        }, json.loads(files.get('file:///home/my/dir/foo.yaml')))
        # Parent template parsed with its get_file rewritten to foo's URL.
        self.assertEqual({
            u'heat_template_version': u'2013-05-23',
            u'resources': {
                u'resource1': {
                    u'type': u'OS::Heat::Stack',
                    u'properties': {
                        u'template': {
                            u'get_file': u'file:///home/my/dir/foo.yaml'
                        }
                    }
                }
            }
        }, tmpl_parsed)
class TestNestedIncludes(testtools.TestCase):
    """Environment processing recurses through nested template includes."""

    # Top-level template: a relative type, a nested ResourceGroup type and
    # a get_file, all relative to the template's directory.
    hot_template = b'''heat_template_version: 2013-05-23
parameters:
param1:
type: string
resources:
resource1:
type: foo.yaml
properties:
foo: bar
resource2:
type: OS::Heat::ResourceGroup
properties:
resource_def:
type: spam/egg.yaml
with: {get_file: spam/ham.yaml}
'''
    # egg.yaml has the same shape, so its own relative references
    # (one/two/three) must resolve against spam/, not the top directory.
    egg_template = b'''heat_template_version: 2013-05-23
parameters:
param1:
type: string
resources:
resource1:
type: one.yaml
properties:
foo: bar
resource2:
type: OS::Heat::ResourceGroup
properties:
resource_def:
type: two.yaml
with: {get_file: three.yaml}
'''
    foo_template = b'''heat_template_version: "2013-05-23"
parameters:
foo:
type: string
'''

    def setUp(self):
        super(TestNestedIncludes, self).setUp()
        # mox is verified and unstubbed even when a test fails.
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_env_nested_includes(self):
        """Processing an env whose registry names a template resolves every
        nested include relative to the file that references it.
        """
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
resource_registry:
"OS::Thingy": template.yaml
'''
        template_url = u'file:///home/my/dir/template.yaml'
        foo_url = u'file:///home/my/dir/foo.yaml'
        egg_url = u'file:///home/my/dir/spam/egg.yaml'
        ham_url = u'file:///home/my/dir/spam/ham.yaml'
        one_url = u'file:///home/my/dir/spam/one.yaml'
        two_url = u'file:///home/my/dir/spam/two.yaml'
        three_url = u'file:///home/my/dir/spam/three.yaml'
        # Expected fetch sequence: env, template (twice), then the nested
        # includes in any order (several are fetched twice).
        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen(template_url).AndReturn(
            six.BytesIO(self.hot_template))
        request.urlopen(template_url).AndReturn(
            six.BytesIO(self.hot_template))
        request.urlopen(foo_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(egg_url).InAnyOrder().AndReturn(
            six.BytesIO(self.egg_template))
        request.urlopen(ham_url).InAnyOrder().AndReturn(
            six.BytesIO(b'ham contents'))
        request.urlopen(one_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(two_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(three_url).InAnyOrder().AndReturn(
            six.BytesIO(b'three contents'))
        request.urlopen(foo_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(egg_url).InAnyOrder().AndReturn(
            six.BytesIO(self.egg_template))
        request.urlopen(one_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(two_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        self.m.ReplayAll()
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        # The registry entry is rewritten to the template's absolute URL.
        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': template_url}},
            env_dict)
        # The stored top-level template has its references rewritten.
        self.assertEqual({
            u'heat_template_version': u'2013-05-23',
            u'parameters': {u'param1': {u'type': u'string'}},
            u'resources': {
                u'resource1': {
                    u'properties': {u'foo': u'bar'},
                    u'type': foo_url
                },
                u'resource2': {
                    u'type': u'OS::Heat::ResourceGroup',
                    u'properties': {
                        u'resource_def': {
                            u'type': egg_url},
                        u'with': {u'get_file': ham_url}
                    }
                }
            }
        }, json.loads(files.get(template_url)))
        self.assertEqual(yaml.safe_load(self.foo_template.decode('utf-8')),
                         json.loads(files.get(foo_url)))
        # egg.yaml's relative references resolved against spam/.
        self.assertEqual({
            u'heat_template_version': u'2013-05-23',
            u'parameters': {u'param1': {u'type': u'string'}},
            u'resources': {
                u'resource1': {
                    u'properties': {u'foo': u'bar'},
                    u'type': one_url},
                u'resource2': {
                    u'type': u'OS::Heat::ResourceGroup',
                    u'properties': {
                        u'resource_def': {u'type': two_url},
                        u'with': {u'get_file': three_url}
                    }
                }
            }
        }, json.loads(files.get(egg_url)))
        # Raw get_file payloads are stored as bytes, not JSON.
        self.assertEqual(b'ham contents',
                         files.get(ham_url))
        self.assertEqual(yaml.safe_load(self.foo_template.decode('utf-8')),
                         json.loads(files.get(one_url)))
        self.assertEqual(yaml.safe_load(self.foo_template.decode('utf-8')),
                         json.loads(files.get(two_url)))
        self.assertEqual(b'three contents',
                         files.get(three_url))
| 34.904485
| 79
| 0.559358
| 4,631
| 42,025
| 4.905852
| 0.066508
| 0.040143
| 0.029711
| 0.040055
| 0.822615
| 0.791628
| 0.746424
| 0.709758
| 0.677363
| 0.6484
| 0
| 0.014733
| 0.303867
| 42,025
| 1,203
| 80
| 34.9335
| 0.761853
| 0.016823
| 0
| 0.661463
| 0
| 0
| 0.271963
| 0.101148
| 0
| 0
| 0
| 0
| 0.090732
| 1
| 0.049756
| false
| 0
| 0.012683
| 0.000976
| 0.084878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
814fa30becb014fb5eb40c9cfc6b78e8173df3ea
| 147
|
py
|
Python
|
app/demo/__init__.py
|
dalepotter/govuk-frontend-wtf-demo
|
dff1f8fe77c39372059f875fd291008c3b1e3f95
|
[
"MIT"
] | null | null | null |
app/demo/__init__.py
|
dalepotter/govuk-frontend-wtf-demo
|
dff1f8fe77c39372059f875fd291008c3b1e3f95
|
[
"MIT"
] | null | null | null |
app/demo/__init__.py
|
dalepotter/govuk-frontend-wtf-demo
|
dff1f8fe77c39372059f875fd291008c3b1e3f95
|
[
"MIT"
] | null | null | null |
from flask import Blueprint

# Blueprint for the demo pages; its templates live in ../templates/demo.
bp = Blueprint("demo", __name__, template_folder="../templates/demo")

# Imported after `bp` exists so the route handlers can attach to it;
# noqa suppresses the late-import / unused-import lint warnings.
from app.demo import routes  # noqa: E402,F401
| 24.5
| 69
| 0.748299
| 20
| 147
| 5.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 0.129252
| 147
| 5
| 70
| 29.4
| 0.773438
| 0.102041
| 0
| 0
| 0
| 0
| 0.161538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
818454b367ccef74f23641e6409ce74074852572
| 561
|
py
|
Python
|
python/lib/Lib/site-packages/django/views/generic/__init__.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/lib/Lib/site-packages/django/views/generic/__init__.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 1
|
2021-06-30T10:10:56.000Z
|
2021-06-30T10:10:56.000Z
|
python/lib/Lib/site-packages/django/views/generic/__init__.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2017-08-04T02:42:35.000Z
|
2021-06-30T10:10:34.000Z
|
from django.views.generic.base import View, TemplateView, RedirectView
from django.views.generic.dates import (ArchiveIndexView, YearArchiveView, MonthArchiveView,
WeekArchiveView, DayArchiveView, TodayArchiveView,
DateDetailView)
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
class GenericViewError(Exception):
    """A problem in a generic view."""
| 43.153846
| 92
| 0.714795
| 55
| 561
| 7.290909
| 0.581818
| 0.124688
| 0.187032
| 0.274314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219251
| 561
| 12
| 93
| 46.75
| 0.915525
| 0.049911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.111111
| 0.555556
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
81991f7000cb9415bf6a6aa200d280316e217cac
| 7,955
|
py
|
Python
|
tests/test_score.py
|
jlward/cribbage
|
8c2d55464b2f75954d6c3a372cd2594ce190c6b3
|
[
"MIT"
] | null | null | null |
tests/test_score.py
|
jlward/cribbage
|
8c2d55464b2f75954d6c3a372cd2594ce190c6b3
|
[
"MIT"
] | null | null | null |
tests/test_score.py
|
jlward/cribbage
|
8c2d55464b2f75954d6c3a372cd2594ce190c6b3
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from card import Card
from score import ScoreHand, ScorePegging
class ScoreHandTestCase(TestCase):
    """Tests for ScoreHand's per-category checks and the hand total.

    Each test builds a four-card hand plus a cut card, then mutates the
    shared Card objects between assertions — assertion order matters.
    """

    def test_flush_not_crib(self):
        # Four same-suit cards with a matching cut card.
        cards = [
            Card(number=1, suit='a'),
            Card(number=2, suit='a'),
            Card(number=3, suit='a'),
            Card(number=4, suit='a'),
        ]
        cut_card = Card(
            number=5,
            suit='a',
        )
        score = ScoreHand(cards, cut_card)
        # Five-card flush scores 5.
        self.assertEqual(score.check_for_flush(), 5)
        cut_card.suit = 'b'
        # Outside the crib a hand-only flush still scores 4.
        self.assertEqual(score.check_for_flush(), 4)
        cards[0].suit = 'b'
        cut_card.suit = 'a'
        # Mixed suits in hand: no flush.
        self.assertEqual(score.check_for_flush(), 0)

    def test_flush_crib(self):
        cards = [
            Card(number=1, suit='a'),
            Card(number=2, suit='a'),
            Card(number=3, suit='a'),
            Card(number=4, suit='a'),
        ]
        cut_card = Card(
            number=5,
            suit='a',
        )
        score = ScoreHand(cards, cut_card, is_crib=True)
        self.assertEqual(score.check_for_flush(), 5)
        cut_card.suit = 'b'
        # In the crib the cut card must match too: four-card flush is 0.
        self.assertEqual(score.check_for_flush(), 0)
        cards[0].suit = 'b'
        cut_card.suit = 'a'
        self.assertEqual(score.check_for_flush(), 0)

    def test_check_for_straight(self):
        cards = [
            Card(number=1, suit='a'),
            Card(number=2, suit='a'),
            Card(number=3, suit='a'),
            Card(number=4, suit='a'),
        ]
        cut_card = Card(
            number=5,
            suit='a',
        )
        score = ScoreHand(cards, cut_card)
        # 1-2-3-4-5: five-card run scores 5.
        self.assertEqual(score.check_for_straight(), 5)
        cards[2].number = 9
        # Break the run: 1-2-9-4-5 has no 3+ run.
        self.assertEqual(score.check_for_straight(), 0)
        cards[2].number = 3
        cards[3].number = 5
        # 1-2-3 with duplicate 5s: single three-card run scores 3.
        self.assertEqual(score.check_for_straight(), 3)
        cards[3].number = 3
        # 1-2-3-3-5: double three-card run scores 6.
        self.assertEqual(score.check_for_straight(), 6)

    def test_check_for_pairs(self):
        cards = [
            Card(number=1, suit='a'),
            Card(number=2, suit='a'),
            Card(number=3, suit='a'),
            Card(number=4, suit='a'),
        ]
        cut_card = Card(
            number=5,
            suit='a',
        )
        score = ScoreHand(cards, cut_card)
        self.assertEqual(score.check_for_pairs(), 0)
        cut_card.number = 1
        # One pair: 2 points.
        self.assertEqual(score.check_for_pairs(), 2)
        cards[1].number = 1
        # Three of a kind: 6 points.
        self.assertEqual(score.check_for_pairs(), 6)
        cards[3].number = 3
        # Plus an extra pair of 3s: 8 points.
        self.assertEqual(score.check_for_pairs(), 8)

    def test_check_for_15s(self):
        cards = [
            Card(number=1, suit='a'),
            Card(number=1, suit='a'),
            Card(number=3, suit='a'),
            Card(number=4, suit='a'),
        ]
        cut_card = Card(
            number=5,
            suit='a',
        )
        score = ScoreHand(cards, cut_card)
        self.assertEqual(score.check_for_15s(), 0)
        cards[1].number = 2
        # 1+2+3+4+5 = 15: one fifteen, 2 points.
        self.assertEqual(score.check_for_15s(), 2)
        for card in cards:
            card.number = 5
        cut_card.number = 12
        # Four 5s plus a ten-value cut: eight fifteens, 16 points.
        self.assertEqual(score.check_for_15s(), 16)

    def test_check_for_nobs(self):
        cards = [
            Card(number=1, suit='a'),
            Card(number=1, suit='a'),
            Card(number=3, suit='a'),
            Card(number=4, suit='a'),
        ]
        cut_card = Card(
            number=5,
            suit='a',
        )
        score = ScoreHand(cards, cut_card)
        self.assertEqual(score.check_for_nobs(), 0)
        cards[0].number = 11
        # Jack (11) matching the cut card's suit: 1 point for nobs.
        self.assertEqual(score.check_for_nobs(), 1)
        cut_card.suit = 'b'
        # Jack of a different suit than the cut: no nobs.
        self.assertEqual(score.check_for_nobs(), 0)

    def test_score_hand(self):
        cards = [
            Card(number=1, suit='a'),
            Card(number=1, suit='a'),
            Card(number=3, suit='a'),
            Card(number=4, suit='a'),
        ]
        cut_card = Card(
            number=5,
            suit='a',
        )
        score = ScoreHand(cards, cut_card)
        # Combined categories for this hand total 10.
        self.assertEqual(score.score_hand(), 10)
class ScorePeggingTestCase(TestCase):
    """Tests for ScorePegging over a growing pegging stack."""

    def assert_round(self, cards):
        """Play (number, expected_points) pairs one at a time, scoring the
        stack after each card is added.
        """
        stack = []
        for number, expected_points in cards:
            stack.append(Card(number=number, suit='a'))
            score = ScorePegging(stack)
            self.assertEqual(score.score(), expected_points)

    def test_magic_numbers_15(self):
        cards = [
            Card(number=10, suit='a'),
            Card(number=5, suit='a'),
        ]
        score = ScorePegging(cards)
        # 10 + 5 = 15.
        self.assertEqual(score.check_for_magic_numbers(), True)
        cards[0].number = 12
        # Queen (12) counts as ten: still 15.
        self.assertEqual(score.check_for_magic_numbers(), True)
        cards[0].number = 4
        # 4 + 5 = 9: not a magic number.
        self.assertEqual(score.check_for_magic_numbers(), False)

    def test_magic_numbers_31(self):
        cards = [
            Card(number=10, suit='a'),
            Card(number=11, suit='a'),
            Card(number=12, suit='a'),
            Card(number=1, suit='a'),
        ]
        score = ScorePegging(cards)
        # Three ten-value face cards plus an ace: 31.
        self.assertEqual(score.check_for_magic_numbers(), True)
        cards[0].number = 8
        self.assertEqual(score.check_for_magic_numbers(), False)

    def test_check_for_pair_points(self):
        cards = [
            Card(number=10, suit='a'),
            Card(number=11, suit='a'),
            Card(number=12, suit='a'),
            Card(number=1, suit='a'),
        ]
        score = ScorePegging(cards)
        self.assertEqual(score.check_for_pair_points(), 0)
        cards[3].number = 12
        # Last two cards pair: 2.
        self.assertEqual(score.check_for_pair_points(), 2)
        cards[1].number = 12
        # Wait — last three match: 6.
        self.assertEqual(score.check_for_pair_points(), 6)
        cards[0].number = 12
        # All four match: 12.
        self.assertEqual(score.check_for_pair_points(), 12)

    def test_check_for_straight_random_outside(self):
        cards = [
            Card(number=10, suit='a'),
            Card(number=12, suit='a'),
        ]
        score = ScorePegging(cards)
        # Two cards can never make a pegging run.
        self.assertEqual(score.check_for_straight_points(), 0)
        cards.append(
            Card(number=11, suit='a'),
        )
        # 10-12-11 completes a three-card run: 3.
        self.assertEqual(score.check_for_straight_points(), 3)
        cards.append(
            Card(number=9, suit='a'),
        )
        # 9 extends it to four: 4.
        self.assertEqual(score.check_for_straight_points(), 4)
        cards[1].number = 2
        # Replacing 12 with 2 breaks the run.
        self.assertEqual(score.check_for_straight_points(), 0)

    def test_check_for_straight_inside_with_other_straights(self):
        cards = [
            Card(number=1, suit='a'),
            Card(number=2, suit='a'),
            Card(number=3, suit='a'),
            Card(number=5, suit='a'),
            Card(number=6, suit='a'),
            Card(number=7, suit='a'),
        ]
        score = ScorePegging(cards)
        # Only the run ending at the most recent card counts: 5-6-7 = 3.
        self.assertEqual(score.check_for_straight_points(), 3)
        cards.append(
            Card(number=4, suit='a'),
        )
        # The 4 bridges both runs: 1..7 = 7.
        self.assertEqual(score.check_for_straight_points(), 7)

    def test_score_magic_numbers(self):
        # Fifteens mid-round score 2 each.
        cards = [
            (10, 0),
            (5, 2),
            (10, 0),
            (6, 2),
        ]
        self.assert_round(cards)

    def test_score_pairs(self):
        # Pair then three of a kind: 2 then 6.
        cards = [
            (10, 0),
            (10, 2),
            (10, 6),
        ]
        self.assert_round(cards)

    def test_score_striaghts(self):
        # Three-card run, broken, then rebuilt into a six-card run.
        cards = [
            (1, 0),
            (2, 0),
            (3, 3),
            (5, 0),
            (6, 0),
            (4, 6),
        ]
        self.assert_round(cards)

    def test_score_fifteen_with_pair(self):
        # Third 5 scores fifteen (2) plus three of a kind (6): 8.
        cards = [
            (5, 0),
            (5, 2),
            (5, 8),
        ]
        self.assert_round(cards)

    def test_score_fifteen_with_straight(self):
        # 4+5+6 = 15 (2) plus a three-card run (3): 5.
        cards = [
            (4, 0),
            (5, 0),
            (6, 5),
        ]
        self.assert_round(cards)
| 29.246324
| 66
| 0.520553
| 951
| 7,955
| 4.173502
| 0.071504
| 0.151172
| 0.186445
| 0.220459
| 0.791887
| 0.782817
| 0.737969
| 0.713278
| 0.680524
| 0.527589
| 0
| 0.037476
| 0.342552
| 7,955
| 271
| 67
| 29.354244
| 0.721415
| 0
| 0
| 0.576132
| 0
| 0
| 0.008045
| 0
| 0
| 0
| 0
| 0
| 0.176955
| 1
| 0.074074
| false
| 0
| 0.012346
| 0
| 0.09465
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
819c16923cd95d9f72d431bb1610dfd07940c16a
| 203
|
py
|
Python
|
views/__init__.py
|
oogou11/shop_api
|
a35f08d43e52f28f306409558725ce0c6fc5abe6
|
[
"MIT"
] | null | null | null |
views/__init__.py
|
oogou11/shop_api
|
a35f08d43e52f28f306409558725ce0c6fc5abe6
|
[
"MIT"
] | null | null | null |
views/__init__.py
|
oogou11/shop_api
|
a35f08d43e52f28f306409558725ce0c6fc5abe6
|
[
"MIT"
] | null | null | null |
from views.law import web_law
from views.shop import api_shop
from views.users import api_user
from views.search import api_search
from views.wechart import api_wechart
from views.image import api_image
| 29
| 37
| 0.852217
| 36
| 203
| 4.638889
| 0.333333
| 0.323353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118227
| 203
| 6
| 38
| 33.833333
| 0.932961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
81ce62ff1124bf762488ce78ff1fb889e4a5e0f1
| 41
|
py
|
Python
|
python/testData/resolve/multiFile/fromImportStar/FromImportStar.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/resolve/multiFile/fromImportStar/FromImportStar.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/resolve/multiFile/fromImportStar/FromImportStar.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from ImportedFile import *
# IDE resolve-test fixture: `func` comes from the star import above, and the
# <ref> marker tells the test harness where reference resolution is checked.
func()
#<ref>
| 10.25
| 26
| 0.707317
| 5
| 41
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 4
| 27
| 10.25
| 0.828571
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
81cec6bde9c24fab553e9367280b43bbdc8bfc92
| 124
|
py
|
Python
|
examples/simple_example/sample_app/admin.py
|
lizhiying/django-thumbs
|
6d04048cda034819d65df29fceac1f66cad65581
|
[
"MIT"
] | 20
|
2016-01-20T09:49:16.000Z
|
2022-03-18T12:46:06.000Z
|
examples/simple_example/sample_app/admin.py
|
lizhiying/django-thumbs
|
6d04048cda034819d65df29fceac1f66cad65581
|
[
"MIT"
] | null | null | null |
examples/simple_example/sample_app/admin.py
|
lizhiying/django-thumbs
|
6d04048cda034819d65df29fceac1f66cad65581
|
[
"MIT"
] | 10
|
2015-11-20T17:15:06.000Z
|
2022-01-17T03:24:02.000Z
|
# -*- encoding: utf-8 -*-
from sample_app.models import Photo
from django.contrib import admin
admin.site.register(Photo)
| 17.714286
| 35
| 0.75
| 18
| 124
| 5.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0.129032
| 124
| 6
| 36
| 20.666667
| 0.842593
| 0.185484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
81d71d217bab5069df9691f2d4f565f89e886d00
| 36,612
|
py
|
Python
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/common/profiles/profile_common.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 82
|
2016-06-29T17:24:43.000Z
|
2021-04-16T06:49:17.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/common/profiles/profile_common.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 6
|
2022-01-12T18:22:08.000Z
|
2022-03-25T10:19:27.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/common/profiles/profile_common.py
|
lmnotran/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 56
|
2016-08-02T10:50:50.000Z
|
2021-07-19T08:57:34.000Z
|
from pycalcmodel.core.output import ModelOutput, ModelOutputType
from pyradioconfig.calculator_model_framework.interfaces.iprofile import IProfile
from py_2_and_3_compatibility import *
"""
Generic template to build a Model Output based on a hardware register
"""
def _buildModelOutputStringFromRegisterField(hw_string, category):
return "ModelOutput(model.vars.{0}, '{1}', ModelOutputType.SVD_REG_FIELD, readable_name = '{0}')".format(hw_string, category)
"""
Builds the inputs and outputs of the CRC block
"""
def buildCrcInputs(model, profile):
IProfile.make_optional_input(profile, model.vars.crc_poly, 'crc', readable_name="CRC Polynomial", default=model.vars.crc_poly.var_enum.CRC_16 )
IProfile.make_optional_input(profile, model.vars.crc_seed, 'crc', readable_name="CRC Seed", default=long(0), value_limit_min=long(0), value_limit_max=long(0xFFFFFFFF))
IProfile.make_optional_input(profile, model.vars.crc_byte_endian, 'crc', readable_name="CRC Byte Endian", default=model.vars.crc_byte_endian.var_enum.MSB_FIRST)
IProfile.make_optional_input(profile, model.vars.crc_bit_endian, 'crc', readable_name="CRC Output Bit Endian", default=model.vars.crc_bit_endian.var_enum.MSB_FIRST)
IProfile.make_optional_input(profile, model.vars.crc_pad_input, 'crc', readable_name="CRC Input Padding", default=False)
IProfile.make_optional_input(profile, model.vars.crc_input_order, 'crc', readable_name="CRC Input Bit Endian", default=model.vars.crc_input_order.var_enum.LSB_FIRST)
IProfile.make_optional_input(profile, model.vars.crc_invert, 'crc', readable_name="CRC Invert", default=False)
def buildCrcOutputs(model, profile, family):
# These are named differently in 90nm parts vs Panther
if family in ["dumbo", "jumbo", "nerio", "nixi"]:
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_CTRL_PADCRCINPUT', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_CTRL_BITSPERWORD', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_CTRL_BITREVERSE', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_CTRL_BYTEREVERSE', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_CTRL_INPUTBITORDER', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_CTRL_CRCWIDTH', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_CTRL_OUTPUTINV', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_INIT_INIT', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('CRC_POLY_POLY', 'crc')))
else:
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_CTRL_PADCRCINPUT', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_CTRL_BITSPERWORD', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_CTRL_BITREVERSE', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_CTRL_BYTEREVERSE', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_CTRL_INPUTBITORDER', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_CTRL_CRCWIDTH', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_CTRL_OUTPUTINV', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_INIT_INIT', 'crc')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('RFCRC_POLY_POLY', 'crc')))
"""
Builds the inputs and outputs of the Whitening block
"""
def buildWhiteInputs(model, profile):
IProfile.make_optional_input(profile, model.vars.white_poly, 'whitening', default=model.vars.white_poly.var_enum.NONE, readable_name="Whitening Polynomial")
IProfile.make_optional_input(profile, model.vars.white_seed, 'whitening', default=0x0000FFFF, readable_name="Whitening Seed", value_limit_min=0, value_limit_max=0xFFFF)
IProfile.make_optional_input(profile, model.vars.white_output_bit, 'whitening', default=0, readable_name="Whitening Output Bit", value_limit_min=0, value_limit_max=0x0F)
def buildWhiteOutputs(model, profile):
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_WHITECTRL_SHROUTPUTSEL', 'whitening')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_WHITECTRL_XORFEEDBACK', 'whitening')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_WHITECTRL_FEEDBACKSEL', 'whitening')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_WHITEPOLY_POLY', 'whitening')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_WHITEINIT_WHITEINIT', 'whitening')))
"""
Builds the inputs and outputs of the FEC block
"""
def buildFecInputs(model, profile):
IProfile.make_optional_input(profile, model.vars.fec_en, 'Channel_Coding', default=model.vars.fec_en.var_enum.NONE, readable_name="FEC Algorithm")
def buildFecOutputs(model, profile):
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_CONVMODE', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_CONVDECODEMODE', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_CONVTRACEBACKDISABLE', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_CONVINV', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_INTERLEAVEMODE', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_INTERLEAVEFIRSTINDEX', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_INTERLEAVEWIDTH', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_CONVBUSLOCK', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_CONVSUBFRAMETERMINATE', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_SINGLEBLOCK', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_FORCE2FSK', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_CONVHARDERROR', 'fec')))
# NOTE: We are purposely removing FRC_CONVRAMADDR_CONVRAMADDR from the profile outputs because it needs to be
# written by the SW to a buffer allocated in a specific RAM region, no point simply writing it to 0x00000000
# profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CONVRAMADDR_CONVRAMADDR', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_TRAILTXDATACTRL_TRAILTXDATA', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_TRAILTXDATACTRL_TRAILTXDATACNT', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_TRAILTXDATACTRL_TRAILTXDATAFORCE', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CONVGENERATOR_GENERATOR0', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CONVGENERATOR_GENERATOR1', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CONVGENERATOR_RECURSIVE', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CONVGENERATOR_NONSYSTEMATIC', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_PUNCTCTRL_PUNCT0', 'fec')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_PUNCTCTRL_PUNCT1', 'fec')))
"""
Builds the inputs and outputs of the general frame settings
"""
def buildFrameInputs(model, profile, family):
MIN_FRAME_LENGTH = 1
MAX_FRAME_LENGTH = 0x7fffffff
MIN_HEADER_LENGTH = 1
MAX_HEADER_LENGTH = 254
MAX_FRAME_TYPE_LENGTH = 0xffff
#Inputs
IProfile.make_optional_input(profile, model.vars.frame_bitendian, 'frame_general', default=model.vars.frame_bitendian.var_enum.LSB_FIRST, readable_name="Frame Bit Endian")
IProfile.make_optional_input(profile, model.vars.frame_length_type, 'frame_general', default=model.vars.frame_length_type.var_enum.FIXED_LENGTH, readable_name="Frame Length Algorithm")
IProfile.make_optional_input(profile, model.vars.header_en, 'frame_general', default=False, readable_name="Header Enable")
IProfile.make_optional_input(profile, model.vars.frame_coding, 'frame_general', default=model.vars.frame_coding.var_enum.NONE, readable_name="Frame Coding Method")
# IProfile.make_required_input(profile, model.vars.accept_crc_errors, 'frame', default=False)
# -- Payload --
IProfile.make_optional_input(profile, model.vars.payload_white_en, 'frame_payload', default=False, readable_name="Payload Whitening Enable")
IProfile.make_optional_input(profile, model.vars.payload_crc_en, 'frame_payload', default=True, readable_name="Insert/Check CRC after payload")
if family != 'dumbo':
IProfile.make_hidden_input(profile, model.vars.payload_addtrailtxdata_en, 'frame_payload', readable_name="Add Trail TX Data to subframe")
if family == 'nerio' or family == 'nixi': # TODO Is this correct? Do we include this in Nixi?
IProfile.make_hidden_input(profile, model.vars.payload_excludesubframewcnt_en, 'frame_payload', readable_name="Exclude words in subframe from Word Couunter")
# -- Header --
IProfile.make_optional_input(profile, model.vars.header_size, 'frame_header', default=1, readable_name="Header Size", value_limit_min=MIN_HEADER_LENGTH, value_limit_max=MAX_HEADER_LENGTH)
IProfile.make_optional_input(profile, model.vars.header_calc_crc, 'frame_header', default=False, readable_name="CRC Header")
# IProfile.make_required_input(profile, model.vars.header_include_crc, 'frame_header', readable_name="Insert/Check CRC after header", default=False, default_visibility=ModelInputDefaultVisibilityType.HIDDEN))
IProfile.make_optional_input(profile, model.vars.header_white_en, 'frame_header', default=False, readable_name="Whiten Header")
if family != 'dumbo':
IProfile.make_hidden_input(profile, model.vars.header_addtrailtxdata_en, 'frame_header', readable_name="Add Trail TX Data to subframe")
if family not in ["dumbo", "jumbo"]:
# TODO Is this correct? Do we include this in Nixi?
IProfile.make_hidden_input(profile, model.vars.header_excludesubframewcnt_en, 'frame_payload', readable_name="Exclude words in subframe from Word Couunter")
# -- Fixed Length --
# RAIL doesn't support packets longer than 255 (appended info inclusive) so restrict this.
# I do realize that header+frame_fixed_length can possibly exceed this.
IProfile.make_optional_input(profile, model.vars.fixed_length_size, 'frame_fixed_length', default=1, readable_name="Fixed Payload Size", value_limit_min=MIN_FRAME_LENGTH, value_limit_max=MAX_FRAME_LENGTH)
# -- Variable Length --
IProfile.make_optional_input(profile, model.vars.var_length_numbits, 'frame_var_length', default=8, readable_name="Variable Length Bit Size", value_limit_min=1, value_limit_max=12)
IProfile.make_optional_input(profile, model.vars.var_length_bitendian, 'frame_var_length', default=model.vars.var_length_bitendian.var_enum.LSB_FIRST, readable_name="Variable Length Bit Endian")
IProfile.make_optional_input(profile, model.vars.var_length_byteendian, 'frame_var_length', default=model.vars.var_length_byteendian.var_enum.LSB_FIRST, readable_name="Variable Length Byte Endian")
IProfile.make_optional_input(profile, model.vars.var_length_shift, 'frame_var_length', default=0, readable_name="Variable Length Bit Location", value_limit_min=0, value_limit_max=7)
IProfile.make_optional_input(profile, model.vars.var_length_minlength, 'frame_var_length', default=0, readable_name="Minimum Length", value_limit_min=0, value_limit_max=4095)
IProfile.make_optional_input(profile, model.vars.var_length_maxlength, 'frame_var_length', default=255, readable_name="Maximum Length", value_limit_min=0, value_limit_max=4095)
IProfile.make_optional_input(profile, model.vars.var_length_includecrc, 'frame_var_length', default=False, readable_name="Length Includes CRC Bytes")
IProfile.make_optional_input(profile, model.vars.var_length_adjust, 'frame_var_length', default=0, readable_name="Variable Frame Length Adjust", value_limit_min=-4096, value_limit_max=4095)
# -- Frame Type --
IProfile.make_optional_input(profile, model.vars.frame_type_loc, 'frame_type_length', default=0, readable_name="Frame Type Location", value_limit_min=0, value_limit_max=255)
IProfile.make_optional_input(profile, model.vars.frame_type_bits, 'frame_type_length', default=3, readable_name="Number of Frame Type Bits", value_limit_min=1, value_limit_max=3)
IProfile.make_optional_input(profile, model.vars.frame_type_lsbit, 'frame_type_length', default=0, readable_name="Frame Type Bit 0 Location", value_limit_min=0, value_limit_max=0x7)
IProfile.make_optional_input(profile, model.vars.frame_type_0_length, 'frame_type_length', default=16, readable_name="Frame Type 0 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_1_length, 'frame_type_length', default=16, readable_name="Frame Type 1 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_2_length, 'frame_type_length', default=16, readable_name="Frame Type 2 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_3_length, 'frame_type_length', default=16, readable_name="Frame Type 3 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_4_length, 'frame_type_length', default=16, readable_name="Frame Type 4 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_5_length, 'frame_type_length', default=16, readable_name="Frame Type 5 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_6_length, 'frame_type_length', default=16, readable_name="Frame Type 6 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_7_length, 'frame_type_length', default=16, readable_name="Frame Type 7 Length", value_limit_min=0, value_limit_max=MAX_FRAME_TYPE_LENGTH)
IProfile.make_optional_input(profile, model.vars.frame_type_0_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 0")
IProfile.make_optional_input(profile, model.vars.frame_type_1_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 1")
IProfile.make_optional_input(profile, model.vars.frame_type_2_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 2")
IProfile.make_optional_input(profile, model.vars.frame_type_3_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 3")
IProfile.make_optional_input(profile, model.vars.frame_type_4_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 4")
IProfile.make_optional_input(profile, model.vars.frame_type_5_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 5")
IProfile.make_optional_input(profile, model.vars.frame_type_6_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 6")
IProfile.make_optional_input(profile, model.vars.frame_type_7_valid, 'frame_type_length', default=True, readable_name="Accept Frame Type 7")
IProfile.make_optional_input(profile, model.vars.frame_type_0_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 0")
IProfile.make_optional_input(profile, model.vars.frame_type_1_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 1")
IProfile.make_optional_input(profile, model.vars.frame_type_2_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 2")
IProfile.make_optional_input(profile, model.vars.frame_type_3_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 3")
IProfile.make_optional_input(profile, model.vars.frame_type_4_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 4")
IProfile.make_optional_input(profile, model.vars.frame_type_5_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 5")
IProfile.make_optional_input(profile, model.vars.frame_type_6_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 6")
IProfile.make_optional_input(profile, model.vars.frame_type_7_filter, 'frame_type_length', default=False, readable_name="Apply Address Filter for Frame Type 7")
def buildFrameOutputs(model, profile, family):
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FECCTRL_BLOCKWHITEMODE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CTRL_BITSPERWORD', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CTRL_RXFCDMODE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CTRL_TXFCDMODE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CTRL_BITORDER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_CTRL_UARTMODE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_WCNTCMP0_FRAMELENGTH', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_WCNTCMP1_LENGTHFIELDLOC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_DFLCTRL_DFLINCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_DFLCTRL_MINLENGTH', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_DFLCTRL_DFLBITS', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_DFLCTRL_DFLOFFSET', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_DFLCTRL_DFLSHIFT', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_DFLCTRL_DFLBITORDER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_DFLCTRL_DFLMODE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_MAXLENGTH_MAXLENGTH', 'frame')))
#These are named differently in 90nm parts vs Panther
if family in ["dumbo", "jumbo", "nerio", "nixi"]:
if family != 'dumbo':
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_WORDS', 'frame')))
if family != 'dumbo':
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_WORDS', 'frame')))
if family != 'dumbo':
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_WORDS', 'frame')))
if family != 'dumbo':
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_WORDS', 'frame')))
if family in ["nerio"]:
profile.outputs.append(ModelOutput(model.vars.FRC_FCD0_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD0.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_FCD1_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD1.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_FCD2_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD2.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_FCD3_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD3.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_CTRL_RATESELECT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.CTRL.RATESELECT'))
# These are named differently in 90nm parts vs Panther
if family not in ["dumbo", "jumbo", "nerio", "nixi"]:
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD0_WORDS', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD1_WORDS', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD2_WORDS', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_ADDTRAILTXDATA', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_SKIPWHITE', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_SKIPCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_CALCCRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_INCLUDECRC', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_BUFFER', 'frame')))
profile.outputs.append(eval(_buildModelOutputStringFromRegisterField('FRC_FCD3_WORDS', 'frame')))
profile.outputs.append(ModelOutput(model.vars.FRC_FCD0_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD0.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_FCD1_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD1.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_FCD2_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD2.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_FCD3_EXCLUDESUBFRAMEWCNT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.FCD3.EXCLUDESUBFRAMEWCNT'))
profile.outputs.append(ModelOutput(model.vars.FRC_CTRL_RATESELECT, '', ModelOutputType.SVD_REG_FIELD, readable_name='FRC.CTRL.RATESELECT'))
# Output software variables
profile.outputs.append(ModelOutput(model.vars.frame_coding_array_packed, '', ModelOutputType.SW_VAR, readable_name='Packed Frame Coding Array'))
def buildLongRangeOutputs(model, profile):
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE_LRCORRTHD, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE.LRCORRTHD'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE_LRTIMCORRTHD, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE.LRTIMCORRTHD'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE_LRCORRSCHWIN, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE.LRTIMCORRTHD'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE_LRBLE, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE.LRCORRSCHWIN'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LRFRC_CI500, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE.LRBLE'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LRFRC_FRCACKTIMETHD, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LRFRC.CI500'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE_LRDEC, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LRFRC.FRCACKTIMETHD'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE_LRBLEDSA, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE.LRDEC'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE1_LRSS, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE.LRBLEDSA'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE1_LRTIMEOUTTHD, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE1.LRSS'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE6_LRCHPWRSPIKETH, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE1.LRTIMEOUTTHD'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE6_LRSPIKETHD, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE6.LRCHPWRSPIKETH'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE1_LRSPIKETHADD, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE6.LRSPIKETHD'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE1_CHPWRACCUDEL, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE1.LRSPIKETHADD'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE1_HYSVAL, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE1.CHPWRACCUDEL'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE1_AVGWIN, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE1.HYSVAL'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE2_LRCHPWRTH1, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE1.AVGWIN'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE2_LRCHPWRTH2, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE2.LRCHPWRTH1'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE2_LRCHPWRTH3, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE2.LRCHPWRTH2'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE2_LRCHPWRTH4, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE2.LRCHPWRTH3'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE3_LRCHPWRTH5, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE2.LRCHPWRTH4'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE3_LRCHPWRTH6, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE3.LRCHPWRTH5'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE3_LRCHPWRTH7, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE3.LRCHPWRTH6'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE3_LRCHPWRTH8, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE3.LRCHPWRTH7'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE4_LRCHPWRTH9, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE3.LRCHPWRTH8'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE4_LRCHPWRTH10, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE4.LRCHPWRTH9'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE4_LRCHPWRSH1, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE4.LRCHPWRTH10'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE4_LRCHPWRSH2, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE4.LRCHPWRSH1'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE4_LRCHPWRSH3, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE4.LRCHPWRSH2'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE4_LRCHPWRSH4, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE4.LRCHPWRSH3'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE5_LRCHPWRSH5, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE4.LRCHPWRSH4'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE5_LRCHPWRSH6, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE5.LRCHPWRSH5'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE5_LRCHPWRSH7, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE5.LRCHPWRSH6'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE5_LRCHPWRSH8, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE5.LRCHPWRSH7'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE5_LRCHPWRSH9, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE5.LRCHPWRSH8'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE5_LRCHPWRSH10, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE5.LRCHPWRSH9'))
profile.outputs.append(ModelOutput(model.vars.MODEM_LONGRANGE5_LRCHPWRSH11, '', ModelOutputType.SVD_REG_FIELD, readable_name='MODEM.LONGRANGE5.LRCHPWRSH1'))
def build_ircal_sw_vars(model, profile):
    """Register the IR-calibration software-variable outputs on *profile*."""
    # Output Software Variables: (model variable, readable name) pairs,
    # appended in this exact order.
    sw_var_outputs = [
        (model.vars.ircal_auxndiv, 'IRCAL auxndiv'),
        (model.vars.ircal_auxlodiv, 'IRCAL auxlodiv'),
        (model.vars.ircal_rampval, 'IRCAL rampval'),
        (model.vars.ircal_rxamppll, 'IRCAL rxamppll'),
        (model.vars.ircal_rxamppa, 'IRCAL rxamppa'),
        (model.vars.ircal_manufconfigvalid, 'IRCAL manufconfigvalid'),
        (model.vars.ircal_pllconfigvalid, 'IRCAL pllconfigvalid'),
        (model.vars.ircal_paconfigvalid, 'IRCAL paconfigvalid'),
        (model.vars.ircal_useswrssiaveraging, 'IRCAL useswrssiaveraging'),
        (model.vars.ircal_numrssitoavg, 'IRCAL numrssitoavg'),
        (model.vars.ircal_throwawaybeforerssi, 'IRCAL throwawaybeforerssi'),
        (model.vars.ircal_delayusbeforerssi, 'IRCAL delayusbeforerssi'),
        (model.vars.ircal_delayusbetweenswrssi, 'IRCAL delayusbetweenswrssi'),
        (model.vars.ircal_bestconfig, 'IRCAL bestconfig'),
        # All but one (agcrssiperiod) of these were created for backwards
        # compatibility with RAIL 1.x - remove in RAIL 2.x
        (model.vars.ircal_agcrssiperiod, 'IRCAL agcrssiperiod'),
        (model.vars.ircal_useswrssiaveraging2, 'IRCAL useswrssiaveraging new'),
        (model.vars.ircal_numrssitoavg2, 'IRCAL numrssitoavg new'),
        (model.vars.ircal_throwawaybeforerssi2, 'IRCAL throwawaybeforerssi new'),
        (model.vars.ircal_delayusbeforerssi2, 'IRCAL delayusbeforerssi new'),
        (model.vars.ircal_delayusbetweenswrssi2, 'IRCAL delayusbetweenswrssi new'),
    ]
    for var, name in sw_var_outputs:
        profile.outputs.append(
            ModelOutput(var, '', ModelOutputType.SW_VAR, readable_name=name))
| 102.268156
| 212
| 0.802852
| 4,143
| 36,612
| 6.794593
| 0.09148
| 0.092504
| 0.131439
| 0.099751
| 0.850018
| 0.83254
| 0.804867
| 0.727034
| 0.403908
| 0.375915
| 0
| 0.010689
| 0.085245
| 36,612
| 357
| 213
| 102.554622
| 0.829835
| 0.035426
| 0
| 0.267148
| 0
| 0
| 0.197284
| 0.066409
| 0
| 0
| 0.001401
| 0.002801
| 0
| 1
| 0.039711
| false
| 0
| 0.01083
| 0.00361
| 0.054152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c4970a04930da9272f2b41e686a49664187858cf
| 8,160
|
py
|
Python
|
main.py
|
hzhang934748656/ChessAI
|
f1aff2959ee1efbf13b987cf5f95e4b4c90af62c
|
[
"MIT"
] | null | null | null |
main.py
|
hzhang934748656/ChessAI
|
f1aff2959ee1efbf13b987cf5f95e4b4c90af62c
|
[
"MIT"
] | null | null | null |
main.py
|
hzhang934748656/ChessAI
|
f1aff2959ee1efbf13b987cf5f95e4b4c90af62c
|
[
"MIT"
] | null | null | null |
import chess
from chessboard import display
from time import sleep
# Piece-square bonus tables, scaled by p, indexed [x][y] with the same
# x = square // 8, y = square % 8 coordinates that evaluation() computes.
# NOTE(review): only the "Bottom"/"Buttom" tables are referenced by
# getPieceValue() in this file; the *TopMatrix variants appear unused here.
# NOTE(review): "Buttom" (Bottom) and "Rock" (Rook) are misspellings kept
# unchanged because getPieceValue() references these exact names.
p = 1  # global scale factor applied to every positional bonus
PawnBottomMatrix = [[3*p,3*p,3*p,4*p,4*p,3*p,3*p,3*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[0*p,1*p,1*p,1*p,1*p,1*p,1*p,0*p],[0*p,0*p,0*p,0*p,0*p,0*p,0*p,0*p]]
PawnTopMatrix = [[0*p,0*p,0*p,0*p,0*p,0*p,0*p,0*p],[0*p,1*p,1*p,1*p,1*p,1*p,1*p,0*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[3*p,3*p,3*p,4*p,4*p,3*p,3*p,3*p]]
RockButtomMatrix = [[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p]]
RockTopMatrix = [[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p]]
BishopButtomMatrix = [[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p]]
BishopTopMatrix = [[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p]]
# Negative corner values discourage the king from the edges/corners here.
KingButtomMatrix = [[-5*p,1*p,2*p,2*p,2*p,2*p,1*p,-5*p],[-1*p,2*p,2*p,2*p,2*p,2*p,2*p,-1*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[-1*p,2*p,2*p,2*p,2*p,2*p,2*p,-1*p],[-5*p,1*p,2*p,2*p,2*p,2*p,1*p,-5*p]]
KingTopMatrix = [[-5*p,1*p,2*p,2*p,2*p,2*p,1*p,-5*p],[-1*p,2*p,2*p,2*p,2*p,2*p,2*p,-1*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[0*p,2*p,2*p,2*p,2*p,2*p,2*p,0*p],[-1*p,2*p,2*p,2*p,2*p,2*p,2*p,-1*p],[-5*p,1*p,2*p,2*p,2*p,2*p,1*p,-5*p]]
QueenButtomMatrix = [[0*p,1*p,1*p,1*p,1*p,1*p,1*p,0*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[0*p,1*p,1*p,1*p,1*p,1*p,1*p,0*p]]
QueenTopMatrix = [[0*p,1*p,1*p,1*p,1*p,1*p,1*p,0*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1.5*p,2*p,2*p,2*p,2*p,2*p,2*p,1.5*p],[1*p,2*p,2*p,2*p,2*p,2*p,2*p,1*p],[0*p,1*p,1*p,1*p,1*p,1*p,1*p,0*p]]
# Knights are penalized on the rim ("knight on the rim is dim").
KnightButtomMatrix = [[-2*p,-1*p,0*p,0*p,0*p,0*p,-1*p,-2*p],[-1*p,0*p,1*p,1*p,1*p,1*p,0*p,-1*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[-1*p,0*p,1*p,1*p,1*p,1*p,0*p,-1*p],[-2*p,-1*p,0*p,0*p,0*p,0*p,-1*p,-2*p]]
KnightTopMatrix = [[-2*p,-1*p,0*p,0*p,0*p,0*p,-1*p,-2*p],[-1*p,0*p,1*p,1*p,1*p,1*p,0*p,-1*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[0*p,1*p,2*p,2*p,2*p,2*p,1*p,0*p],[-1*p,0*p,1*p,1*p,1*p,1*p,0*p,-1*p],[-2*p,-1*p,0*p,0*p,0*p,0*p,-1*p,-2*p]]
def getaction(depth, board, is_max):
    """Return the best legal move found by a depth-limited minmax search.

    Returns None when the board has no legal moves.
    """
    best_score = -float("inf")
    chosen_move = None
    for candidate in board.legal_moves:
        uci_move = chess.Move.from_uci(str(candidate))
        board.push(uci_move)
        score = max(best_score,
                    minmax(depth - 1, board, -float("inf"), float("inf"), not is_max))
        board.pop()
        if score > best_score:
            best_score = score
            chosen_move = uci_move
    return chosen_move
def minmax(depth, board, alpha, beta, is_max):
    """Alpha-beta pruned minmax search; returns the score of *board* at *depth*."""
    if depth == 0:
        return -evaluation(board)
    if is_max:
        best = -float("inf")
        for candidate in board.legal_moves:
            board.push(chess.Move.from_uci(str(candidate)))
            best = max(best, minmax(depth - 1, board, alpha, beta, not is_max))
            board.pop()
            alpha = max(alpha, best)
            # Alpha-beta cut-off: the minimizing side will avoid this line.
            if beta <= alpha:
                break
        return best
    best = float("inf")
    for candidate in board.legal_moves:
        board.push(chess.Move.from_uci(str(candidate)))
        best = min(best, minmax(depth - 1, board, alpha, beta, not is_max))
        board.pop()
        beta = min(beta, best)
        # Alpha-beta cut-off: the maximizing side will avoid this line.
        if beta <= alpha:
            break
    return best
def evaluation(board):
    """Score *board*: sum of piece values (material + positional bonus),
    counted positive for one color and negative for the other.

    Bug fix: the original loop used ``while i < 63`` and therefore never
    examined square 63 (the last corner), silently ignoring any piece there.
    """
    total = 0
    for square in range(64):  # python-chess numbers squares 0..63
        piece = board.piece_at(square)  # hoisted: original called piece_at 3x
        if piece:
            x, y = square // 8, square % 8
            value = getPieceValue(str(piece), x, y)
            # piece.color is truthy for one side; the sign flips for the other.
            total += value if piece.color else -value
    return total
def getPieceValue(piece, x, y):
    """Return material value + positional bonus for *piece* at coords (x, y).

    *piece* is a one-character piece symbol (upper/lower case for the two
    colors); both cases are scored identically here — the caller applies the
    sign. Returns 0 for None or an unrecognized symbol.
    """
    if piece is None:
        return 0
    value = 0
    if piece == "P" or piece == "p":
        value = 5 + PawnBottomMatrix[x][y]
    if piece == "N" or piece == "n":
        # Bug fix: knights were scored with KingButtomMatrix; the dedicated
        # KnightButtomMatrix table existed but was never used.
        value = 10 + KnightButtomMatrix[x][y]
    if piece == "B" or piece == "b":
        value = 20 + BishopButtomMatrix[x][y]
    if piece == "R" or piece == "r":
        value = 40 + RockButtomMatrix[x][y]
    if piece == "Q" or piece == "q":
        value = 100 + QueenButtomMatrix[x][y]
    if piece == 'K' or piece == 'k':
        value = 1000 + KingButtomMatrix[x][y]
    return value
def test1():
    """Fork test: after player a8b8 the AI should play c5d7; after player
    b8b7 it should play d7f8, winning the queen with a knight fork."""
    position = "K4Q2/8/8/2n5/1b6/k7/8/8 w KQkq - 0 4"
    return chess.Board(position)
def test2():
    """Pin test: after player c7d5 the AI should play b1e4; after player
    a8b8 it should play e4d5, winning the pinned knight."""
    position = "K7/2N5/8/8/3P4/8/8/kb6 w KQkq - 0 4"
    return chess.Board(position)
def test3():
    """Fork test: after player b1a1 the AI should play c7c5; after player
    b4c2 it should play c5d4, winning the bishop with a fork."""
    position = "k1r5/ppp5/8/8/1N1B4/8/8/1K6 w KQkq - 0 4"
    return chess.Board(position)
def main():
    """Run the interactive human-vs-AI game loop.

    Even half-moves prompt the human (UCI notation, 'exit' to quit, or the
    name of a canned test position); odd half-moves let the AI reply at
    search depth 3. Fixes vs. original: board.outcome is now *called*
    (the original printed the bound method object), and a malformed UCI
    string re-prompts instead of crashing the program.
    """
    board = chess.Board()
    n = 0
    print(board)
    display.start(board.fen())
    print(chess.Piece)
    # Canned positions the player can load by typing their name.
    test_positions = {"test1": test1, "test2": test2, "test3": test3}
    while not board.is_game_over():
        if n % 2 == 0:
            print(board.legal_moves)
            move = input("Please enter a valid move or enter exit to quit: ")
            if move == "exit":
                break
            if move in test_positions:
                board = test_positions[move]()
                print(board)
                display.update(board.fen())
                continue  # still the player's turn on the new board
            try:
                board.push(chess.Move.from_uci(str(move)))
            except ValueError:
                # Malformed UCI input: re-prompt instead of crashing.
                print("Invalid move, please try again.")
                continue
        else:
            print("AI move:")
            move = getaction(3, board, True)
            move = chess.Move.from_uci(str(move))
            board.push(move)
        print(board)
        display.update(board.fen())
        n += 1
    print("Quit game successfully")
    # Bug fix: outcome is a method; calling it prints the actual game result.
    print(board.outcome())
    display.update(board.fen())
if __name__ == "__main__":
main()
| 49.156627
| 325
| 0.507108
| 2,192
| 8,160
| 1.862682
| 0.064325
| 0.217487
| 0.324761
| 0.350723
| 0.610336
| 0.576537
| 0.568944
| 0.559393
| 0.544208
| 0.522165
| 0
| 0.143999
| 0.202574
| 8,160
| 165
| 326
| 49.454545
| 0.483479
| 0.0375
| 0
| 0.282443
| 0
| 0
| 0.031901
| 0.009375
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061069
| false
| 0
| 0.022901
| 0.022901
| 0.175573
| 0.076336
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c49961a246ffb6bf5f13314be5abde9ab1e1ccec
| 84
|
py
|
Python
|
telegramBots/utils/__init__.py
|
keys4words/bots
|
4f5c4342ad24d1fc9d39947c9940346d2ac2ec8b
|
[
"Apache-2.0"
] | null | null | null |
telegramBots/utils/__init__.py
|
keys4words/bots
|
4f5c4342ad24d1fc9d39947c9940346d2ac2ec8b
|
[
"Apache-2.0"
] | 7
|
2020-11-09T13:35:15.000Z
|
2021-10-22T04:58:07.000Z
|
telegramBots/utils/__init__.py
|
keys4words/bots
|
4f5c4342ad24d1fc9d39947c9940346d2ac2ec8b
|
[
"Apache-2.0"
] | null | null | null |
from . import db_api
from . import misc
from .notify_admins import on_startup_notify
| 28
| 44
| 0.833333
| 14
| 84
| 4.714286
| 0.642857
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 84
| 3
| 44
| 28
| 0.90411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c4c7266c6bae3404ae863594a322d883cdac2bc1
| 781
|
py
|
Python
|
test/test_tenants_page_all_of.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
test/test_tenants_page_all_of.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
test/test_tenants_page_all_of.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
"""
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import python_msx_sdk
from python_msx_sdk.model.tenant import Tenant
globals()['Tenant'] = Tenant
from python_msx_sdk.model.tenants_page_all_of import TenantsPageAllOf
class TestTenantsPageAllOf(unittest.TestCase):
    """Unit test stubs for the TenantsPageAllOf model (generated)."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testTenantsPageAllOf(self):
        """Test TenantsPageAllOf"""
        # FIXME: construct object with mandatory attributes with example values
        # model = TenantsPageAllOf()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 20.552632
| 79
| 0.68758
| 92
| 781
| 5.652174
| 0.565217
| 0.057692
| 0.069231
| 0.061538
| 0.080769
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014827
| 0.222791
| 781
| 37
| 80
| 21.108108
| 0.841845
| 0.37516
| 0
| 0.2
| 1
| 0
| 0.030973
| 0
| 0
| 0
| 0
| 0.027027
| 0
| 1
| 0.2
| false
| 0.2
| 0.333333
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
c4cdee2153bbcb8a22d5f895f52928752aa87160
| 75
|
py
|
Python
|
src/vak/transforms/__init__.py
|
yardencsGitHub/vak
|
04da97b02ded5acccab437c2538d0a1ded3bef80
|
[
"BSD-3-Clause"
] | 26
|
2019-03-04T20:08:57.000Z
|
2022-01-22T13:40:00.000Z
|
src/vak/transforms/__init__.py
|
yardencsGitHub/vak
|
04da97b02ded5acccab437c2538d0a1ded3bef80
|
[
"BSD-3-Clause"
] | 379
|
2019-03-03T12:16:05.000Z
|
2022-03-29T13:44:46.000Z
|
src/vak/transforms/__init__.py
|
yardencsGitHub/vak
|
04da97b02ded5acccab437c2538d0a1ded3bef80
|
[
"BSD-3-Clause"
] | 12
|
2019-11-22T21:19:19.000Z
|
2022-03-14T17:44:59.000Z
|
from .transforms import *
from vak.transforms.defaults import get_defaults
| 25
| 48
| 0.84
| 10
| 75
| 6.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 2
| 49
| 37.5
| 0.925373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c4dc43081b49b349e40b994413e83ec7ffd821a3
| 58
|
py
|
Python
|
tensorspace/builders/__init__.py
|
phrostbyten/tensorspace
|
e6b787f71e81943092d250cf0a1b497db679c448
|
[
"Apache-2.0"
] | null | null | null |
tensorspace/builders/__init__.py
|
phrostbyten/tensorspace
|
e6b787f71e81943092d250cf0a1b497db679c448
|
[
"Apache-2.0"
] | null | null | null |
tensorspace/builders/__init__.py
|
phrostbyten/tensorspace
|
e6b787f71e81943092d250cf0a1b497db679c448
|
[
"Apache-2.0"
] | null | null | null |
from .vectors import CaptionVectors
from .coco import Coco
| 29
| 35
| 0.844828
| 8
| 58
| 6.125
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 2
| 36
| 29
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4805f43408087ee8f2b5681c88108b4dcc580f51
| 45,904
|
py
|
Python
|
blenderlib/bgl.py
|
kabuku/blender-python
|
6e8eee4cd79242bb0a3cd1dd848da04bfe0a9182
|
[
"MIT"
] | 23
|
2015-04-18T02:37:19.000Z
|
2021-12-20T23:59:28.000Z
|
blenderlib/bgl.py
|
kabuku/blender-python
|
6e8eee4cd79242bb0a3cd1dd848da04bfe0a9182
|
[
"MIT"
] | null | null | null |
blenderlib/bgl.py
|
kabuku/blender-python
|
6e8eee4cd79242bb0a3cd1dd848da04bfe0a9182
|
[
"MIT"
] | 5
|
2017-05-06T07:20:40.000Z
|
2021-11-08T18:35:43.000Z
|
'''Open GL functions (bgl)
'''
GL_2D = 1536 #constant value
GL_2_BYTES = 5127 #constant value
GL_3D = 1537 #constant value
GL_3D_COLOR = 1538 #constant value
GL_3D_COLOR_TEXTURE = 1539 #constant value
GL_3_BYTES = 5128 #constant value
GL_4D_COLOR_TEXTURE = 1540 #constant value
GL_4_BYTES = 5129 #constant value
GL_ACCUM = 256 #constant value
GL_ACCUM_ALPHA_BITS = 3419 #constant value
GL_ACCUM_BLUE_BITS = 3418 #constant value
GL_ACCUM_BUFFER_BIT = 512 #constant value
GL_ACCUM_CLEAR_VALUE = 2944 #constant value
GL_ACCUM_GREEN_BITS = 3417 #constant value
GL_ACCUM_RED_BITS = 3416 #constant value
GL_ACTIVE_TEXTURE = 34016 #constant value
GL_ADD = 260 #constant value
GL_ALL_ATTRIB_BITS = 1048575 #constant value
GL_ALPHA = 6406 #constant value
GL_ALPHA_BIAS = 3357 #constant value
GL_ALPHA_BITS = 3413 #constant value
GL_ALPHA_SCALE = 3356 #constant value
GL_ALPHA_TEST = 3008 #constant value
GL_ALPHA_TEST_FUNC = 3009 #constant value
GL_ALPHA_TEST_REF = 3010 #constant value
GL_ALWAYS = 519 #constant value
GL_AMBIENT = 4608 #constant value
GL_AMBIENT_AND_DIFFUSE = 5634 #constant value
GL_AND = 5377 #constant value
GL_AND_INVERTED = 5380 #constant value
GL_AND_REVERSE = 5378 #constant value
GL_ATTRIB_STACK_DEPTH = 2992 #constant value
GL_AUTO_NORMAL = 3456 #constant value
GL_AUX0 = 1033 #constant value
GL_AUX1 = 1034 #constant value
GL_AUX2 = 1035 #constant value
GL_AUX3 = 1036 #constant value
GL_AUX_BUFFERS = 3072 #constant value
GL_BACK = 1029 #constant value
GL_BACK_LEFT = 1026 #constant value
GL_BACK_RIGHT = 1027 #constant value
GL_BITMAP = 6656 #constant value
GL_BITMAP_TOKEN = 1796 #constant value
GL_BLEND = 3042 #constant value
GL_BLEND_DST = 3040 #constant value
GL_BLEND_SRC = 3041 #constant value
GL_BLUE = 6405 #constant value
GL_BLUE_BIAS = 3355 #constant value
GL_BLUE_BITS = 3412 #constant value
GL_BLUE_SCALE = 3354 #constant value
GL_BYTE = 5120 #constant value
GL_CCW = 2305 #constant value
GL_CLAMP = 10496 #constant value
GL_CLEAR = 5376 #constant value
GL_CLIENT_ALL_ATTRIB_BITS = -1 #constant value
GL_CLIP_PLANE0 = 12288 #constant value
GL_CLIP_PLANE1 = 12289 #constant value
GL_CLIP_PLANE2 = 12290 #constant value
GL_CLIP_PLANE3 = 12291 #constant value
GL_CLIP_PLANE4 = 12292 #constant value
GL_CLIP_PLANE5 = 12293 #constant value
GL_COEFF = 2560 #constant value
GL_COLOR = 6144 #constant value
GL_COLOR_BUFFER_BIT = 16384 #constant value
GL_COLOR_CLEAR_VALUE = 3106 #constant value
GL_COLOR_INDEX = 6400 #constant value
GL_COLOR_INDEXES = 5635 #constant value
GL_COLOR_MATERIAL = 2903 #constant value
GL_COLOR_MATERIAL_FACE = 2901 #constant value
GL_COLOR_MATERIAL_PARAMETER = 2902 #constant value
GL_COLOR_WRITEMASK = 3107 #constant value
GL_COMPILE = 4864 #constant value
GL_COMPILE_AND_EXECUTE = 4865 #constant value
GL_COMPILE_STATUS = 35713 #constant value
GL_CONSTANT_ATTENUATION = 4615 #constant value
GL_COPY = 5379 #constant value
GL_COPY_INVERTED = 5388 #constant value
GL_COPY_PIXEL_TOKEN = 1798 #constant value
GL_CULL_FACE = 2884 #constant value
GL_CULL_FACE_MODE = 2885 #constant value
GL_CURRENT_BIT = 1 #constant value
GL_CURRENT_COLOR = 2816 #constant value
GL_CURRENT_INDEX = 2817 #constant value
GL_CURRENT_NORMAL = 2818 #constant value
GL_CURRENT_RASTER_COLOR = 2820 #constant value
GL_CURRENT_RASTER_DISTANCE = 2825 #constant value
GL_CURRENT_RASTER_INDEX = 2821 #constant value
GL_CURRENT_RASTER_POSITION = 2823 #constant value
GL_CURRENT_RASTER_POSITION_VALID = 2824 #constant value
GL_CURRENT_RASTER_TEXTURE_COORDS = 2822 #constant value
GL_CURRENT_TEXTURE_COORDS = 2819 #constant value
GL_CW = 2304 #constant value
GL_DECAL = 8449 #constant value
GL_DECR = 7683 #constant value
GL_DEPTH = 6145 #constant value
GL_DEPTH_BIAS = 3359 #constant value
GL_DEPTH_BITS = 3414 #constant value
GL_DEPTH_BUFFER_BIT = 256 #constant value
GL_DEPTH_CLEAR_VALUE = 2931 #constant value
GL_DEPTH_COMPONENT = 6402 #constant value
GL_DEPTH_COMPONENT32 = 33191 #constant value
GL_DEPTH_FUNC = 2932 #constant value
GL_DEPTH_RANGE = 2928 #constant value
GL_DEPTH_SCALE = 3358 #constant value
GL_DEPTH_TEST = 2929 #constant value
GL_DEPTH_WRITEMASK = 2930 #constant value
GL_DIFFUSE = 4609 #constant value
GL_DITHER = 3024 #constant value
GL_DOMAIN = 2562 #constant value
GL_DONT_CARE = 4352 #constant value
GL_DOUBLE = 5130 #constant value
GL_DOUBLEBUFFER = 3122 #constant value
GL_DRAW_BUFFER = 3073 #constant value
GL_DRAW_PIXEL_TOKEN = 1797 #constant value
GL_DST_ALPHA = 772 #constant value
GL_DST_COLOR = 774 #constant value
GL_EDGE_FLAG = 2883 #constant value
GL_EMISSION = 5632 #constant value
GL_ENABLE_BIT = 8192 #constant value
GL_EQUAL = 514 #constant value
GL_EQUIV = 5385 #constant value
GL_EVAL_BIT = 65536 #constant value
GL_EXP = 2048 #constant value
GL_EXP2 = 2049 #constant value
GL_EXTENSIONS = 7939 #constant value
GL_EYE_LINEAR = 9216 #constant value
GL_EYE_PLANE = 9474 #constant value
GL_FALSE = 0 #constant value
GL_FASTEST = 4353 #constant value
GL_FEEDBACK = 7169 #constant value
GL_FILL = 6914 #constant value
GL_FLAT = 7424 #constant value
GL_FLOAT = 5126 #constant value
GL_FOG = 2912 #constant value
GL_FOG_BIT = 128 #constant value
GL_FOG_COLOR = 2918 #constant value
GL_FOG_DENSITY = 2914 #constant value
GL_FOG_END = 2916 #constant value
GL_FOG_HINT = 3156 #constant value
GL_FOG_INDEX = 2913 #constant value
GL_FOG_MODE = 2917 #constant value
GL_FOG_START = 2915 #constant value
GL_FRAGMENT_SHADER = 35632 #constant value
GL_FRONT = 1028 #constant value
GL_FRONT_AND_BACK = 1032 #constant value
GL_FRONT_FACE = 2886 #constant value
GL_FRONT_LEFT = 1024 #constant value
GL_FRONT_RIGHT = 1025 #constant value
GL_GEQUAL = 518 #constant value
GL_GREATER = 516 #constant value
GL_GREEN = 6404 #constant value
GL_GREEN_BIAS = 3353 #constant value
GL_GREEN_BITS = 3411 #constant value
GL_GREEN_SCALE = 3352 #constant value
GL_HINT_BIT = 32768 #constant value
GL_INCR = 7682 #constant value
GL_INDEX_BITS = 3409 #constant value
GL_INDEX_CLEAR_VALUE = 3104 #constant value
GL_INDEX_MODE = 3120 #constant value
GL_INDEX_OFFSET = 3347 #constant value
GL_INDEX_SHIFT = 3346 #constant value
GL_INDEX_WRITEMASK = 3105 #constant value
GL_INT = 5124 #constant value
GL_INVALID_ENUM = 1280 #constant value
GL_INVALID_OPERATION = 1282 #constant value
GL_INVALID_VALUE = 1281 #constant value
GL_INVERT = 5386 #constant value
GL_KEEP = 7680 #constant value
GL_LEFT = 1030 #constant value
GL_LEQUAL = 515 #constant value
GL_LESS = 513 #constant value
GL_LIGHT0 = 16384 #constant value
GL_LIGHT1 = 16385 #constant value
GL_LIGHT2 = 16386 #constant value
GL_LIGHT3 = 16387 #constant value
GL_LIGHT4 = 16388 #constant value
GL_LIGHT5 = 16389 #constant value
GL_LIGHT6 = 16390 #constant value
GL_LIGHT7 = 16391 #constant value
GL_LIGHTING = 2896 #constant value
GL_LIGHTING_BIT = 64 #constant value
GL_LIGHT_MODEL_AMBIENT = 2899 #constant value
GL_LIGHT_MODEL_LOCAL_VIEWER = 2897 #constant value
GL_LIGHT_MODEL_TWO_SIDE = 2898 #constant value
GL_LINE = 6913 #constant value
GL_LINEAR = 9729 #constant value
GL_LINEAR_ATTENUATION = 4616 #constant value
GL_LINEAR_MIPMAP_LINEAR = 9987 #constant value
GL_LINEAR_MIPMAP_NEAREST = 9985 #constant value
GL_LINES = 1 #constant value
GL_LINE_BIT = 4 #constant value
GL_LINE_LOOP = 2 #constant value
GL_LINE_RESET_TOKEN = 1799 #constant value
GL_LINE_SMOOTH = 2848 #constant value
GL_LINE_SMOOTH_HINT = 3154 #constant value
GL_LINE_STIPPLE = 2852 #constant value
GL_LINE_STIPPLE_PATTERN = 2853 #constant value
GL_LINE_STIPPLE_REPEAT = 2854 #constant value
GL_LINE_STRIP = 3 #constant value
GL_LINE_TOKEN = 1794 #constant value
GL_LINE_WIDTH = 2849 #constant value
GL_LINE_WIDTH_GRANULARITY = 2851 #constant value
GL_LINE_WIDTH_RANGE = 2850 #constant value
GL_LIST_BASE = 2866 #constant value
GL_LIST_BIT = 131072 #constant value
GL_LIST_INDEX = 2867 #constant value
GL_LIST_MODE = 2864 #constant value
GL_LOAD = 257 #constant value
GL_LOGIC_OP = 3057 #constant value
GL_LOGIC_OP_MODE = 3056 #constant value
GL_LUMINANCE = 6409 #constant value
GL_LUMINANCE_ALPHA = 6410 #constant value
GL_MAP1_COLOR_4 = 3472 #constant value
GL_MAP1_GRID_DOMAIN = 3536 #constant value
GL_MAP1_GRID_SEGMENTS = 3537 #constant value
GL_MAP1_INDEX = 3473 #constant value
GL_MAP1_NORMAL = 3474 #constant value
GL_MAP1_TEXTURE_COORD_1 = 3475 #constant value
GL_MAP1_TEXTURE_COORD_2 = 3476 #constant value
GL_MAP1_TEXTURE_COORD_3 = 3477 #constant value
GL_MAP1_TEXTURE_COORD_4 = 3478 #constant value
GL_MAP1_VERTEX_3 = 3479 #constant value
GL_MAP1_VERTEX_4 = 3480 #constant value
GL_MAP2_COLOR_4 = 3504 #constant value
GL_MAP2_GRID_DOMAIN = 3538 #constant value
GL_MAP2_GRID_SEGMENTS = 3539 #constant value
GL_MAP2_INDEX = 3505 #constant value
GL_MAP2_NORMAL = 3506 #constant value
GL_MAP2_TEXTURE_COORD_1 = 3507 #constant value
GL_MAP2_TEXTURE_COORD_2 = 3508 #constant value
GL_MAP2_TEXTURE_COORD_3 = 3509 #constant value
GL_MAP2_TEXTURE_COORD_4 = 3510 #constant value
GL_MAP2_VERTEX_3 = 3511 #constant value
GL_MAP2_VERTEX_4 = 3512 #constant value
GL_MAP_COLOR = 3344 #constant value
GL_MAP_STENCIL = 3345 #constant value
GL_MATRIX_MODE = 2976 #constant value
GL_MAX_ATTRIB_STACK_DEPTH = 3381 #constant value
GL_MAX_CLIP_PLANES = 3378 #constant value
GL_MAX_EVAL_ORDER = 3376 #constant value
GL_MAX_LIGHTS = 3377 #constant value
GL_MAX_LIST_NESTING = 2865 #constant value
GL_MAX_MODELVIEW_STACK_DEPTH = 3382 #constant value
GL_MAX_NAME_STACK_DEPTH = 3383 #constant value
GL_MAX_PIXEL_MAP_TABLE = 3380 #constant value
GL_MAX_PROJECTION_STACK_DEPTH = 3384 #constant value
GL_MAX_TEXTURE_SIZE = 3379 #constant value
GL_MAX_TEXTURE_STACK_DEPTH = 3385 #constant value
GL_MAX_VIEWPORT_DIMS = 3386 #constant value
GL_MODELVIEW = 5888 #constant value
GL_MODELVIEW_MATRIX = 2982 #constant value
GL_MODELVIEW_STACK_DEPTH = 2979 #constant value
GL_MODULATE = 8448 #constant value
GL_MULT = 259 #constant value
GL_NAME_STACK_DEPTH = 3440 #constant value
GL_NAND = 5390 #constant value
GL_NEAREST = 9728 #constant value
GL_NEAREST_MIPMAP_LINEAR = 9986 #constant value
GL_NEAREST_MIPMAP_NEAREST = 9984 #constant value
GL_NEVER = 512 #constant value
GL_NICEST = 4354 #constant value
GL_NONE = 0 #constant value
GL_NOOP = 5381 #constant value
GL_NOR = 5384 #constant value
GL_NORMALIZE = 2977 #constant value
GL_NOTEQUAL = 517 #constant value
GL_NO_ERROR = 0 #constant value
GL_OBJECT_LINEAR = 9217 #constant value
GL_OBJECT_PLANE = 9473 #constant value
GL_ONE = 1 #constant value
GL_ONE_MINUS_DST_ALPHA = 773 #constant value
GL_ONE_MINUS_DST_COLOR = 775 #constant value
GL_ONE_MINUS_SRC_ALPHA = 771 #constant value
GL_ONE_MINUS_SRC_COLOR = 769 #constant value
GL_OR = 5383 #constant value
GL_ORDER = 2561 #constant value
GL_OR_INVERTED = 5389 #constant value
GL_OR_REVERSE = 5387 #constant value
GL_OUT_OF_MEMORY = 1285 #constant value
GL_PACK_ALIGNMENT = 3333 #constant value
GL_PACK_LSB_FIRST = 3329 #constant value
GL_PACK_ROW_LENGTH = 3330 #constant value
GL_PACK_SKIP_PIXELS = 3332 #constant value
GL_PACK_SKIP_ROWS = 3331 #constant value
GL_PACK_SWAP_BYTES = 3328 #constant value
GL_PASS_THROUGH_TOKEN = 1792 #constant value
GL_PERSPECTIVE_CORRECTION_HINT = 3152 #constant value
GL_PIXEL_MAP_A_TO_A = 3193 #constant value
GL_PIXEL_MAP_A_TO_A_SIZE = 3257 #constant value
GL_PIXEL_MAP_B_TO_B = 3192 #constant value
GL_PIXEL_MAP_B_TO_B_SIZE = 3256 #constant value
GL_PIXEL_MAP_G_TO_G = 3191 #constant value
GL_PIXEL_MAP_G_TO_G_SIZE = 3255 #constant value
GL_PIXEL_MAP_I_TO_A = 3189 #constant value
GL_PIXEL_MAP_I_TO_A_SIZE = 3253 #constant value
GL_PIXEL_MAP_I_TO_B = 3188 #constant value
GL_PIXEL_MAP_I_TO_B_SIZE = 3252 #constant value
GL_PIXEL_MAP_I_TO_G = 3187 #constant value
GL_PIXEL_MAP_I_TO_G_SIZE = 3251 #constant value
GL_PIXEL_MAP_I_TO_I = 3184 #constant value
GL_PIXEL_MAP_I_TO_I_SIZE = 3248 #constant value
GL_PIXEL_MAP_I_TO_R = 3186 #constant value
GL_PIXEL_MAP_I_TO_R_SIZE = 3250 #constant value
GL_PIXEL_MAP_R_TO_R = 3190 #constant value
GL_PIXEL_MAP_R_TO_R_SIZE = 3254 #constant value
GL_PIXEL_MAP_S_TO_S = 3185 #constant value
GL_PIXEL_MAP_S_TO_S_SIZE = 3249 #constant value
GL_PIXEL_MODE_BIT = 32 #constant value
GL_POINT = 6912 #constant value
GL_POINTS = 0 #constant value
GL_POINT_BIT = 2 #constant value
GL_POINT_SIZE = 2833 #constant value
GL_POINT_SIZE_GRANULARITY = 2835 #constant value
GL_POINT_SIZE_RANGE = 2834 #constant value
GL_POINT_SMOOTH = 2832 #constant value
GL_POINT_SMOOTH_HINT = 3153 #constant value
GL_POINT_TOKEN = 1793 #constant value
GL_POLYGON = 9 #constant value
GL_POLYGON_BIT = 8 #constant value
GL_POLYGON_MODE = 2880 #constant value
GL_POLYGON_OFFSET_FACTOR = 32824 #constant value
GL_POLYGON_OFFSET_FILL = 32823 #constant value
GL_POLYGON_OFFSET_LINE = 10754 #constant value
GL_POLYGON_OFFSET_POINT = 10753 #constant value
GL_POLYGON_OFFSET_UNITS = 10752 #constant value
GL_POLYGON_SMOOTH = 2881 #constant value
GL_POLYGON_SMOOTH_HINT = 3155 #constant value
GL_POLYGON_STIPPLE = 2882 #constant value
GL_POLYGON_STIPPLE_BIT = 16 #constant value
GL_POLYGON_TOKEN = 1795 #constant value
GL_POSITION = 4611 #constant value
GL_PROJECTION = 5889 #constant value
GL_PROJECTION_MATRIX = 2983 #constant value
GL_PROJECTION_STACK_DEPTH = 2980 #constant value
GL_Q = 8195 #constant value
GL_QUADRATIC_ATTENUATION = 4617 #constant value
GL_QUADS = 7 #constant value
GL_QUAD_STRIP = 8 #constant value
GL_R = 8194 #constant value
GL_READ_BUFFER = 3074 #constant value
GL_RED = 6403 #constant value
GL_RED_BIAS = 3349 #constant value
GL_RED_BITS = 3410 #constant value
GL_RED_SCALE = 3348 #constant value
GL_RENDER = 7168 #constant value
GL_RENDERER = 7937 #constant value
GL_RENDER_MODE = 3136 #constant value
GL_REPEAT = 10497 #constant value
GL_REPLACE = 7681 #constant value
GL_RETURN = 258 #constant value
GL_RGB = 6407 #constant value
GL_RGBA = 6408 #constant value
GL_RGBA_MODE = 3121 #constant value
GL_RIGHT = 1031 #constant value
GL_S = 8192 #constant value
GL_SCISSOR_BIT = 524288 #constant value
GL_SCISSOR_BOX = 3088 #constant value
GL_SCISSOR_TEST = 3089 #constant value
GL_SELECT = 7170 #constant value
GL_SET = 5391 #constant value
GL_SHADE_MODEL = 2900 #constant value
GL_SHININESS = 5633 #constant value
GL_SHORT = 5122 #constant value
GL_SMOOTH = 7425 #constant value
GL_SPECULAR = 4610 #constant value
GL_SPHERE_MAP = 9218 #constant value
GL_SPOT_CUTOFF = 4614 #constant value
GL_SPOT_DIRECTION = 4612 #constant value
GL_SPOT_EXPONENT = 4613 #constant value
GL_SRC_ALPHA = 770 #constant value
GL_SRC_ALPHA_SATURATE = 776 #constant value
GL_SRC_COLOR = 768 #constant value
GL_STACK_OVERFLOW = 1283 #constant value
GL_STACK_UNDERFLOW = 1284 #constant value
GL_STENCIL = 6146 #constant value
GL_STENCIL_BITS = 3415 #constant value
GL_STENCIL_BUFFER_BIT = 1024 #constant value
GL_STENCIL_CLEAR_VALUE = 2961 #constant value
GL_STENCIL_FAIL = 2964 #constant value
GL_STENCIL_FUNC = 2962 #constant value
GL_STENCIL_INDEX = 6401 #constant value
GL_STENCIL_PASS_DEPTH_FAIL = 2965 #constant value
GL_STENCIL_PASS_DEPTH_PASS = 2966 #constant value
GL_STENCIL_REF = 2967 #constant value
GL_STENCIL_TEST = 2960 #constant value
GL_STENCIL_VALUE_MASK = 2963 #constant value
GL_STENCIL_WRITEMASK = 2968 #constant value
GL_STEREO = 3123 #constant value
GL_SUBPIXEL_BITS = 3408 #constant value
GL_T = 8193 #constant value
GL_TEXTURE = 5890 #constant value
GL_TEXTURE0 = 33984 #constant value
GL_TEXTURE1 = 33985 #constant value
GL_TEXTURE2 = 33986 #constant value
GL_TEXTURE3 = 33987 #constant value
GL_TEXTURE4 = 33988 #constant value
GL_TEXTURE5 = 33989 #constant value
GL_TEXTURE6 = 33990 #constant value
GL_TEXTURE7 = 33991 #constant value
GL_TEXTURE8 = 33992 #constant value
GL_TEXTURE_1D = 3552 #constant value
GL_TEXTURE_2D = 3553 #constant value
GL_TEXTURE_BINDING_1D = 32872 #constant value
GL_TEXTURE_BINDING_2D = 32873 #constant value
GL_TEXTURE_BIT = 262144 #constant value
GL_TEXTURE_BORDER = 4101 #constant value
GL_TEXTURE_BORDER_COLOR = 4100 #constant value
GL_TEXTURE_COMPARE_MODE = 34892 #constant value
GL_TEXTURE_COMPONENTS = 4099 #constant value
GL_TEXTURE_ENV = 8960 #constant value
GL_TEXTURE_ENV_COLOR = 8705 #constant value
GL_TEXTURE_ENV_MODE = 8704 #constant value
GL_TEXTURE_GEN_MODE = 9472 #constant value
GL_TEXTURE_GEN_Q = 3171 #constant value
GL_TEXTURE_GEN_R = 3170 #constant value
GL_TEXTURE_GEN_S = 3168 #constant value
GL_TEXTURE_GEN_T = 3169 #constant value
GL_TEXTURE_HEIGHT = 4097 #constant value
GL_TEXTURE_MAG_FILTER = 10240 #constant value
GL_TEXTURE_MATRIX = 2984 #constant value
GL_TEXTURE_MIN_FILTER = 10241 #constant value
GL_TEXTURE_PRIORITY = 32870 #constant value
GL_TEXTURE_RESIDENT = 32871 #constant value
GL_TEXTURE_STACK_DEPTH = 2981 #constant value
GL_TEXTURE_WIDTH = 4096 #constant value
GL_TEXTURE_WRAP_S = 10242 #constant value
GL_TEXTURE_WRAP_T = 10243 #constant value
GL_TRANSFORM_BIT = 4096 #constant value
GL_TRIANGLES = 4 #constant value
GL_TRIANGLE_FAN = 6 #constant value
GL_TRIANGLE_STRIP = 5 #constant value
GL_TRUE = 1 #constant value
GL_UNPACK_ALIGNMENT = 3317 #constant value
GL_UNPACK_LSB_FIRST = 3313 #constant value
GL_UNPACK_ROW_LENGTH = 3314 #constant value
GL_UNPACK_SKIP_PIXELS = 3316 #constant value
GL_UNPACK_SKIP_ROWS = 3315 #constant value
GL_UNPACK_SWAP_BYTES = 3312 #constant value
GL_UNSIGNED_BYTE = 5121 #constant value
GL_UNSIGNED_INT = 5125 #constant value
GL_UNSIGNED_SHORT = 5123 #constant value
GL_VENDOR = 7936 #constant value
GL_VERSION = 7938 #constant value
GL_VERTEX_SHADER = 35633 #constant value
GL_VIEWPORT = 2978 #constant value
GL_VIEWPORT_BIT = 2048 #constant value
GL_XOR = 5382 #constant value
GL_ZERO = 0 #constant value
GL_ZOOM_X = 3350 #constant value
GL_ZOOM_Y = 3351 #constant value
def glAccum(*argv):
'''no string
'''
pass
def glActiveTexture(*argv):
'''no string
'''
pass
def glAlphaFunc(*argv):
'''no string
'''
pass
def glAreTexturesResident(*argv):
'''no string
'''
pass
def glAttachShader(*argv):
'''no string
'''
pass
def glBegin(*argv):
'''no string
'''
pass
def glBindTexture(*argv):
'''no string
'''
pass
def glBitmap(*argv):
'''no string
'''
pass
def glBlendFunc(*argv):
'''no string
'''
pass
def glCallList(*argv):
'''no string
'''
pass
def glCallLists(*argv):
'''no string
'''
pass
def glClear(*argv):
'''no string
'''
pass
def glClearAccum(*argv):
'''no string
'''
pass
def glClearColor(*argv):
'''no string
'''
pass
def glClearDepth(*argv):
'''no string
'''
pass
def glClearIndex(*argv):
'''no string
'''
pass
def glClearStencil(*argv):
'''no string
'''
pass
def glClipPlane(*argv):
'''no string
'''
pass
def glColor3b(*argv):
'''no string
'''
pass
def glColor3bv(*argv):
'''no string
'''
pass
def glColor3d(*argv):
'''no string
'''
pass
def glColor3dv(*argv):
'''no string
'''
pass
def glColor3f(*argv):
'''no string
'''
pass
def glColor3fv(*argv):
'''no string
'''
pass
def glColor3i(*argv):
'''no string
'''
pass
def glColor3iv(*argv):
'''no string
'''
pass
def glColor3s(*argv):
'''no string
'''
pass
def glColor3sv(*argv):
'''no string
'''
pass
def glColor3ub(*argv):
'''no string
'''
pass
def glColor3ubv(*argv):
'''no string
'''
pass
def glColor3ui(*argv):
'''no string
'''
pass
def glColor3uiv(*argv):
'''no string
'''
pass
def glColor3us(*argv):
'''no string
'''
pass
def glColor3usv(*argv):
'''no string
'''
pass
def glColor4b(*argv):
'''no string
'''
pass
def glColor4bv(*argv):
'''no string
'''
pass
def glColor4d(*argv):
'''no string
'''
pass
def glColor4dv(*argv):
'''no string
'''
pass
def glColor4f(*argv):
'''no string
'''
pass
def glColor4fv(*argv):
'''no string
'''
pass
def glColor4i(*argv):
'''no string
'''
pass
def glColor4iv(*argv):
'''no string
'''
pass
def glColor4s(*argv):
'''no string
'''
pass
def glColor4sv(*argv):
'''no string
'''
pass
def glColor4ub(*argv):
'''no string
'''
pass
def glColor4ubv(*argv):
'''no string
'''
pass
def glColor4ui(*argv):
'''no string
'''
pass
def glColor4uiv(*argv):
'''no string
'''
pass
def glColor4us(*argv):
'''no string
'''
pass
def glColor4usv(*argv):
'''no string
'''
pass
def glColorMask(*argv):
'''no string
'''
pass
def glColorMaterial(*argv):
'''no string
'''
pass
def glCompileShader(*argv):
'''no string
'''
pass
def glCopyPixels(*argv):
'''no string
'''
pass
def glCopyTexImage2D(*argv):
'''no string
'''
pass
def glCreateProgram(*argv):
'''no string
'''
pass
def glCreateShader(*argv):
'''no string
'''
pass
def glCullFace(*argv):
'''no string
'''
pass
def glDeleteLists(*argv):
'''no string
'''
pass
def glDeleteProgram(*argv):
'''no string
'''
pass
def glDeleteShader(*argv):
'''no string
'''
pass
def glDeleteTextures(*argv):
'''no string
'''
pass
def glDepthFunc(*argv):
'''no string
'''
pass
def glDepthMask(*argv):
'''no string
'''
pass
def glDepthRange(*argv):
'''no string
'''
pass
def glDetachShader(*argv):
'''no string
'''
pass
def glDisable(*argv):
'''no string
'''
pass
def glDrawBuffer(*argv):
'''no string
'''
pass
def glDrawPixels(*argv):
'''no string
'''
pass
def glEdgeFlag(*argv):
'''no string
'''
pass
def glEdgeFlagv(*argv):
'''no string
'''
pass
def glEnable(*argv):
'''no string
'''
pass
def glEnd(*argv):
'''no string
'''
pass
def glEndList(*argv):
'''no string
'''
pass
def glEvalCoord1d(*argv):
'''no string
'''
pass
def glEvalCoord1dv(*argv):
'''no string
'''
pass
def glEvalCoord1f(*argv):
'''no string
'''
pass
def glEvalCoord1fv(*argv):
'''no string
'''
pass
def glEvalCoord2d(*argv):
'''no string
'''
pass
def glEvalCoord2dv(*argv):
'''no string
'''
pass
def glEvalCoord2f(*argv):
'''no string
'''
pass
def glEvalCoord2fv(*argv):
'''no string
'''
pass
def glEvalMesh1(*argv):
'''no string
'''
pass
def glEvalMesh2(*argv):
'''no string
'''
pass
def glEvalPoint1(*argv):
'''no string
'''
pass
def glEvalPoint2(*argv):
'''no string
'''
pass
def glFeedbackBuffer(*argv):
'''no string
'''
pass
def glFinish(*argv):
'''no string
'''
pass
def glFlush(*argv):
'''no string
'''
pass
def glFogf(*argv):
'''no string
'''
pass
def glFogfv(*argv):
'''no string
'''
pass
def glFogi(*argv):
'''no string
'''
pass
def glFogiv(*argv):
'''no string
'''
pass
def glFrontFace(*argv):
'''no string
'''
pass
def glFrustum(*argv):
'''no string
'''
pass
def glGenLists(*argv):
'''no string
'''
pass
def glGenTextures(*argv):
'''no string
'''
pass
def glGetAttachedShaders(*argv):
'''no string
'''
pass
def glGetBooleanv(*argv):
'''no string
'''
pass
def glGetClipPlane(*argv):
'''no string
'''
pass
def glGetDoublev(*argv):
'''no string
'''
pass
def glGetError(*argv):
'''no string
'''
pass
def glGetFloatv(*argv):
'''no string
'''
pass
def glGetIntegerv(*argv):
'''no string
'''
pass
def glGetLightfv(*argv):
'''no string
'''
pass
def glGetLightiv(*argv):
'''no string
'''
pass
def glGetMapdv(*argv):
'''no string
'''
pass
def glGetMapfv(*argv):
'''no string
'''
pass
def glGetMapiv(*argv):
'''no string
'''
pass
def glGetMaterialfv(*argv):
'''no string
'''
pass
def glGetMaterialiv(*argv):
'''no string
'''
pass
def glGetPixelMapfv(*argv):
'''no string
'''
pass
def glGetPixelMapuiv(*argv):
'''no string
'''
pass
def glGetPixelMapusv(*argv):
'''no string
'''
pass
def glGetPolygonStipple(*argv):
'''no string
'''
pass
def glGetProgramInfoLog(*argv):
'''no string
'''
pass
def glGetProgramiv(*argv):
'''no string
'''
pass
def glGetShaderInfoLog(*argv):
'''no string
'''
pass
def glGetShaderSource(*argv):
'''no string
'''
pass
def glGetShaderiv(*argv):
'''no string
'''
pass
def glGetString(*argv):
'''no string
'''
pass
def glGetTexEnvfv(*argv):
'''no string
'''
pass
def glGetTexEnviv(*argv):
'''no string
'''
pass
def glGetTexGendv(*argv):
'''no string
'''
pass
def glGetTexGenfv(*argv):
'''no string
'''
pass
def glGetTexGeniv(*argv):
'''no string
'''
pass
def glGetTexImage(*argv):
'''no string
'''
pass
def glGetTexLevelParameterfv(*argv):
'''no string
'''
pass
def glGetTexLevelParameteriv(*argv):
'''no string
'''
pass
def glGetTexParameterfv(*argv):
'''no string
'''
pass
def glGetTexParameteriv(*argv):
'''no string
'''
pass
def glGetUniformLocation(*argv):
'''no string
'''
pass
def glHint(*argv):
'''no string
'''
pass
def glIndexMask(*argv):
'''no string
'''
pass
def glIndexd(*argv):
'''no string
'''
pass
def glIndexdv(*argv):
'''no string
'''
pass
def glIndexf(*argv):
'''no string
'''
pass
def glIndexfv(*argv):
'''no string
'''
pass
def glIndexi(*argv):
'''no string
'''
pass
def glIndexiv(*argv):
'''no string
'''
pass
def glIndexs(*argv):
'''no string
'''
pass
def glIndexsv(*argv):
'''no string
'''
pass
def glInitNames(*argv):
'''no string
'''
pass
def glIsEnabled(*argv):
'''no string
'''
pass
def glIsList(*argv):
'''no string
'''
pass
def glIsProgram(*argv):
'''no string
'''
pass
def glIsShader(*argv):
'''no string
'''
pass
def glIsTexture(*argv):
'''no string
'''
pass
def glLightModelf(*argv):
'''no string
'''
pass
def glLightModelfv(*argv):
'''no string
'''
pass
def glLightModeli(*argv):
'''no string
'''
pass
def glLightModeliv(*argv):
'''no string
'''
pass
def glLightf(*argv):
'''no string
'''
pass
def glLightfv(*argv):
'''no string
'''
pass
def glLighti(*argv):
'''no string
'''
pass
def glLightiv(*argv):
'''no string
'''
pass
def glLineStipple(*argv):
'''no string
'''
pass
def glLineWidth(*argv):
'''no string
'''
pass
def glLinkProgram(*argv):
'''no string
'''
pass
def glListBase(*argv):
'''no string
'''
pass
def glLoadIdentity(*argv):
'''no string
'''
pass
def glLoadMatrixd(*argv):
'''no string
'''
pass
def glLoadMatrixf(*argv):
'''no string
'''
pass
def glLoadName(*argv):
'''no string
'''
pass
def glLogicOp(*argv):
'''no string
'''
pass
def glMap1d(*argv):
'''no string
'''
pass
def glMap1f(*argv):
'''no string
'''
pass
def glMap2d(*argv):
'''no string
'''
pass
def glMap2f(*argv):
'''no string
'''
pass
def glMapGrid1d(*argv):
'''no string
'''
pass
def glMapGrid1f(*argv):
'''no string
'''
pass
def glMapGrid2d(*argv):
'''no string
'''
pass
def glMapGrid2f(*argv):
'''no string
'''
pass
def glMaterialf(*argv):
'''no string
'''
pass
def glMaterialfv(*argv):
'''no string
'''
pass
def glMateriali(*argv):
'''no string
'''
pass
def glMaterialiv(*argv):
'''no string
'''
pass
def glMatrixMode(*argv):
'''no string
'''
pass
def glMultMatrixd(*argv):
'''no string
'''
pass
def glMultMatrixf(*argv):
'''no string
'''
pass
def glNewList(*argv):
'''no string
'''
pass
def glNormal3b(*argv):
'''no string
'''
pass
def glNormal3bv(*argv):
'''no string
'''
pass
def glNormal3d(*argv):
'''no string
'''
pass
def glNormal3dv(*argv):
'''no string
'''
pass
def glNormal3f(*argv):
'''no string
'''
pass
def glNormal3fv(*argv):
'''no string
'''
pass
def glNormal3i(*argv):
'''no string
'''
pass
def glNormal3iv(*argv):
'''no string
'''
pass
def glNormal3s(*argv):
'''no string
'''
pass
def glNormal3sv(*argv):
'''no string
'''
pass
def glOrtho(*argv):
'''no string
'''
pass
def glPassThrough(*argv):
'''no string
'''
pass
def glPixelMapfv(*argv):
'''no string
'''
pass
def glPixelMapuiv(*argv):
'''no string
'''
pass
def glPixelMapusv(*argv):
'''no string
'''
pass
def glPixelStoref(*argv):
'''no string
'''
pass
def glPixelStorei(*argv):
'''no string
'''
pass
def glPixelTransferf(*argv):
'''no string
'''
pass
def glPixelTransferi(*argv):
'''no string
'''
pass
def glPixelZoom(*argv):
'''no string
'''
pass
def glPointSize(*argv):
'''no string
'''
pass
def glPolygonMode(*argv):
'''no string
'''
pass
def glPolygonOffset(*argv):
'''no string
'''
pass
def glPolygonStipple(*argv):
'''no string
'''
pass
def glPopAttrib(*argv):
'''no string
'''
pass
def glPopClientAttrib(*argv):
'''no string
'''
pass
def glPopMatrix(*argv):
'''no string
'''
pass
def glPopName(*argv):
'''no string
'''
pass
def glPrioritizeTextures(*argv):
'''no string
'''
pass
def glPushAttrib(*argv):
'''no string
'''
pass
def glPushClientAttrib(*argv):
'''no string
'''
pass
def glPushMatrix(*argv):
'''no string
'''
pass
def glPushName(*argv):
'''no string
'''
pass
def glRasterPos2d(*argv):
'''no string
'''
pass
def glRasterPos2dv(*argv):
'''no string
'''
pass
def glRasterPos2f(*argv):
'''no string
'''
pass
def glRasterPos2fv(*argv):
'''no string
'''
pass
def glRasterPos2i(*argv):
'''no string
'''
pass
def glRasterPos2iv(*argv):
'''no string
'''
pass
def glRasterPos2s(*argv):
'''no string
'''
pass
def glRasterPos2sv(*argv):
'''no string
'''
pass
def glRasterPos3d(*argv):
'''no string
'''
pass
def glRasterPos3dv(*argv):
'''no string
'''
pass
def glRasterPos3f(*argv):
'''no string
'''
pass
def glRasterPos3fv(*argv):
'''no string
'''
pass
def glRasterPos3i(*argv):
'''no string
'''
pass
def glRasterPos3iv(*argv):
'''no string
'''
pass
def glRasterPos3s(*argv):
'''no string
'''
pass
def glRasterPos3sv(*argv):
'''no string
'''
pass
def glRasterPos4d(*argv):
'''no string
'''
pass
def glRasterPos4dv(*argv):
'''no string
'''
pass
def glRasterPos4f(*argv):
'''no string
'''
pass
def glRasterPos4fv(*argv):
'''no string
'''
pass
def glRasterPos4i(*argv):
'''no string
'''
pass
def glRasterPos4iv(*argv):
'''no string
'''
pass
def glRasterPos4s(*argv):
'''no string
'''
pass
def glRasterPos4sv(*argv):
'''no string
'''
pass
def glReadBuffer(*argv):
'''no string
'''
pass
def glReadPixels(*argv):
'''no string
'''
pass
def glRectd(*argv):
'''no string
'''
pass
def glRectdv(*argv):
'''no string
'''
pass
def glRectf(*argv):
'''no string
'''
pass
def glRectfv(*argv):
'''no string
'''
pass
def glRecti(*argv):
'''no string
'''
pass
def glRectiv(*argv):
'''no string
'''
pass
def glRects(*argv):
'''no string
'''
pass
def glRectsv(*argv):
'''no string
'''
pass
def glRenderMode(*argv):
'''no string
'''
pass
def glRotated(*argv):
'''no string
'''
pass
def glRotatef(*argv):
'''no string
'''
pass
def glScaled(*argv):
'''no string
'''
pass
def glScalef(*argv):
'''no string
'''
pass
def glScissor(*argv):
'''no string
'''
pass
def glSelectBuffer(*argv):
'''no string
'''
pass
def glShadeModel(*argv):
'''no string
'''
pass
def glShaderSource(*argv):
'''no string
'''
pass
def glStencilFunc(*argv):
'''no string
'''
pass
def glStencilMask(*argv):
'''no string
'''
pass
def glStencilOp(*argv):
'''no string
'''
pass
def glTexCoord1d(*argv):
'''no string
'''
pass
def glTexCoord1dv(*argv):
'''no string
'''
pass
def glTexCoord1f(*argv):
'''no string
'''
pass
def glTexCoord1fv(*argv):
'''no string
'''
pass
def glTexCoord1i(*argv):
'''no string
'''
pass
def glTexCoord1iv(*argv):
'''no string
'''
pass
def glTexCoord1s(*argv):
'''no string
'''
pass
def glTexCoord1sv(*argv):
'''no string
'''
pass
def glTexCoord2d(*argv):
'''no string
'''
pass
def glTexCoord2dv(*argv):
'''no string
'''
pass
def glTexCoord2f(*argv):
'''no string
'''
pass
def glTexCoord2fv(*argv):
'''no string
'''
pass
def glTexCoord2i(*argv):
'''no string
'''
pass
def glTexCoord2iv(*argv):
'''no string
'''
pass
def glTexCoord2s(*argv):
'''no string
'''
pass
def glTexCoord2sv(*argv):
'''no string
'''
pass
def glTexCoord3d(*argv):
'''no string
'''
pass
def glTexCoord3dv(*argv):
'''no string
'''
pass
def glTexCoord3f(*argv):
'''no string
'''
pass
def glTexCoord3fv(*argv):
'''no string
'''
pass
def glTexCoord3i(*argv):
'''no string
'''
pass
def glTexCoord3iv(*argv):
'''no string
'''
pass
def glTexCoord3s(*argv):
'''no string
'''
pass
def glTexCoord3sv(*argv):
'''no string
'''
pass
def glTexCoord4d(*argv):
'''no string
'''
pass
def glTexCoord4dv(*argv):
'''no string
'''
pass
def glTexCoord4f(*argv):
'''no string
'''
pass
def glTexCoord4fv(*argv):
'''no string
'''
pass
def glTexCoord4i(*argv):
'''no string
'''
pass
def glTexCoord4iv(*argv):
'''no string
'''
pass
def glTexCoord4s(*argv):
'''no string
'''
pass
def glTexCoord4sv(*argv):
'''no string
'''
pass
def glTexEnvf(*argv):
'''no string
'''
pass
def glTexEnvfv(*argv):
'''no string
'''
pass
def glTexEnvi(*argv):
'''no string
'''
pass
def glTexEnviv(*argv):
'''no string
'''
pass
def glTexGend(*argv):
'''no string
'''
pass
def glTexGendv(*argv):
'''no string
'''
pass
def glTexGenf(*argv):
'''no string
'''
pass
def glTexGenfv(*argv):
'''no string
'''
pass
def glTexGeni(*argv):
'''no string
'''
pass
def glTexGeniv(*argv):
'''no string
'''
pass
def glTexImage1D(*argv):
'''no string
'''
pass
def glTexImage2D(*argv):
'''no string
'''
pass
def glTexParameterf(*argv):
'''no string
'''
pass
def glTexParameterfv(*argv):
'''no string
'''
pass
def glTexParameteri(*argv):
'''no string
'''
pass
def glTexParameteriv(*argv):
'''no string
'''
pass
def glTranslated(*argv):
'''no string
'''
pass
def glTranslatef(*argv):
'''no string
'''
pass
def glUniform1f(*argv):
'''no string
'''
pass
def glUniform1fv(*argv):
'''no string
'''
pass
def glUniform1i(*argv):
'''no string
'''
pass
def glUniform1iv(*argv):
'''no string
'''
pass
def glUniform2f(*argv):
'''no string
'''
pass
def glUniform2fv(*argv):
'''no string
'''
pass
def glUniform2i(*argv):
'''no string
'''
pass
def glUniform2iv(*argv):
'''no string
'''
pass
def glUniform3f(*argv):
'''no string
'''
pass
def glUniform3fv(*argv):
'''no string
'''
pass
def glUniform3i(*argv):
'''no string
'''
pass
def glUniform3iv(*argv):
'''no string
'''
pass
def glUniform4f(*argv):
'''no string
'''
pass
def glUniform4fv(*argv):
'''no string
'''
pass
def glUniform4i(*argv):
'''no string
'''
pass
def glUniform4iv(*argv):
'''no string
'''
pass
def glUniformMatrix2fv(*argv):
'''no string
'''
pass
def glUniformMatrix2x3fv(*argv):
'''no string
'''
pass
def glUniformMatrix2x4fv(*argv):
'''no string
'''
pass
def glUniformMatrix3fv(*argv):
'''no string
'''
pass
def glUniformMatrix3x2fv(*argv):
'''no string
'''
pass
def glUniformMatrix3x4fv(*argv):
'''no string
'''
pass
def glUniformMatrix4fv(*argv):
'''no string
'''
pass
def glUniformMatrix4x2fv(*argv):
'''no string
'''
pass
def glUniformMatrix4x3fv(*argv):
'''no string
'''
pass
def glUseProgram(*argv):
'''no string
'''
pass
def glValidateProgram(*argv):
'''no string
'''
pass
def glVertex2d(*argv):
'''no string
'''
pass
def glVertex2dv(*argv):
'''no string
'''
pass
def glVertex2f(*argv):
'''no string
'''
pass
def glVertex2fv(*argv):
'''no string
'''
pass
def glVertex2i(*argv):
'''no string
'''
pass
def glVertex2iv(*argv):
'''no string
'''
pass
def glVertex2s(*argv):
'''no string
'''
pass
def glVertex2sv(*argv):
'''no string
'''
pass
def glVertex3d(*argv):
'''no string
'''
pass
def glVertex3dv(*argv):
'''no string
'''
pass
def glVertex3f(*argv):
'''no string
'''
pass
def glVertex3fv(*argv):
'''no string
'''
pass
def glVertex3i(*argv):
'''no string
'''
pass
def glVertex3iv(*argv):
'''no string
'''
pass
def glVertex3s(*argv):
'''no string
'''
pass
def glVertex3sv(*argv):
'''no string
'''
pass
def glVertex4d(*argv):
'''no string
'''
pass
def glVertex4dv(*argv):
'''no string
'''
pass
def glVertex4f(*argv):
'''no string
'''
pass
def glVertex4fv(*argv):
'''no string
'''
pass
def glVertex4i(*argv):
'''no string
'''
pass
def glVertex4iv(*argv):
'''no string
'''
pass
def glVertex4s(*argv):
'''no string
'''
pass
def glVertex4sv(*argv):
'''no string
'''
pass
def glViewport(*argv):
'''no string
'''
pass
def gluLookAt(*argv):
'''no string
'''
pass
def gluOrtho2D(*argv):
'''no string
'''
pass
def gluPerspective(*argv):
'''no string
'''
pass
def gluPickMatrix(*argv):
'''no string
'''
pass
def gluProject(*argv):
'''no string
'''
pass
def gluUnProject(*argv):
'''no string
'''
pass
class Buffer:
    """Auto-generated stub of an OpenGL buffer wrapper.

    All members are placeholders with no runtime behavior; presumably this
    mirrors Blender's ``bgl.Buffer`` API — TODO confirm against the generator.
    """
    def to_list(*argv):
        '''return the buffer as a list
        '''
        pass
    # Placeholder for the buffer's dimension list; always None in this stub.
    dimensions = None
| 13.119177
| 57
| 0.560692
| 5,021
| 45,904
| 4.927903
| 0.222067
| 0.236431
| 0.272198
| 0.237966
| 0.400679
| 0.041466
| 0.021824
| 0.021824
| 0
| 0
| 0
| 0.066482
| 0.339077
| 45,904
| 3,498
| 58
| 13.122927
| 0.749069
| 0.316966
| 0
| 0.310084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.310084
| false
| 0.313445
| 0
| 0
| 0.311765
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
481bd685e32984b7ef0230a75e16fe22e1500b4c
| 175
|
py
|
Python
|
exercicios_programas/ex10_repeticoes_aninhadas/exercicio5.27.py
|
robinson-1985/livro_python
|
2b94d32c81121b631a70f8c9fc443d697947ee63
|
[
"MIT"
] | null | null | null |
exercicios_programas/ex10_repeticoes_aninhadas/exercicio5.27.py
|
robinson-1985/livro_python
|
2b94d32c81121b631a70f8c9fc443d697947ee63
|
[
"MIT"
] | null | null | null |
exercicios_programas/ex10_repeticoes_aninhadas/exercicio5.27.py
|
robinson-1985/livro_python
|
2b94d32c81121b631a70f8c9fc443d697947ee63
|
[
"MIT"
] | null | null | null |
''' 5.27 Escreva um programa que verifique se um número é palíndromo. Um número é
palíndromo se continua o mesmo caso seus dígitos sejam invertidos. Exemplos: 454, 10501. '''
| 58.333333
| 92
| 0.76
| 28
| 175
| 4.785714
| 0.785714
| 0.119403
| 0.134328
| 0.283582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075862
| 0.171429
| 175
| 2
| 93
| 87.5
| 0.841379
| 0.948571
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
48403bab1cee3a7d5c1c5d57394f1c352ac8123a
| 35,488
|
py
|
Python
|
src/pruning_layers/core_layers.py
|
ivclab/PackExpander
|
81b8e832018f60fc678883f3025c39cb1d289e27
|
[
"MIT"
] | 25
|
2019-03-28T09:02:44.000Z
|
2022-02-11T15:30:50.000Z
|
src/pruning_layers/core_layers.py
|
ivclab/PackExpander
|
81b8e832018f60fc678883f3025c39cb1d289e27
|
[
"MIT"
] | 2
|
2020-10-29T06:16:16.000Z
|
2021-01-04T02:23:04.000Z
|
src/pruning_layers/core_layers.py
|
ivclab/PackExpander
|
81b8e832018f60fc678883f3025c39cb1d289e27
|
[
"MIT"
] | 7
|
2020-03-26T05:39:30.000Z
|
2021-07-30T09:12:42.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the core layer classes for model pruning and its functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pdb
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import control_flow_ops
import pdb
# Graph-collection keys under which the pruning layers register their
# variables, so training/eval code can look them up globally.
MASK_COLLECTION = 'masks'
THRESHOLD_COLLECTION = 'thresholds'
MASKED_WEIGHT_COLLECTION = 'masked_weights'
WEIGHT_COLLECTION = 'kernel'
# The 'weights' part of the name is needed for the quantization library
# to recognize that the kernel should be quantized.
MASKED_WEIGHT_NAME = 'weights/masked_weight'
# Command-line flags (open_ratio, share_only_task_1, etc.) consumed at build time.
FLAGS = tf.app.flags.FLAGS
def change_mask(mask, task_id=1, open_ratio=0.2, cell_scope_name=''):
  """Build an op that claims free mask entries for `task_id`.

  Entries equal to 0 are unassigned; each one is opened for `task_id`
  with probability `open_ratio`. For the first task the whole mask is
  claimed. Layers whose op name does not contain `cell_scope_name`
  are left untouched (a no-op is returned).
  """
  if cell_scope_name not in mask.op.name:
    return control_flow_ops.no_op()
  # Work in int32: GPU kernels here only support int32/int64, not int8.
  mask_i32 = tf.cast(mask, dtype=tf.int32)
  task_fill = tf.constant(value=task_id, shape=mask_i32.shape, dtype=tf.int32)
  all_false = tf.constant(value=False, shape=mask_i32.shape, dtype=tf.bool)
  # The uniform sampler must be pinned to the CPU; on GPU it errors out.
  with tf.device('/cpu:0'):
    coin_flips = tf.random_uniform(shape=mask.shape, minval=0.0, maxval=1.0, dtype=tf.float32)
  # True where the entry is free AND the coin flip falls under open_ratio.
  opened = tf.where(tf.equal(mask, 0), tf.less(coin_flips, open_ratio), all_false)
  if task_id == 1:
    # First task simply owns every weight.
    full_mask = tf.constant(value=1, shape=mask.shape, dtype=tf.int8)
    return tf.cast(mask.assign(full_mask), dtype=tf.bool)
  # Stamp task_id into the selected free entries; keep the rest as-is.
  updated = tf.where(opened, task_fill, mask_i32)
  return tf.cast(mask.assign(tf.cast(updated, dtype=tf.int8)), dtype=tf.bool)
def change_mask_and_weight(mask, weight, task_id=1, open_ratio=0.2, cell_scope_name=''):
  """Like `change_mask`, but also re-initializes the newly claimed weights.

  Opens each free (zero) mask entry for `task_id` with probability
  `open_ratio` and assigns a fresh N(0, 0.01) value to the corresponding
  weight. Layers whose op name lacks `cell_scope_name` get a no-op.
  """
  if cell_scope_name not in mask.op.name:
    return control_flow_ops.no_op()
  # from 0 to 1, use setting probability to decide whether open the rest space to train
  # with tf.device('/cpu:0'):
  mask_in_gpu = tf.cast(mask, dtype=tf.int32)  # GPU only support int32, int64, not int8
  task_id_mask = tf.constant(value=task_id, shape=mask_in_gpu.shape, dtype=tf.int32)  # GPU only support int32, int64, not int8
  false_mask = tf.constant(value=False, shape=mask_in_gpu.shape, dtype=tf.bool)
  # Replacement values for weights that get opened for this task.
  random_weight = tf.random_normal(shape=weight.shape, stddev=0.01, dtype=tf.float32)
  with tf.device('/cpu:0'):
    random_open_mask = tf.random_uniform(shape=mask.shape, minval=0.0, maxval=1.0, dtype=tf.float32)  # if we don't put this op to cpu, error occurs!
  select_mask = tf.where(tf.equal(mask, 0), tf.less(random_open_mask, open_ratio), false_mask)
  if task_id == 1:
    # First task claims the whole mask; weights keep their initializer values.
    return tf.cast(mask.assign(tf.constant(value=1, shape=mask.shape, dtype=tf.int8)), dtype=tf.bool)
  else:
    # logical_and forces BOTH assigns to run before the returned op completes.
    return tf.logical_and(
        tf.cast(mask.assign(tf.cast(tf.where(select_mask, task_id_mask, mask_in_gpu), dtype=tf.int8)), dtype=tf.bool),  # select part of elems in mask and change mask value 0.0 to new task_id
        tf.cast(weight.assign(tf.where(select_mask, random_weight, weight)), dtype=tf.bool)  # assign these part of elems in mask with new task_id the new random weights
    )
class _MaskedConv(base.Layer):
  """Abstract nD convolution layer (private, used as implementation base).
  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. The weight tensor of this layer is masked.
  If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.
  Arguments:
    rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of n integers, specifying the
      length of the convolution window.
    strides: An integer or tuple/list of n integers,
      specifying the stride length of the convolution.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
    dilation_rate: An integer or tuple/list of n integers, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self,
               rank,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               task_id=1,  # id of the task currently being trained (1-based)
               **kwargs):
    super(_MaskedConv, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.rank = rank
    self.filters = filters
    # utils.normalize_tuple turns scalar args into per-dimension tuples.
    self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = utils.normalize_tuple(strides, rank, 'strides')
    self.padding = utils.normalize_padding(padding)
    self.data_format = utils.normalize_data_format(data_format)
    self.dilation_rate = utils.normalize_tuple(dilation_rate, rank,
                                               'dilation_rate')
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.input_spec = base.InputSpec(ndim=self.rank + 2)
    self.task_id = task_id
  def build(self, input_shape):
    """Creates mask/kernel/threshold variables and the masked kernel graph."""
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = 1 if self.data_format == 'channels_first' else -1
    if input_shape[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis].value
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    # int8 per-weight mask: 0 = unassigned, otherwise holds an owning task id
    # (written by change_mask / change_mask_and_weight above).
    self.mask = self.add_variable(
        name='mask',
        shape=kernel_shape,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=tf.int8)
    self.kernel = self.add_variable(
        name='kernel',
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        trainable=True,
        dtype=self.dtype)
    # Scalar pruning threshold (not trained; updated externally via its collection).
    self.threshold = self.add_variable(
        name='threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Optionally open part of the free mask space for the current task
    # (and re-randomize the corresponding weights) before the mask is read.
    if FLAGS.reset_weights_in_new_locations and FLAGS.open_ratio:
      conditional_op = change_mask_and_weight(self.mask, self.kernel, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    elif FLAGS.open_ratio:
      conditional_op = change_mask(self.mask, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    else:
      conditional_op = control_flow_ops.no_op()
    # conditional_op = control_flow_ops.cond(
    #     manually_set_zeros_to_task_id(),
    #     lambda: control_flow_ops.no_op(),
    #     lambda: change_mask_and_weight(self.mask, self.kernel, self.task_id, FLAGS.open_ratio))
    # Add masked_weights in the weights namescope so as to make it easier
    # for the quantization library to add quant ops.
    with tf.control_dependencies([conditional_op]):
      if FLAGS.share_only_task_1:
        # Visible weights: those owned by task 1 or by the current task.
        boolean_mask = tf.cast(
            tf.logical_or(
                tf.equal(tf.identity(self.mask), 1),
                tf.equal(tf.identity(self.mask), self.task_id)),
            dtype=tf.float32)
      else:
        # Visible weights: those owned by any task in 1..task_id.
        boolean_mask = tf.cast(
            tf.logical_and(
                tf.greater_equal(tf.identity(self.mask), 1),
                tf.less_equal(tf.identity(self.mask), self.task_id)),
            dtype=tf.float32)
      self.masked_kernel = math_ops.multiply(boolean_mask, self.kernel,
                                             MASKED_WEIGHT_NAME)
    # Register once per variable so pruning utilities can find these tensors.
    if self.mask not in ops.get_collection_ref(MASK_COLLECTION):
      ops.add_to_collection(MASK_COLLECTION, self.mask)
      ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
      ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
      ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
    if self.use_bias:
      # Biases are NOT shared across tasks: each task gets its own bias under
      # a task-specific variable scope, so the scope is swapped temporarily.
      original_scope = self._scope
      with tf.variable_scope('task_{}'.format(self.task_id)) as scope:  # Because there are multi-task problems
        self._scope = scope
        self.bias = self.add_variable(
            name='bias',
            shape=(self.filters,),
            initializer=self.bias_initializer,
            regularizer=self.bias_regularizer,
            trainable=True,
            dtype=self.dtype)
      self._scope = original_scope
    else:
      self.bias = None
    self.input_spec = base.InputSpec(
        ndim=self.rank + 2, axes={channel_axis: input_dim})
    self.built = True
  def call(self, inputs):
    """Runs the convolution with the masked kernel, then bias and activation."""
    outputs = nn.convolution(
        input=inputs,
        filter=self.masked_kernel,
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format, self.rank + 2))
    if self.bias is not None:
      if self.data_format == 'channels_first':
        if self.rank == 1:
          # nn.bias_add does not accept a 1D input tensor.
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        if self.rank == 2:
          outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
        if self.rank == 3:
          # As of Mar 2017, direct addition is significantly slower than
          # bias_add when computing gradients. To use bias_add, we collapse Z
          # and Y into a single dimension to obtain a 4D input tensor.
          outputs_shape = outputs.shape.as_list()
          outputs_4d = array_ops.reshape(outputs, [
              outputs_shape[0], outputs_shape[1],
              outputs_shape[2] * outputs_shape[3], outputs_shape[4]
          ])
          outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
          outputs = array_ops.reshape(outputs_4d, outputs_shape)
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
    if self.activation is not None:
      return self.activation(outputs)
    return outputs
  def compute_output_shape(self, input_shape):
    """Returns the output TensorShape for a given input shape."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_last':
      # Spatial dims sit between batch and channels.
      space = input_shape[1:-1]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0]] + new_space +
                                      [self.filters])
    else:
      # channels_first: spatial dims follow batch and channels.
      space = input_shape[2:]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0], self.filters] +
                                      new_space)
class MaskedSeparableConv2D(_MaskedConv):
  """Separable 2D convolution layer with task-indexed weight masks.

  Unlike the base class, this layer keeps two kernels — a depthwise kernel
  and a pointwise kernel — each paired with its own integer `mask` variable
  and scalar `threshold`. In `build`, a boolean mask is derived from the
  integer mask and the layer's `task_id` (see the `FLAGS.share_only_task_1`
  branch below) and multiplied into each kernel to form
  `masked_depthwise_kernel` / `masked_pointwise_kernel`, which `call` uses.
  The bias (if enabled) is created under a per-task variable scope
  `task_<task_id>`, so each task gets its own bias.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1),
               depth_multiplier=1,  # separable-conv-only parameter
               activation=None,
               use_bias=True,
               depthwise_initializer='global_uniform',  # separable-conv-only
               pointwise_initializer='global_uniform',  # separable-conv-only
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_initializer=init_ops.zeros_initializer(),
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               task_id=1,
               **kwargs):
    # rank is fixed at 2: only the 2D separable convolution is supported.
    super(MaskedSeparableConv2D, self).__init__(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        bias_initializer=bias_initializer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        name=name,
        task_id=task_id,
        **kwargs)
    # State specific to separable convolutions (not present in the base
    # class): per-kernel initializers/regularizers and the depth multiplier.
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = depthwise_initializer
    self.pointwise_initializer = pointwise_initializer
    self.depthwise_regularizer = depthwise_regularizer
    self.pointwise_regularizer = pointwise_regularizer

  def build(self, input_shape):
    """Create kernels, masks, thresholds, bias, and the mask-update graph.

    Raises:
      ValueError: If the input's channel dimension is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = 1 if self.data_format == 'channels_first' else -1
    if input_shape[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis].value
    # Depthwise kernel: (kh, kw, in_channels, depth_multiplier).
    depthwise_kernel_shape = self.kernel_size + (input_dim,
                                                 self.depth_multiplier)
    # Pointwise kernel: 1x1 conv from the expanded depthwise channels to
    # `filters` output channels.
    pointwise_kernel_shape = (
        1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
    # Integer (int8) mask: entry values encode which task "owns" each
    # weight; 0 presumably means unassigned — TODO confirm against the
    # change_mask helpers.
    self.depthwise_mask = self.add_variable(
        name='depthwise_mask',
        shape=depthwise_kernel_shape,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=tf.int8)
    self.depthwise_kernel = self.add_variable(
        name='depthwise_kernel',
        shape=depthwise_kernel_shape,
        initializer=self.depthwise_initializer,
        regularizer=self.depthwise_regularizer,
        trainable=True,
        dtype=self.dtype)
    # Scalar pruning threshold, updated externally (not trainable).
    self.depthwise_threshold = self.add_variable(
        name='depthwise_threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    self.pointwise_mask = self.add_variable(
        name='pointwise_mask',
        shape=pointwise_kernel_shape,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=tf.int8)
    self.pointwise_kernel = self.add_variable(
        name='pointwise_kernel',
        shape=pointwise_kernel_shape,
        initializer=self.pointwise_initializer,
        regularizer=self.pointwise_regularizer,
        trainable=True,
        dtype=self.dtype)
    self.pointwise_threshold = self.add_variable(
        name='pointwise_threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Optionally open a fraction of mask locations to the current task
    # (and, if requested, re-initialize the weights there). When no
    # open_ratio is configured this is a no-op dependency.
    if FLAGS.reset_weights_in_new_locations and FLAGS.open_ratio:
      depthwise_conditional_op = change_mask_and_weight(self.depthwise_mask, self.depthwise_kernel, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    elif FLAGS.open_ratio:
      depthwise_conditional_op = change_mask(self.depthwise_mask, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    else:
      depthwise_conditional_op = control_flow_ops.no_op()
    # depthwise_conditional_op = control_flow_ops.cond(
    #     manually_set_zeros_to_task_id(),
    #     lambda: control_flow_ops.no_op(),
    #     lambda: change_mask_and_weight(self.depthwise_mask,
    #         self.depthwise_kernel, self.task_id, FLAGS.open_ratio))
    if FLAGS.reset_weights_in_new_locations and FLAGS.open_ratio:
      pointwise_conditional_op = change_mask_and_weight(self.pointwise_mask, self.pointwise_kernel, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    elif FLAGS.open_ratio:
      pointwise_conditional_op = change_mask(self.pointwise_mask, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    else:
      pointwise_conditional_op = control_flow_ops.no_op()
    # pointwise_conditional_op = control_flow_ops.cond(
    #     manually_set_zeros_to_task_id(),
    #     lambda: control_flow_ops.no_op(),
    #     lambda: change_mask_and_weight(self.pointwise_mask,
    #         self.pointwise_kernel, self.task_id, FLAGS.open_ratio))
    # Add masked_weights in the weights namescope so as to make it easier
    # for the quantization library to add quant ops.
    with tf.control_dependencies([depthwise_conditional_op,
                                  pointwise_conditional_op]):
      if FLAGS.share_only_task_1:
        # Use weights owned by task 1 (shared) or by this task only.
        depthwise_boolean_mask = tf.cast(
            tf.logical_or(
                tf.equal(tf.identity(self.depthwise_mask), 1),
                tf.equal(tf.identity(self.depthwise_mask), self.task_id)),
            dtype=tf.float32)
        pointwise_boolean_mask = tf.cast(
            tf.logical_or(
                tf.equal(tf.identity(self.pointwise_mask), 1),
                tf.equal(tf.identity(self.pointwise_mask), self.task_id)),
            dtype=tf.float32)
      else:
        # Use weights owned by any task up to and including this one.
        depthwise_boolean_mask = tf.cast(
            tf.logical_and(
                tf.greater_equal(tf.identity(self.depthwise_mask), 1),
                tf.less_equal(tf.identity(self.depthwise_mask), self.task_id)),
            dtype=tf.float32)
        pointwise_boolean_mask = tf.cast(
            tf.logical_and(
                tf.greater_equal(tf.identity(self.pointwise_mask), 1),
                tf.less_equal(tf.identity(self.pointwise_mask), self.task_id)),
            dtype=tf.float32)
      self.masked_depthwise_kernel = math_ops.multiply(depthwise_boolean_mask,
                                                       self.depthwise_kernel,
                                                       MASKED_WEIGHT_NAME)
      self.masked_pointwise_kernel = math_ops.multiply(pointwise_boolean_mask,
                                                       self.pointwise_kernel,
                                                       MASKED_WEIGHT_NAME)
    # Register variables in the pruning collections exactly once (guarded by
    # the membership check, since build may be re-entered via scope reuse).
    if self.depthwise_mask not in ops.get_collection_ref(MASK_COLLECTION):
      ops.add_to_collection(MASK_COLLECTION, self.depthwise_mask)
      ops.add_to_collection(MASK_COLLECTION, self.pointwise_mask)
      ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_depthwise_kernel)
      ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_pointwise_kernel)
      ops.add_to_collection(THRESHOLD_COLLECTION, self.depthwise_threshold)
      ops.add_to_collection(THRESHOLD_COLLECTION, self.pointwise_threshold)
      ops.add_to_collection(WEIGHT_COLLECTION, self.depthwise_kernel)
      ops.add_to_collection(WEIGHT_COLLECTION, self.pointwise_kernel)
    if self.use_bias:
      # The bias lives in a per-task variable scope because there are
      # multiple tasks; temporarily swap self._scope so add_variable
      # creates it under task_<task_id>.
      original_scope = self._scope
      with tf.variable_scope('task_{}'.format(self.task_id)) as scope:
        self._scope = scope
        self.bias = self.add_variable(
            name='bias',
            shape=(self.filters,),
            initializer=self.bias_initializer,
            regularizer=self.bias_regularizer,
            trainable=True,
            dtype=self.dtype)
      self._scope = original_scope
    else:
      self.bias = None
    self.input_spec = base.InputSpec(
        ndim=self.rank + 2, axes={channel_axis: input_dim})
    self.built = True

  def call(self, inputs):
    """Apply the masked separable convolution, bias and activation."""
    # Apply the actual ops.
    if self.data_format == 'channels_last':
      strides = (1,) + self.strides + (1,)
    else:
      strides = (1, 1) + self.strides
    outputs = nn.separable_conv2d(
        inputs,
        self.masked_depthwise_kernel,
        self.masked_pointwise_kernel,
        strides=strides,
        padding=self.padding.upper(),
        rate=self.dilation_rate,
        data_format=utils.convert_data_format(self.data_format, ndim=4))
    if self.bias is not None:
      if self.data_format == 'channels_first':
        # NOTE(review): rank is fixed at 2 by __init__, so only the
        # rank == 2 branch below is live; the others mirror the base class.
        if self.rank == 1:
          # nn.bias_add does not accept a 1D input tensor.
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        if self.rank == 2:
          outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
        if self.rank == 3:
          # As of Mar 2017, direct addition is significantly slower than
          # bias_add when computing gradients. To use bias_add, we collapse Z
          # and Y into a single dimension to obtain a 4D input tensor.
          outputs_shape = outputs.shape.as_list()
          outputs_4d = array_ops.reshape(outputs, [
              outputs_shape[0], outputs_shape[1],
              outputs_shape[2] * outputs_shape[3], outputs_shape[4]
          ])
          outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
          outputs = array_ops.reshape(outputs_4d, outputs_shape)
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
    if self.activation is not None:
      return self.activation(outputs)
    return outputs
class MaskedConv2D(_MaskedConv):
  """2D convolution layer with task-masked weights.

  A thin wrapper around `_MaskedConv` that fixes `rank=2`: it convolves
  (cross-correlates) a masked kernel with the input, optionally adds a
  bias when `use_bias` is True, and applies `activation` if given. All
  masking behavior (per-task integer masks, thresholds, collections) is
  implemented by the base class.

  Arguments:
    filters: Integer, dimensionality of the output space (number of
      convolution filters).
    kernel_size: Integer or tuple/list of 2 integers giving the height and
      width of the convolution window; a single integer applies to both.
    strides: Integer or tuple/list of 2 integers, convolution strides along
      height and width. Any stride != 1 is incompatible with any
      `dilation_rate` != 1.
    padding: `"valid"` or `"same"` (case-insensitive).
    data_format: `channels_last` (batch, height, width, channels) or
      `channels_first` (batch, channels, height, width).
    dilation_rate: Integer or tuple/list of 2 integers for dilated
      convolution; incompatible with strides != 1.
    activation: Activation function, or None for linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer for the convolution kernel.
    bias_initializer: Initializer for the bias vector; a default is used
      when None.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, whether variables join
      `GraphKeys.TRAINABLE_VARIABLES`.
    name: A string, the name of the layer.
    task_id: Integer id of the current task; selects which mask entries are
      active (see `_MaskedConv`).
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               task_id=1,
               **kwargs):
    # Everything is forwarded verbatim to the base class; the only thing
    # this subclass contributes is rank=2.
    super(MaskedConv2D, self).__init__(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        name=name,
        task_id=task_id,
        **kwargs)
class MaskedFullyConnected(base.Layer):
  """Fully-connected layer class with task-masked weights.

  This layer implements the operation:
  `outputs = activation(inputs . masked_kernel + bias)`
  where `masked_kernel` is the weight matrix multiplied by a boolean mask
  derived from the per-task integer `mask` variable and this layer's
  `task_id`, `activation` is the optional activation function, and `bias`
  is a per-task bias vector created under scope `task_<task_id>` (only if
  `use_bias` is `True`).

  Note: if the input to the layer has a rank greater than 2, the matrix
  multiply is applied over the last axis via `tensordot`.

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable), or None for linear.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such
      cases.
    task_id: Integer id of the current task; selects which mask entries
      are active.

  Properties:
    units: Python integer, dimensionality of the output space.
    kernel: Weight matrix (TensorFlow variable or tensor).
    masked_kernel: The kernel with the task mask applied (used by `call`).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               task_id=1,
               **kwargs):
    super(MaskedFullyConnected, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.input_spec = base.InputSpec(min_ndim=2)
    self.task_id = task_id

  def build(self, input_shape):
    """Create kernel, mask, threshold and bias variables.

    Raises:
      ValueError: If the last dimension of the input is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    self.input_spec = base.InputSpec(
        min_ndim=2, axes={-1: input_shape[-1].value})
    # Integer (int8) mask: entry values encode which task "owns" each
    # weight; initialized to 0 (unassigned) and updated by change_mask*.
    self.mask = self.add_variable(
        name='mask',
        shape=[input_shape[-1].value, self.units],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=tf.int8)
    self.kernel = self.add_variable(
        'kernel',
        shape=[input_shape[-1].value, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        dtype=self.dtype,
        trainable=True)
    # Scalar pruning threshold, updated externally (not trainable).
    self.threshold = self.add_variable(
        name='threshold',
        shape=[],
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        dtype=self.dtype)
    # Optionally open a fraction of mask locations to the current task,
    # possibly re-initializing the weights there; otherwise a no-op.
    if FLAGS.reset_weights_in_new_locations and FLAGS.open_ratio:
      conditional_op = change_mask_and_weight(self.mask, self.kernel, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    elif FLAGS.open_ratio:
      conditional_op = change_mask(self.mask, self.task_id, FLAGS.open_ratio, FLAGS.cell_scope_to_be_assigned_current_task_id)
    else:
      conditional_op = control_flow_ops.no_op()
    # Add masked_weights in the weights namescope so as to make it easier
    # for the quantization library to add quant ops.
    with tf.control_dependencies([conditional_op]):
      if FLAGS.share_only_task_1:
        # Use weights owned by task 1 (shared) or by this task only.
        boolean_mask = tf.cast(
            tf.logical_or(
                tf.equal(tf.identity(self.mask), 1),
                tf.equal(tf.identity(self.mask), self.task_id)),
            dtype=tf.float32)
      else:
        # Use weights owned by any task up to and including this one.
        boolean_mask = tf.cast(
            tf.logical_and(
                tf.greater_equal(tf.identity(self.mask), 1),
                tf.less_equal(tf.identity(self.mask), self.task_id)),
            dtype=tf.float32)
      self.masked_kernel = math_ops.multiply(boolean_mask, self.kernel,
                                             MASKED_WEIGHT_NAME)
    # Register in the pruning collections exactly once.
    if self.mask not in ops.get_collection_ref(MASK_COLLECTION):
      ops.add_to_collection(MASK_COLLECTION, self.mask)
      ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
      ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
      ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
    if self.use_bias:
      # Per-task bias: created under the task_<task_id> variable scope
      # because there are multiple tasks.
      original_scope = self._scope
      with tf.variable_scope('task_{}'.format(self.task_id)) as scope:
        self._scope = scope
        self.bias = self.add_variable(
            name='bias',
            # BUG FIX: was `self.filters`, an attribute this class never
            # defines (it belongs to the conv layers); build() raised
            # AttributeError whenever use_bias=True. The dense bias has
            # one entry per output unit.
            shape=(self.units,),
            initializer=self.bias_initializer,
            regularizer=self.bias_regularizer,
            trainable=True,
            dtype=self.dtype)
      self._scope = original_scope
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    """Apply the masked matmul, bias and activation to `inputs`."""
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Broadcasting is required for the inputs: contract the last axis of
      # `inputs` with the first axis of the kernel.
      outputs = standard_ops.tensordot(inputs, self.masked_kernel,
                                       [[len(shape) - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.masked_kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    """Return `input_shape` with the last dimension replaced by `units`.

    Raises:
      ValueError: If the innermost dimension of `input_shape` is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
| 42.705174
| 196
| 0.672453
| 4,576
| 35,488
| 4.996504
| 0.089379
| 0.015745
| 0.010934
| 0.013296
| 0.798373
| 0.757873
| 0.738672
| 0.711162
| 0.680283
| 0.667337
| 0
| 0.008279
| 0.241011
| 35,488
| 830
| 197
| 42.756627
| 0.840579
| 0.290352
| 0
| 0.72935
| 0
| 0
| 0.029575
| 0.000845
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024605
| false
| 0
| 0.02812
| 0
| 0.086116
| 0.001757
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
48772bc6be67c64dcca30600d45c0ee39c37c7e1
| 10,607
|
py
|
Python
|
tests/test_dynamicalsystem.py
|
albapa/QUIP
|
ecde1e332c6bd62c238d3cd90e31dba4fb390313
|
[
"NRL"
] | 229
|
2015-01-20T16:35:59.000Z
|
2022-03-24T10:44:32.000Z
|
tests/test_dynamicalsystem.py
|
albapa/QUIP
|
ecde1e332c6bd62c238d3cd90e31dba4fb390313
|
[
"NRL"
] | 356
|
2015-05-29T08:28:59.000Z
|
2022-03-31T22:55:34.000Z
|
tests/test_dynamicalsystem.py
|
albapa/QUIP
|
ecde1e332c6bd62c238d3cd90e31dba4fb390313
|
[
"NRL"
] | 106
|
2015-01-21T12:56:29.000Z
|
2022-03-25T08:39:24.000Z
|
# HQ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# HQ X
# HQ X quippy: Python interface to QUIP atomistic simulation library
# HQ X
# HQ X Copyright James Kermode 2010
# HQ X
# HQ X These portions of the source code are released under the GNU General
# HQ X Public License, version 2, http://www.gnu.org/copyleft/gpl.html
# HQ X
# HQ X If you would like to license the source code under different terms,
# HQ X please contact James Kermode, james.kermode@gmail.com
# HQ X
# HQ X When using this software, please cite the following reference:
# HQ X
# HQ X http://www.jrkermode.co.uk/quippy
# HQ X
# HQ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
"""
This module defines the :class:`DynamicalSystem` and :class:`Dynamics`
classes, for carrying out molecular dynamics simulations.
.. module:: quippy.dynamicalsystem
:synopsis: Run molecular dynamics simulations
"""
import quippy
import numpy as np
import unittest
import quippytest
import ase.io
# class TestDynamicalSystem(quippytest.QuippyTestCase):
# def setUp(self):
# self.at = quippy.convert.ase_to_quip(ase.io.read('atoms_dynamicalsystem.xyz'))
# self.ds = quippy.dynamicalsystem.DynamicalSystem(self.at)
#
# # def test_atoms(self):
# # self.assertEqual(self.ds.atoms, self.at)
#
# def test_avgpos(self):
# self.assertArrayAlmostEqual(self.ds.atoms.avgpos, self.ds.atoms.pos)
#
# def test_avg_ke(self):
# self.assertArrayAlmostEqual(self.ds.atoms.avg_ke, 0.5 * self.ds.atoms.mass * self.ds.atoms.velo.norm2())
#
# def test_set_masses(self):
# """
# Test for regressions to GitHub issue #25
# """
# at = self.at.copy()
# at.add_property('mass', 0.0)
# at.mass[:] = at.get_masses() * quippy.convert.MASSCONVERT
# mass1 = at.mass.copy()
# ds = quippy.dynamicalsystem.Dynamics(at) # should not change masses
# self.assertArrayAlmostEqual(mass1, ds.atoms.mass)
#
#
# class GenericTestDynamics(object):
# def common_init(self):
# random.seed(1)
# system_reseed_rng(2065775975)
# self.at1 = diamond(5.44, 14)
# # Explicitly set the mass as IUPAC updates in
# # ase will not always sync with quippy and can
# # slightly change the final positions.
# self.at1.set_masses([28.085] * len(self.at1))
# self.at1.rattle(0.01)
# self.at2 = ase.Atoms(self.at1)
# self.dt = 1.0 # fs
# self.skin = 2.0
# self.pot = quippy.potential.Potential('IP SW', cutoff_skin=self.skin)
# self.orig_at = self.at1.copy()
# self.at1.set_calculator(self.pot)
# self.at2.set_calculator(self.pot)
# self.ds = DynamicalSystem(self.at1)
# self.dyn = Dynamics(self.at2, self.dt * fs, trajectory=None, logfile=None)
#
# def test_final_state(self):
# # print 'pos_ref = %r' % self.ds.atoms.get_positions()
# # print
# # print 'velo_ref = %r' % array(self.ds.atoms.velo.T*sqrt(MASSCONVERT))
# # print
#
# self.assertArrayAlmostEqual(self.ds.atoms.get_positions(), self.pos_ref)
# self.assertArrayAlmostEqual(self.dyn.atoms.get_positions(), self.pos_ref)
# self.assertArrayAlmostEqual(self.ds.atoms.velo.T * sqrt(MASSCONVERT), self.velo_ref)
# self.assertArrayAlmostEqual(self.dyn.atoms.get_velocities(), self.velo_ref)
#
#
# class TestDynamics_1step(GenericTestDynamics, quippytest.QuippyTestCase):
# pos_ref = array([[4.97540451e-03, -1.38126112e-03, 6.45355046e-03],
# [1.37520514e+00, 1.35768022e+00, 1.35766373e+00],
# [2.73574542e+00, 2.72766112e+00, -4.70585816e-03],
# [4.08545164e+00, 4.07535031e+00, 1.35535781e+00],
# [2.72242689e+00, -1.90944563e-02, 2.70278269e+00],
# [4.07437528e+00, 1.34984705e+00, 4.08313749e+00],
# [-9.05352772e-03, 2.70588380e+00, 2.73460483e+00],
# [1.35774766e+00, 4.08066033e+00, 4.06579153e+00]])
#
# velo_ref = array([[1.67917869e-04, 2.79017525e-05, -4.74029119e-04],
# [-5.11506283e-04, 4.41800437e-04, 1.03394949e-04],
# [-9.48996805e-04, -2.69235922e-04, -2.25428551e-04],
# [5.28728510e-04, -3.14943810e-04, 3.06554333e-04],
# [1.47525100e-04, 7.79166916e-04, 6.47972422e-04],
# [-3.70488567e-05, -4.99795865e-04, -1.01584331e-04],
# [5.43109420e-04, 1.38852057e-04, -1.04929014e-03],
# [1.10271046e-04, -3.03745565e-04, 7.92410433e-04]])
#
# def setUp(self):
# self.common_init()
#
# # advance one step with the DynamicalSystem + quippy Atoms
# self.ds.atoms.set_cutoff(self.pot.cutoff(), cutoff_skin=self.skin)
# self.ds.atoms.calc_connect()
#
# # use initial forces to set initial acceleration
# self.pot.calc(self.ds.atoms, args_str='force')
# self.ds.atoms.acc[...] = self.ds.atoms.force / self.ds.atoms.mass
#
# self.ds.advance_verlet1(self.dt)
# self.pot.calc(self.ds.atoms, args_str='force')
# self.ds.advance_verlet2(self.dt, self.ds.atoms.force)
#
# # Advance one step with the Dynamics wrapper
# f0 = self.dyn.atoms.get_forces()
# self.f1 = self.dyn.step(f0)
#
#
# class TestDynamics_50steps(GenericTestDynamics, quippytest.QuippyTestCase):
# pos_ref = array([[3.83982027e-03, -6.83299035e-03, -3.84829791e-03],
# [1.35831458e+00, 1.35029772e+00, 1.35533926e+00],
# [2.72050579e+00, 2.70597502e+00, 1.17293724e-03],
# [4.07926508e+00, 4.07607954e+00, 1.35518711e+00],
# [2.71969587e+00, -1.78006885e-03, 2.72309180e+00],
# [4.09123475e+00, 1.36339170e+00, 4.07478635e+00],
# [9.79061494e-03, 2.71688891e+00, 2.71690897e+00],
# [1.36422741e+00, 4.07258729e+00, 4.07844764e+00]])
#
# velo_ref = array([[-5.14500838e-03, -2.82954861e-03, 7.20818721e-04],
# [1.53654932e-03, -1.11170527e-02, 1.47967446e-05],
# [8.72682542e-03, -2.62427183e-03, 3.88571704e-03],
# [-1.07552557e-02, 4.18162297e-03, -1.83155420e-03],
# [-1.11039864e-03, -3.68321162e-03, -2.84845292e-03],
# [6.19661562e-03, 1.02390323e-02, -4.67696227e-04],
# [-2.05461501e-03, 5.34887189e-03, 4.60654485e-03],
# [2.60528742e-03, 4.84557566e-04, -4.08017401e-03]])
#
# def setUp(self):
# self.common_init()
#
# # advance 50 steps with the DynamicalSystem + quippy Atoms
# verbosity_push(PRINT_SILENT)
# self.ds.run(self.pot, dt=self.dt, n_steps=50)
# verbosity_pop()
#
# # Advance 50 steps with the Dynamics wrapper
# self.dyn.run(50)
#
#
# class TestDynamics_InitTemperature(GenericTestDynamics, quippytest.QuippyTestCase):
# pos_ref = array([[0.05803303, -0.04190969, 0.00749258],
# [1.34873105, 1.40383634, 1.36193793],
# [2.74431719, 2.72345406, -0.01449615],
# [4.09026562, 4.06058634, 1.42717456],
# [2.63832671, -0.01874479, 2.64817844],
# [4.08304536, 1.3141336, 4.04176215],
# [0.01976151, 2.752241, 2.78688919],
# [1.36439344, 4.08301027, 4.04214707]])
#
# velo_ref = array([[0.00601649, -0.01855508, 0.00017399],
# [0.00595795, -0.02294921, 0.02800064],
# [-0.02460223, 0.01526762, 0.01087747],
# [-0.00900424, 0.00726531, 0.01150711],
# [-0.01207218, -0.02462552, 0.01588084],
# [0.01408512, 0.02512257, -0.02771789],
# [0.02072795, 0.02207844, -0.00283951],
# [-0.00110884, -0.00360411, -0.03588265]])
#
# def setUp(self):
# self.common_init()
#
# # choose some random velocities for one case
# self.dyn.set_temperature(300.)
#
# # use same random initial velocities
# self.ds.atoms.velo[...] = transpose(self.dyn.atoms.get_velocities() / sqrt(MASSCONVERT))
#
# # advance 50 steps with the DynamicalSystem + quippy Atoms
# verbosity_push(PRINT_SILENT)
# self.ds.run(self.pot, dt=self.dt, n_steps=50)
# verbosity_pop()
#
# # Advance 50 steps with the Dynamics wrapper
# self.dyn.run(50)
#
#
# class TestDynamics_Thermostat(GenericTestDynamics, quippytest.QuippyTestCase):
# pos_ref = array([[1.04158945e-02, -9.87095573e-04, 4.63078398e-04],
# [1.37069437e+00, 1.35732274e+00, 1.36231260e+00],
# [2.73317987e+00, 2.72473320e+00, -2.53175180e-03],
# [4.09431672e+00, 4.07259172e+00, 1.34964664e+00],
# [2.72208620e+00, -1.54533813e-02, 2.70315737e+00],
# [4.07948880e+00, 1.34575563e+00, 4.07878517e+00],
# [-3.89972060e-03, 2.71244938e+00, 2.72906452e+00],
# [1.35610951e+00, 4.08095355e+00, 4.06509952e+00]])
#
# velo_ref = array([[0.01003832, -0.00199037, -0.01122285],
# [-0.0043836, 0.00514716, 0.01096999],
# [-0.00706223, -0.00601213, 0.0024837],
# [0.01965012, -0.00356979, -0.00803952],
# [-0.0028455, 0.00181915, -0.00144307],
# [0.00515436, -0.00648117, -0.01035976],
# [0.00507576, 0.0088829, -0.00847764],
# [0.00127142, -0.00010429, 0.00312452]])
#
# def setUp(self):
# self.common_init()
#
# system_reseed_rng(2065775975)
# self.ds.add_thermostat(THERMOSTAT_LANGEVIN, 300.0, tau=500.0)
#
# # advance 10 steps with the DynamicalSystem + quippy Atoms
# verbosity_push(PRINT_SILENT)
# self.ds.run(self.pot, dt=self.dt, n_steps=10)
# verbosity_pop()
#
# # we need same sequence of thermostat random forces
# system_reseed_rng(2065775975)
# self.dyn.add_thermostat(THERMOSTAT_LANGEVIN, 300.0, tau=500.0)
#
# # Advance 10 steps with the Dynamics wrapper
# self.dyn.run(10)
#
#
# if __name__ == '__main__':
# unittest.main()
| 44.195833
| 114
| 0.584897
| 1,305
| 10,607
| 4.688123
| 0.321839
| 0.026479
| 0.034161
| 0.005884
| 0.273128
| 0.219189
| 0.155116
| 0.12831
| 0.124224
| 0.10559
| 0
| 0.269449
| 0.270482
| 10,607
| 239
| 115
| 44.380753
| 0.521194
| 0.947016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
487e0484e7603598d799efcc52f13216f81ff2b2
| 37,256
|
py
|
Python
|
sdk/python/pulumi_aws/elasticache/replication_group.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/replication_group.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/replication_group.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ReplicationGroup']
class ReplicationGroup(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
apply_immediately: Optional[pulumi.Input[bool]] = None,
at_rest_encryption_enabled: Optional[pulumi.Input[bool]] = None,
auth_token: Optional[pulumi.Input[str]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
automatic_failover_enabled: Optional[pulumi.Input[bool]] = None,
availability_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
cluster_mode: Optional[pulumi.Input[pulumi.InputType['ReplicationGroupClusterModeArgs']]] = None,
engine: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
maintenance_window: Optional[pulumi.Input[str]] = None,
node_type: Optional[pulumi.Input[str]] = None,
notification_topic_arn: Optional[pulumi.Input[str]] = None,
number_cache_clusters: Optional[pulumi.Input[float]] = None,
parameter_group_name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[float]] = None,
replication_group_description: Optional[pulumi.Input[str]] = None,
replication_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
security_group_names: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
snapshot_arns: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
snapshot_name: Optional[pulumi.Input[str]] = None,
snapshot_retention_limit: Optional[pulumi.Input[float]] = None,
snapshot_window: Optional[pulumi.Input[str]] = None,
subnet_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
transit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an ElastiCache Replication Group resource.
For working with Memcached or single primary Redis instances (Cluster Mode Disabled), see the
`elasticache.Cluster` resource.
> **Note:** When you change an attribute, such as `engine_version`, by
default the ElastiCache API applies it in the next maintenance window. Because
of this, this provider may report a difference in its planning phase because the
actual modification has not yet taken place. You can use the
`apply_immediately` flag to instruct the service to apply the change
immediately. Using `apply_immediately` can result in a brief downtime as
servers reboot.
## Example Usage
### Redis Cluster Mode Disabled
To create a single shard primary with single read replica:
```python
import pulumi
import pulumi_aws as aws
example = aws.elasticache.ReplicationGroup("example",
automatic_failover_enabled=True,
availability_zones=[
"us-west-2a",
"us-west-2b",
],
node_type="cache.m4.large",
number_cache_clusters=2,
parameter_group_name="default.redis3.2",
port=6379,
replication_group_description="test description")
```
You have two options for adjusting the number of replicas:
* Adjusting `number_cache_clusters` directly. This will attempt to automatically add or remove replicas, but provides no granular control (e.g. preferred availability zone, cache cluster ID) for the added or removed replicas. This also currently expects cache cluster IDs in the form of `replication_group_id-00#`.
* Otherwise for fine grained control of the underlying cache clusters, they can be added or removed with the `elasticache.Cluster` resource and its `replication_group_id` attribute. In this situation, you will need to utilize [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) to prevent perpetual differences with the `number_cache_cluster` attribute.
```python
import pulumi
import pulumi_aws as aws
example = aws.elasticache.ReplicationGroup("example",
automatic_failover_enabled=True,
availability_zones=[
"us-west-2a",
"us-west-2b",
],
replication_group_description="test description",
node_type="cache.m4.large",
number_cache_clusters=2,
parameter_group_name="default.redis3.2",
port=6379)
replica = None
if 1 == True:
replica = aws.elasticache.Cluster("replica", replication_group_id=example.id)
```
### Redis Cluster Mode Enabled
To create two shards with a primary and a single read replica each:
```python
import pulumi
import pulumi_aws as aws
baz = aws.elasticache.ReplicationGroup("baz",
automatic_failover_enabled=True,
cluster_mode=aws.elasticache.ReplicationGroupClusterModeArgs(
num_node_groups=2,
replicas_per_node_group=1,
),
node_type="cache.t2.small",
parameter_group_name="default.redis3.2.cluster.on",
port=6379,
replication_group_description="test description")
```
> **Note:** We currently do not support passing a `primary_cluster_id` in order to create the Replication Group.
> **Note:** Automatic Failover is unavailable for Redis versions earlier than 2.8.6,
and unavailable on T1 node types. For T2 node types, it is only available on Redis version 3.2.4 or later with cluster mode enabled. See the [High Availability Using Replication Groups](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Replication.html) guide
for full details on using Replication Groups.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] apply_immediately: Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`.
:param pulumi.Input[bool] at_rest_encryption_enabled: Whether to enable encryption at rest.
:param pulumi.Input[str] auth_token: The password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`.
:param pulumi.Input[bool] auto_minor_version_upgrade: Specifies whether a minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Defaults to `true`.
:param pulumi.Input[bool] automatic_failover_enabled: Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`.
:param pulumi.Input[List[pulumi.Input[str]]] availability_zones: A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.
:param pulumi.Input[pulumi.InputType['ReplicationGroupClusterModeArgs']] cluster_mode: Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed.
:param pulumi.Input[str] engine: The name of the cache engine to be used for the clusters in this replication group. e.g. `redis`
:param pulumi.Input[str] engine_version: The version number of the cache engine to be used for the cache clusters in this replication group.
:param pulumi.Input[str] kms_key_id: The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`.
:param pulumi.Input[str] maintenance_window: Specifies the weekly time range for when maintenance
on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
:param pulumi.Input[str] node_type: The compute and memory capacity of the nodes in the node group.
:param pulumi.Input[str] notification_topic_arn: An Amazon Resource Name (ARN) of an
SNS topic to send ElastiCache notifications to. Example:
`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
:param pulumi.Input[float] number_cache_clusters: The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications.
:param pulumi.Input[str] parameter_group_name: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
:param pulumi.Input[float] port: The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379.
:param pulumi.Input[str] replication_group_description: A user-created description for the replication group.
:param pulumi.Input[str] replication_group_id: The replication group identifier. This parameter is stored as a lowercase string.
:param pulumi.Input[List[pulumi.Input[str]]] security_group_ids: One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud
:param pulumi.Input[List[pulumi.Input[str]]] security_group_names: A list of cache security group names to associate with this replication group.
:param pulumi.Input[List[pulumi.Input[str]]] snapshot_arns: A single-element string list containing an
Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
:param pulumi.Input[str] snapshot_name: The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
:param pulumi.Input[float] snapshot_retention_limit: The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them. For example, if you set
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
:param pulumi.Input[str] snapshot_window: The daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of your cache cluster. The minimum snapshot window is a 60 minute period. Example: `05:00-09:00`
:param pulumi.Input[str] subnet_group_name: The name of the cache subnet group to be used for the replication group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource
:param pulumi.Input[bool] transit_encryption_enabled: Whether to enable encryption in transit.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['apply_immediately'] = apply_immediately
__props__['at_rest_encryption_enabled'] = at_rest_encryption_enabled
__props__['auth_token'] = auth_token
__props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade
__props__['automatic_failover_enabled'] = automatic_failover_enabled
__props__['availability_zones'] = availability_zones
__props__['cluster_mode'] = cluster_mode
__props__['engine'] = engine
__props__['engine_version'] = engine_version
__props__['kms_key_id'] = kms_key_id
__props__['maintenance_window'] = maintenance_window
__props__['node_type'] = node_type
__props__['notification_topic_arn'] = notification_topic_arn
__props__['number_cache_clusters'] = number_cache_clusters
__props__['parameter_group_name'] = parameter_group_name
__props__['port'] = port
if replication_group_description is None:
raise TypeError("Missing required property 'replication_group_description'")
__props__['replication_group_description'] = replication_group_description
__props__['replication_group_id'] = replication_group_id
__props__['security_group_ids'] = security_group_ids
__props__['security_group_names'] = security_group_names
__props__['snapshot_arns'] = snapshot_arns
__props__['snapshot_name'] = snapshot_name
__props__['snapshot_retention_limit'] = snapshot_retention_limit
__props__['snapshot_window'] = snapshot_window
__props__['subnet_group_name'] = subnet_group_name
__props__['tags'] = tags
__props__['transit_encryption_enabled'] = transit_encryption_enabled
__props__['configuration_endpoint_address'] = None
__props__['member_clusters'] = None
__props__['primary_endpoint_address'] = None
super(ReplicationGroup, __self__).__init__(
'aws:elasticache/replicationGroup:ReplicationGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        apply_immediately: Optional[pulumi.Input[bool]] = None,
        at_rest_encryption_enabled: Optional[pulumi.Input[bool]] = None,
        auth_token: Optional[pulumi.Input[str]] = None,
        auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
        automatic_failover_enabled: Optional[pulumi.Input[bool]] = None,
        availability_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
        cluster_mode: Optional[pulumi.Input[pulumi.InputType['ReplicationGroupClusterModeArgs']]] = None,
        configuration_endpoint_address: Optional[pulumi.Input[str]] = None,
        engine: Optional[pulumi.Input[str]] = None,
        engine_version: Optional[pulumi.Input[str]] = None,
        kms_key_id: Optional[pulumi.Input[str]] = None,
        maintenance_window: Optional[pulumi.Input[str]] = None,
        member_clusters: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
        node_type: Optional[pulumi.Input[str]] = None,
        notification_topic_arn: Optional[pulumi.Input[str]] = None,
        number_cache_clusters: Optional[pulumi.Input[float]] = None,
        parameter_group_name: Optional[pulumi.Input[str]] = None,
        port: Optional[pulumi.Input[float]] = None,
        primary_endpoint_address: Optional[pulumi.Input[str]] = None,
        replication_group_description: Optional[pulumi.Input[str]] = None,
        replication_group_id: Optional[pulumi.Input[str]] = None,
        security_group_ids: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
        security_group_names: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
        snapshot_arns: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
        snapshot_name: Optional[pulumi.Input[str]] = None,
        snapshot_retention_limit: Optional[pulumi.Input[float]] = None,
        snapshot_window: Optional[pulumi.Input[str]] = None,
        subnet_group_name: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        transit_encryption_enabled: Optional[pulumi.Input[bool]] = None) -> 'ReplicationGroup':
    """
    Look up an existing ReplicationGroup resource by name and provider id,
    optionally seeding its state with the supplied property values.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[bool] apply_immediately: Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`.
    :param pulumi.Input[bool] at_rest_encryption_enabled: Whether to enable encryption at rest.
    :param pulumi.Input[str] auth_token: The password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`.
    :param pulumi.Input[bool] auto_minor_version_upgrade: Specifies whether minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Defaults to `true`.
    :param pulumi.Input[bool] automatic_failover_enabled: Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`.
    :param pulumi.Input[List[pulumi.Input[str]]] availability_zones: A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.
    :param pulumi.Input[pulumi.InputType['ReplicationGroupClusterModeArgs']] cluster_mode: Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed.
    :param pulumi.Input[str] configuration_endpoint_address: The address of the replication group configuration endpoint when cluster mode is enabled.
    :param pulumi.Input[str] engine: The name of the cache engine to be used for the clusters in this replication group. e.g. `redis`
    :param pulumi.Input[str] engine_version: The version number of the cache engine to be used for the cache clusters in this replication group.
    :param pulumi.Input[str] kms_key_id: The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`.
    :param pulumi.Input[str] maintenance_window: Specifies the weekly time range for when maintenance
           on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
           The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
    :param pulumi.Input[List[pulumi.Input[str]]] member_clusters: The identifiers of all the nodes that are part of this replication group.
    :param pulumi.Input[str] node_type: The compute and memory capacity of the nodes in the node group.
    :param pulumi.Input[str] notification_topic_arn: An Amazon Resource Name (ARN) of an
           SNS topic to send ElastiCache notifications to. Example:
           `arn:aws:sns:us-east-1:012345678999:my_sns_topic`
    :param pulumi.Input[float] number_cache_clusters: The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications.
    :param pulumi.Input[str] parameter_group_name: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
    :param pulumi.Input[float] port: The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379.
    :param pulumi.Input[str] primary_endpoint_address: (Redis only) The address of the endpoint for the primary node in the replication group, if the cluster mode is disabled.
    :param pulumi.Input[str] replication_group_description: A user-created description for the replication group.
    :param pulumi.Input[str] replication_group_id: The replication group identifier. This parameter is stored as a lowercase string.
    :param pulumi.Input[List[pulumi.Input[str]]] security_group_ids: One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud
    :param pulumi.Input[List[pulumi.Input[str]]] security_group_names: A list of cache security group names to associate with this replication group.
    :param pulumi.Input[List[pulumi.Input[str]]] snapshot_arns: A single-element string list containing an
           Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
           Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
    :param pulumi.Input[str] snapshot_name: The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
    :param pulumi.Input[float] snapshot_retention_limit: The number of days for which ElastiCache will
           retain automatic cache cluster snapshots before deleting them. For example, if you set
           SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
           before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
           Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
    :param pulumi.Input[str] snapshot_window: The daily time range (in UTC) during which ElastiCache will
           begin taking a daily snapshot of your cache cluster. The minimum snapshot window is a 60 minute period. Example: `05:00-09:00`
    :param pulumi.Input[str] subnet_group_name: The name of the cache subnet group to be used for the replication group.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource
    :param pulumi.Input[bool] transit_encryption_enabled: Whether to enable encryption in transit.
    """
    # Anchor the lookup on the provider-assigned resource id.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Forward every known state property verbatim; None entries simply leave
    # the corresponding attribute unpopulated on the returned resource.
    __props__ = {
        "apply_immediately": apply_immediately,
        "at_rest_encryption_enabled": at_rest_encryption_enabled,
        "auth_token": auth_token,
        "auto_minor_version_upgrade": auto_minor_version_upgrade,
        "automatic_failover_enabled": automatic_failover_enabled,
        "availability_zones": availability_zones,
        "cluster_mode": cluster_mode,
        "configuration_endpoint_address": configuration_endpoint_address,
        "engine": engine,
        "engine_version": engine_version,
        "kms_key_id": kms_key_id,
        "maintenance_window": maintenance_window,
        "member_clusters": member_clusters,
        "node_type": node_type,
        "notification_topic_arn": notification_topic_arn,
        "number_cache_clusters": number_cache_clusters,
        "parameter_group_name": parameter_group_name,
        "port": port,
        "primary_endpoint_address": primary_endpoint_address,
        "replication_group_description": replication_group_description,
        "replication_group_id": replication_group_id,
        "security_group_ids": security_group_ids,
        "security_group_names": security_group_names,
        "snapshot_arns": snapshot_arns,
        "snapshot_name": snapshot_name,
        "snapshot_retention_limit": snapshot_retention_limit,
        "snapshot_window": snapshot_window,
        "subnet_group_name": subnet_group_name,
        "tags": tags,
        "transit_encryption_enabled": transit_encryption_enabled,
    }
    return ReplicationGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applyImmediately")
def apply_immediately(self) -> pulumi.Output[bool]:
    """Whether modifications are applied immediately, or during the next maintenance window. Default is `false`."""
    return pulumi.get(self, "apply_immediately")
@property
@pulumi.getter(name="atRestEncryptionEnabled")
def at_rest_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether encryption at rest is enabled."""
    return pulumi.get(self, "at_rest_encryption_enabled")
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> pulumi.Output[Optional[str]]:
    """The password used to access a password protected server.

    Can be specified only if `transit_encryption_enabled = true`.
    """
    return pulumi.get(self, "auth_token")
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:
    """Whether minor engine upgrades are applied automatically to the underlying Cache Cluster instances during the maintenance window.

    Defaults to `true`.
    """
    return pulumi.get(self, "auto_minor_version_upgrade")
@property
@pulumi.getter(name="automaticFailoverEnabled")
def automatic_failover_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

    If true, Multi-AZ is enabled for this replication group; if false, it is
    disabled. Must be enabled for Redis (cluster mode enabled) replication
    groups. Defaults to `false`.
    """
    return pulumi.get(self, "automatic_failover_enabled")
@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> pulumi.Output[Optional[List[str]]]:
    """A list of EC2 availability zones in which the replication group's cache clusters will be created.

    The order of the availability zones in the list is not important.
    """
    return pulumi.get(self, "availability_zones")
@property
@pulumi.getter(name="clusterMode")
def cluster_mode(self) -> pulumi.Output['outputs.ReplicationGroupClusterMode']:
    """Create a native redis cluster.

    `automatic_failover_enabled` must be set to true. Cluster Mode documented
    below. Only 1 `cluster_mode` block is allowed.
    """
    return pulumi.get(self, "cluster_mode")
@property
@pulumi.getter(name="configurationEndpointAddress")
def configuration_endpoint_address(self) -> pulumi.Output[str]:
    """The address of the replication group configuration endpoint when cluster mode is enabled."""
    return pulumi.get(self, "configuration_endpoint_address")
@property
@pulumi.getter
def engine(self) -> pulumi.Output[Optional[str]]:
    """The name of the cache engine used for the clusters in this replication group, e.g. `redis`."""
    return pulumi.get(self, "engine")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> pulumi.Output[str]:
    """The version number of the cache engine used for the cache clusters in this replication group."""
    return pulumi.get(self, "engine_version")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[Optional[str]]:
    """The ARN of the key used for encryption at rest.

    If not supplied, service managed encryption is used. Can be specified only
    if `at_rest_encryption_enabled = true`.
    """
    return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="maintenanceWindow")
def maintenance_window(self) -> pulumi.Output[str]:
    """The weekly time range during which maintenance on the cache cluster is performed.

    The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum
    maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
    """
    return pulumi.get(self, "maintenance_window")
@property
@pulumi.getter(name="memberClusters")
def member_clusters(self) -> pulumi.Output[List[str]]:
    """The identifiers of all the nodes that are part of this replication group."""
    return pulumi.get(self, "member_clusters")
@property
@pulumi.getter(name="nodeType")
def node_type(self) -> pulumi.Output[str]:
    """The compute and memory capacity of the nodes in the node group."""
    return pulumi.get(self, "node_type")
@property
@pulumi.getter(name="notificationTopicArn")
def notification_topic_arn(self) -> pulumi.Output[Optional[str]]:
    """An Amazon Resource Name (ARN) of an SNS topic to send ElastiCache notifications to.

    Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic`
    """
    return pulumi.get(self, "notification_topic_arn")
@property
@pulumi.getter(name="numberCacheClusters")
def number_cache_clusters(self) -> pulumi.Output[float]:
    """The number of cache clusters (primary and replicas) this replication group will have.

    If Multi-AZ is enabled, the value must be at least 2. Updates will occur
    before other modifications.
    """
    return pulumi.get(self, "number_cache_clusters")
@property
@pulumi.getter(name="parameterGroupName")
def parameter_group_name(self) -> pulumi.Output[str]:
    """The name of the parameter group associated with this replication group.

    If this argument is omitted, the default cache parameter group for the
    specified engine is used.
    """
    return pulumi.get(self, "parameter_group_name")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[float]]:
    """The port number on which each of the cache nodes accepts connections.

    For Memcache the default is 11211, and for Redis the default port is 6379.
    """
    return pulumi.get(self, "port")
@property
@pulumi.getter(name="primaryEndpointAddress")
def primary_endpoint_address(self) -> pulumi.Output[str]:
    """(Redis only) The address of the endpoint for the primary node in the replication group, if cluster mode is disabled."""
    return pulumi.get(self, "primary_endpoint_address")
@property
@pulumi.getter(name="replicationGroupDescription")
def replication_group_description(self) -> pulumi.Output[str]:
    """A user-created description for the replication group."""
    return pulumi.get(self, "replication_group_description")
@property
@pulumi.getter(name="replicationGroupId")
def replication_group_id(self) -> pulumi.Output[str]:
    """The replication group identifier.

    This parameter is stored as a lowercase string.
    """
    return pulumi.get(self, "replication_group_id")
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> pulumi.Output[List[str]]:
    """One or more Amazon VPC security groups associated with this replication group.

    Use this parameter only when you are creating a replication group in an
    Amazon Virtual Private Cloud.
    """
    return pulumi.get(self, "security_group_ids")
@property
@pulumi.getter(name="securityGroupNames")
def security_group_names(self) -> pulumi.Output[List[str]]:
    """A list of cache security group names associated with this replication group."""
    return pulumi.get(self, "security_group_names")
@property
@pulumi.getter(name="snapshotArns")
def snapshot_arns(self) -> pulumi.Output[Optional[List[str]]]:
    """A single-element string list containing an Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.

    Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
    """
    return pulumi.get(self, "snapshot_arns")
@property
@pulumi.getter(name="snapshotName")
def snapshot_name(self) -> pulumi.Output[Optional[str]]:
    """The name of a snapshot from which to restore data into the new node group.

    Changing the `snapshot_name` forces a new resource.
    """
    return pulumi.get(self, "snapshot_name")
@property
@pulumi.getter(name="snapshotRetentionLimit")
def snapshot_retention_limit(self) -> pulumi.Output[Optional[float]]:
    """The number of days for which ElastiCache retains automatic cache cluster snapshots before deleting them.

    For example, if SnapshotRetentionLimit is set to 5, a snapshot taken today
    is retained for 5 days before being deleted. If the value is set to zero
    (0), backups are turned off. Note that a `snapshot_retention_limit` is not
    supported on cache.t1.micro or cache.t2.* cache nodes.
    """
    return pulumi.get(self, "snapshot_retention_limit")
@property
@pulumi.getter(name="snapshotWindow")
def snapshot_window(self) -> pulumi.Output[str]:
    """The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the cache cluster.

    The minimum snapshot window is a 60 minute period. Example: `05:00-09:00`
    """
    return pulumi.get(self, "snapshot_window")
@property
@pulumi.getter(name="subnetGroupName")
def subnet_group_name(self) -> pulumi.Output[str]:
    """The name of the cache subnet group used for the replication group."""
    return pulumi.get(self, "subnet_group_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """A map of tags assigned to the resource."""
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="transitEncryptionEnabled")
def transit_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether encryption in transit is enabled."""
    return pulumi.get(self, "transit_encryption_enabled")
def translate_output_property(self, prop):
    """Map a camelCase provider property name to its snake_case Python form.

    Unknown names pass through unchanged.
    """
    snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake or prop
def translate_input_property(self, prop):
    """Map a snake_case Python property name to its camelCase provider form.

    Unknown names pass through unchanged.
    """
    camel = _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return camel or prop
| 61.377265
| 404
| 0.694653
| 4,704
| 37,256
| 5.314201
| 0.100128
| 0.060725
| 0.044804
| 0.028082
| 0.771902
| 0.736739
| 0.716617
| 0.702976
| 0.690095
| 0.688855
| 0
| 0.007544
| 0.22434
| 37,256
| 606
| 405
| 61.478548
| 0.857499
| 0.520641
| 0
| 0.311189
| 1
| 0
| 0.160499
| 0.071614
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118881
| false
| 0.003497
| 0.024476
| 0.006993
| 0.262238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6faf99f34b7889143b3ef157f4214ca04cce0fa7
| 15,847
|
py
|
Python
|
tests/http/test_protocol_handler.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
tests/http/test_protocol_handler.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
tests/http/test_protocol_handler.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import base64
import selectors
from typing import Any, cast
import pytest
from unittest import mock
from pytest_mock import MockerFixture
from proxy.http import HttpProtocolHandler, httpHeaders
from proxy.http.proxy import HttpProxyPlugin
from proxy.common.flag import FlagParser
from proxy.http.parser import HttpParser, httpParserTypes, httpParserStates
from proxy.common.utils import bytes_
from proxy.common.plugins import Plugins
from proxy.common.version import __version__
from proxy.http.responses import (
BAD_GATEWAY_RESPONSE_PKT, PROXY_AUTH_FAILED_RESPONSE_PKT,
PROXY_TUNNEL_ESTABLISHED_RESPONSE_PKT,
)
from proxy.core.connection import TcpClientConnection
from proxy.common.constants import (
CRLF, PLUGIN_HTTP_PROXY, PLUGIN_PROXY_AUTH, PLUGIN_WEB_SERVER,
)
from ..test_assertions import Assertions
def mock_selector_for_client_read(self: Any) -> None:
    """Configure the mocked selector so the client connection reports as readable.

    The next ``select()`` call on the patched selector will return exactly one
    ready event for the client socket's file descriptor.
    """
    fd = self._conn.fileno()
    ready_key = selectors.SelectorKey(
        fileobj=fd,
        fd=fd,
        events=selectors.EVENT_READ,
        data=None,
    )
    self.mock_selector.return_value.select.return_value = [
        (ready_key, selectors.EVENT_READ),
    ]
class TestHttpProtocolHandlerWithoutServerMock(Assertions):
    """Protocol handler tests that do NOT mock the upstream server connection.

    Only the client socket factory (``socket.fromfd``) and the selector are
    patched, so these cases exercise failure paths that never successfully
    reach an upstream: bad gateway and proxy authentication rejection.
    """

    @pytest.fixture(autouse=True)  # type: ignore[misc]
    def _setUp(self, mocker: MockerFixture) -> None:
        # Patch the client socket factory and the selector the handler polls.
        self.mock_fromfd = mocker.patch('socket.fromfd')
        self.mock_selector = mocker.patch('selectors.DefaultSelector')
        self.fileno = 10
        self._addr = ('127.0.0.1', 54382)
        self._conn = self.mock_fromfd.return_value
        self.http_server_port = 65535
        # Threaded mode with both proxy and web-server plugins loaded.
        self.flags = FlagParser.initialize(threaded=True)
        self.flags.plugins = Plugins.load([
            bytes_(PLUGIN_HTTP_PROXY),
            bytes_(PLUGIN_WEB_SERVER),
        ])
        self.protocol_handler = HttpProtocolHandler(
            TcpClientConnection(self._conn, self._addr),
            flags=self.flags,
        )
        self.protocol_handler.initialize()

    @pytest.mark.asyncio  # type: ignore[misc]
    async def test_proxy_connection_failed(self) -> None:
        """A GET for an unreachable host must queue a bad-gateway response."""
        mock_selector_for_client_read(self)
        self._conn.recv.return_value = CRLF.join([
            b'GET http://unknown.domain HTTP/1.1',
            b'Host: unknown.domain',
            CRLF,
        ])
        await self.protocol_handler._run_once()
        # First buffered packet to the client is the 502 response.
        self.assertEqual(
            self.protocol_handler.work.buffer[0],
            BAD_GATEWAY_RESPONSE_PKT,
        )

    @pytest.mark.asyncio  # type: ignore[misc]
    async def test_proxy_authentication_failed(self) -> None:
        """Without credentials, an auth-enabled proxy must queue an auth-failed response."""
        self._conn = self.mock_fromfd.return_value
        mock_selector_for_client_read(self)
        # Rebuild the handler with the proxy-auth plugin and a required
        # user:pass credential; the request below carries no authorization.
        flags = FlagParser.initialize(
            auth_code=base64.b64encode(b'user:pass'),
            threaded=True,
        )
        flags.plugins = Plugins.load([
            bytes_(PLUGIN_HTTP_PROXY),
            bytes_(PLUGIN_WEB_SERVER),
            bytes_(PLUGIN_PROXY_AUTH),
        ])
        self.protocol_handler = HttpProtocolHandler(
            TcpClientConnection(self._conn, self._addr), flags=flags,
        )
        self.protocol_handler.initialize()
        self._conn.recv.return_value = CRLF.join([
            b'GET http://abhinavsingh.com HTTP/1.1',
            b'Host: abhinavsingh.com',
            CRLF,
        ])
        await self.protocol_handler._run_once()
        self.assertEqual(
            self.protocol_handler.work.buffer[0],
            PROXY_AUTH_FAILED_RESPONSE_PKT,
        )
class TestHttpProtocolHandler(Assertions):
    """End-to-end protocol-handler tests driven through mocked sockets,
    a mocked selector, and a mocked upstream TcpServerConnection."""
    @pytest.fixture(autouse=True) # type: ignore[misc]
    def _setUp(self, mocker: MockerFixture) -> None:
        # Patch socket, selector and upstream-connection machinery so the
        # handler runs entirely against mocks.
        self.mock_fromfd = mocker.patch('socket.fromfd')
        self.mock_selector = mocker.patch('selectors.DefaultSelector')
        self.mock_server_connection = mocker.patch(
            'proxy.http.proxy.server.TcpServerConnection',
        )
        self.fileno = 10
        self._addr = ('127.0.0.1', 54382)
        self._conn = self.mock_fromfd.return_value
        self.http_server_port = 65535
        self.flags = FlagParser.initialize(threaded=True)
        self.flags.plugins = Plugins.load([
            bytes_(PLUGIN_HTTP_PROXY),
            bytes_(PLUGIN_WEB_SERVER),
        ])
        self.protocol_handler = HttpProtocolHandler(
            TcpClientConnection(self._conn, self._addr),
            flags=self.flags,
        )
        self.protocol_handler.initialize()
    @pytest.mark.asyncio # type: ignore[misc]
    async def test_http_get(self) -> None:
        """A proxied GET is parsed incrementally (request line first, then
        headers) and the rewritten request is queued and flushed upstream."""
        server = self.mock_server_connection.return_value
        server.connect.return_value = True
        server.buffer_size.return_value = 0
        self.mock_selector_for_client_read_and_server_write(server)
        # Send request line
        assert self.http_server_port is not None
        self._conn.recv.return_value = (
            b'GET http://localhost:%d HTTP/1.1' %
            self.http_server_port
        ) + CRLF
        await self.protocol_handler._run_once()
        # Only the request line has arrived, so parsing must not be complete.
        self.assertEqual(
            self.protocol_handler.request.state,
            httpParserStates.LINE_RCVD,
        )
        self.assertNotEqual(
            self.protocol_handler.request.state,
            httpParserStates.COMPLETE,
        )
        # Send headers and blank line, thus completing HTTP request
        assert self.http_server_port is not None
        self._conn.recv.return_value = CRLF.join([
            b'User-Agent: proxy.py/%s' % bytes_(__version__),
            b'Host: localhost:%d' % self.http_server_port,
            b'Accept: */*',
            b'Proxy-Connection: Keep-Alive',
            CRLF,
        ])
        await self.assert_data_queued(server)
        await self.protocol_handler._run_once()
        server.flush.assert_called_once()
    async def assert_tunnel_response(
        self,
        server: mock.Mock,
    ) -> None:
        """Run one handler iteration and verify a CONNECT tunnel was set up:
        upstream connected exactly once, the tunnel-established response is
        buffered for the client, and nothing has been queued upstream yet."""
        await self.protocol_handler._run_once()
        self.assertTrue(
            cast(
                HttpProxyPlugin,
                self.protocol_handler.plugin,
            ).upstream is not None,
        )
        self.assertEqual(
            self.protocol_handler.work.buffer[0],
            PROXY_TUNNEL_ESTABLISHED_RESPONSE_PKT,
        )
        self.mock_server_connection.assert_called_once()
        server.connect.assert_called_once()
        server.queue.assert_not_called()
        server.closed = False
        # The buffered packet must also parse as a complete 200 response.
        parser = HttpParser(httpParserTypes.RESPONSE_PARSER)
        parser.parse(self.protocol_handler.work.buffer[0].tobytes())
        self.assertEqual(parser.state, httpParserStates.COMPLETE)
        assert parser.code is not None
        self.assertEqual(int(parser.code), 200)
    @pytest.mark.asyncio # type: ignore[misc]
    async def test_http_tunnel(self) -> None:
        """A CONNECT tunnel passes a subsequent raw request through to the
        upstream server and flushes it exactly once."""
        server = self.mock_server_connection.return_value
        server.connect.return_value = True
        def has_buffer() -> bool:
            # Upstream "has data buffered" exactly once something was queued.
            return cast(bool, server.queue.called)
        server.has_buffer.side_effect = has_buffer
        # Four successive select() results, one per _run_once() iteration:
        # 1) client readable (CONNECT request), 2) client writable (flush the
        # tunnel-established response), 3) client readable (tunneled GET),
        # 4) upstream writable (flush queued data).
        self.mock_selector.return_value.select.side_effect = [
            [
                (
                    selectors.SelectorKey(
                        fileobj=self._conn.fileno(),
                        fd=self._conn.fileno(),
                        events=selectors.EVENT_READ,
                        data=None,
                    ),
                    selectors.EVENT_READ,
                ),
            ],
            [
                (
                    selectors.SelectorKey(
                        fileobj=self._conn.fileno(),
                        fd=self._conn.fileno(),
                        events=0,
                        data=None,
                    ),
                    selectors.EVENT_WRITE,
                ),
            ],
            [
                (
                    selectors.SelectorKey(
                        fileobj=self._conn.fileno(),
                        fd=self._conn.fileno(),
                        events=selectors.EVENT_READ,
                        data=None,
                    ),
                    selectors.EVENT_READ,
                ),
            ],
            [
                (
                    selectors.SelectorKey(
                        fileobj=server.connection.fileno(),
                        fd=server.connection.fileno(),
                        events=0,
                        data=None,
                    ),
                    selectors.EVENT_WRITE,
                ),
            ],
        ]
        assert self.http_server_port is not None
        self._conn.recv.return_value = CRLF.join([
            b'CONNECT localhost:%d HTTP/1.1' % self.http_server_port,
            b'Host: localhost:%d' % self.http_server_port,
            b'User-Agent: proxy.py/%s' % bytes_(__version__),
            b'Proxy-Connection: Keep-Alive',
            CRLF,
        ])
        await self.assert_tunnel_response(server)
        # Dispatch tunnel established response to client
        await self.protocol_handler._run_once()
        await self.assert_data_queued_to_server(server)
        await self.protocol_handler._run_once()
        self.assertEqual(server.queue.call_count, 1)
        server.flush.assert_called_once()
    @pytest.mark.asyncio # type: ignore[misc]
    async def test_authenticated_proxy_http_get(self) -> None:
        """With valid basic credentials the GET is accepted and queued
        upstream (proxy-specific headers stripped — see assert_data_queued)."""
        self._conn = self.mock_fromfd.return_value
        mock_selector_for_client_read(self)
        server = self.mock_server_connection.return_value
        server.connect.return_value = True
        server.buffer_size.return_value = 0
        # Rebuild the handler with auth enabled.
        flags = FlagParser.initialize(
            auth_code=base64.b64encode(b'user:pass'),
            threaded=True,
        )
        flags.plugins = Plugins.load([
            bytes_(PLUGIN_HTTP_PROXY),
            bytes_(PLUGIN_WEB_SERVER),
        ])
        self.protocol_handler = HttpProtocolHandler(
            TcpClientConnection(self._conn, self._addr), flags=flags,
        )
        self.protocol_handler.initialize()
        assert self.http_server_port is not None
        # Request line without a trailing CRLF: parser must stay INITIALIZED.
        self._conn.recv.return_value = b'GET http://localhost:%d HTTP/1.1' % self.http_server_port
        await self.protocol_handler._run_once()
        self.assertEqual(
            self.protocol_handler.request.state,
            httpParserStates.INITIALIZED,
        )
        self._conn.recv.return_value = CRLF
        await self.protocol_handler._run_once()
        self.assertEqual(
            self.protocol_handler.request.state,
            httpParserStates.LINE_RCVD,
        )
        assert self.http_server_port is not None
        self._conn.recv.return_value = CRLF.join([
            b'User-Agent: proxy.py/%s' % bytes_(__version__),
            b'Host: localhost:%d' % self.http_server_port,
            b'Accept: */*',
            httpHeaders.PROXY_CONNECTION + b': Keep-Alive',
            httpHeaders.PROXY_AUTHORIZATION + b': Basic dXNlcjpwYXNz',
            CRLF,
        ])
        await self.assert_data_queued(server)
    @pytest.mark.asyncio # type: ignore[misc]
    async def test_authenticated_proxy_http_tunnel(self) -> None:
        """With valid basic credentials a CONNECT tunnel is established and
        tunneled data is queued and flushed upstream."""
        server = self.mock_server_connection.return_value
        server.connect.return_value = True
        server.buffer_size.return_value = 0
        self._conn = self.mock_fromfd.return_value
        self.mock_selector_for_client_read_and_server_write(server)
        flags = FlagParser.initialize(
            auth_code=base64.b64encode(b'user:pass'),
            threaded=True,
        )
        flags.plugins = Plugins.load([
            bytes_(PLUGIN_HTTP_PROXY),
            bytes_(PLUGIN_WEB_SERVER),
        ])
        self.protocol_handler = HttpProtocolHandler(
            TcpClientConnection(self._conn, self._addr), flags=flags,
        )
        self.protocol_handler.initialize()
        assert self.http_server_port is not None
        self._conn.recv.return_value = CRLF.join([
            b'CONNECT localhost:%d HTTP/1.1' % self.http_server_port,
            b'Host: localhost:%d' % self.http_server_port,
            b'User-Agent: proxy.py/%s' % bytes_(__version__),
            httpHeaders.PROXY_CONNECTION + b': Keep-Alive',
            httpHeaders.PROXY_AUTHORIZATION + b': Basic dXNlcjpwYXNz',
            CRLF,
        ])
        await self.assert_tunnel_response(server)
        self.protocol_handler.work.flush()
        await self.assert_data_queued_to_server(server)
        await self.protocol_handler._run_once()
        server.flush.assert_called_once()
    def mock_selector_for_client_read_and_server_write(
        self, server: mock.Mock,
    ) -> None:
        """Program three successive select() results: client readable,
        client readable again, then the upstream connection writable."""
        self.mock_selector.return_value.select.side_effect = [
            [
                (
                    selectors.SelectorKey(
                        fileobj=self._conn.fileno(),
                        fd=self._conn.fileno(),
                        events=selectors.EVENT_READ,
                        data=None,
                    ),
                    selectors.EVENT_READ,
                ),
            ],
            [
                (
                    selectors.SelectorKey(
                        fileobj=self._conn.fileno(),
                        fd=self._conn.fileno(),
                        events=0,
                        data=None,
                    ),
                    selectors.EVENT_READ,
                ),
            ],
            [
                (
                    selectors.SelectorKey(
                        fileobj=server.connection.fileno(),
                        fd=server.connection.fileno(),
                        events=0,
                        data=None,
                    ),
                    selectors.EVENT_WRITE,
                ),
            ],
        ]
    async def assert_data_queued(
        self, server: mock.Mock,
    ) -> None:
        """Run one iteration, then verify the completed request was queued
        upstream exactly once in rewritten form: absolute URL reduced to '/',
        proxy-specific headers dropped, and a Via header added."""
        await self.protocol_handler._run_once()
        self.assertEqual(
            self.protocol_handler.request.state,
            httpParserStates.COMPLETE,
        )
        self.mock_server_connection.assert_called_once()
        server.connect.assert_called_once()
        server.closed = False
        assert self.http_server_port is not None
        pkt = CRLF.join([
            b'GET / HTTP/1.1',
            b'User-Agent: proxy.py/%s' % bytes_(__version__),
            b'Host: localhost:%d' % self.http_server_port,
            b'Accept: */*',
            b'Via: 1.1 proxy.py v%s' % bytes_(__version__),
            CRLF,
        ])
        server.queue.assert_called_once()
        self.assertEqual(server.queue.call_args_list[0][0][0].tobytes(), pkt)
        server.buffer_size.return_value = len(pkt)
    async def assert_data_queued_to_server(self, server: mock.Mock) -> None:
        """Verify the tunnel-established response was sent to the client,
        then feed a raw GET through the tunnel and check that it is queued
        upstream verbatim (no rewriting inside an established tunnel)."""
        assert self.http_server_port is not None
        self.assertEqual(
            self._conn.send.call_args[0][0],
            PROXY_TUNNEL_ESTABLISHED_RESPONSE_PKT,
        )
        pkt = CRLF.join([
            b'GET / HTTP/1.1',
            b'Host: localhost:%d' % self.http_server_port,
            b'User-Agent: proxy.py/%s' % bytes_(__version__),
            CRLF,
        ])
        self._conn.recv.return_value = pkt
        await self.protocol_handler._run_once()
        server.queue.assert_called_once_with(pkt)
        server.buffer_size.return_value = len(pkt)
        server.flush.assert_not_called()
| 34.828571
| 98
| 0.583328
| 1,650
| 15,847
| 5.335758
| 0.124848
| 0.029986
| 0.071218
| 0.040891
| 0.797024
| 0.761131
| 0.728987
| 0.705475
| 0.683553
| 0.658451
| 0
| 0.008673
| 0.323342
| 15,847
| 454
| 99
| 34.905286
| 0.812086
| 0.036852
| 0
| 0.707692
| 0
| 0
| 0.055585
| 0.00611
| 0
| 0
| 0
| 0
| 0.117949
| 1
| 0.012821
| false
| 0.007692
| 0.04359
| 0.002564
| 0.064103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6fb5898dce7dbfa7e8270a66a10d33adb22bc15f
| 129
|
py
|
Python
|
src/pyturb/power_plant/__init__.py
|
MRod5/pyturb
|
08b4016528fc50733fff58d967d1000bf1e634c9
|
[
"MIT"
] | 32
|
2017-04-13T12:25:23.000Z
|
2022-01-23T01:23:19.000Z
|
src/pyturb/power_plant/__init__.py
|
sergiodobler/pyturb
|
248ea0ddc939c6d6f2c8d6b3f9a3d13976c22910
|
[
"MIT"
] | 12
|
2017-11-13T23:19:15.000Z
|
2021-11-28T20:18:28.000Z
|
src/pyturb/power_plant/__init__.py
|
sergiodobler/pyturb
|
248ea0ddc939c6d6f2c8d6b3f9a3d13976c22910
|
[
"MIT"
] | 10
|
2017-05-06T20:05:46.000Z
|
2022-03-19T13:31:52.000Z
|
from .control_volume import ControlVolume
from .intake import Intake
from .nozzle import Nozzle
from .combustor import Combustor
| 25.8
| 41
| 0.844961
| 17
| 129
| 6.352941
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124031
| 129
| 4
| 42
| 32.25
| 0.955752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6fc46543e84163bd9e61cfc71f3645f0b613027e
| 401
|
py
|
Python
|
tests/fixtures/hnclient.py
|
Rahul09123/zester
|
e878ba5ce66156a642bc7513a69dc4175f9393be
|
[
"ISC"
] | 10
|
2015-10-17T16:12:30.000Z
|
2021-12-09T04:08:47.000Z
|
tests/fixtures/hnclient.py
|
Rahul09123/zester
|
e878ba5ce66156a642bc7513a69dc4175f9393be
|
[
"ISC"
] | null | null | null |
tests/fixtures/hnclient.py
|
Rahul09123/zester
|
e878ba5ce66156a642bc7513a69dc4175f9393be
|
[
"ISC"
] | 2
|
2020-05-05T04:04:28.000Z
|
2020-09-30T14:19:16.000Z
|
from zester import MultipleClient, Attribute
class HNClient(MultipleClient):
    """Zester scraper for the Hacker News front page.

    Each Attribute pairs a jQuery-style selector with a modifier
    expression; presumably zester's MultipleClient evaluates them
    against the fetched page to extract one field per story — confirm
    against the zester documentation.
    """
    url = "http://news.ycombinator.com/"
    # Story title text, taken from the title links.
    title = Attribute(selector="$('.title a')", modifier="$(el).html()")
    # Target URL of the same title links.
    link = Attribute(selector="$('.title a')", modifier="$(el).attr('href')")
    # Score text with the trailing ' points' suffix stripped.
    points = Attribute(selector="$('.subtext span')",
                       modifier="$(el).html().replace(' points', '')")
| 40.1
| 77
| 0.59601
| 40
| 401
| 5.975
| 0.625
| 0.213389
| 0.1841
| 0.192469
| 0.276151
| 0.276151
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182045
| 401
| 9
| 78
| 44.555556
| 0.728659
| 0
| 0
| 0
| 0
| 0
| 0.341646
| 0.054863
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
d23ad33663c6194c3fbc525e9df83323c88b24f6
| 195
|
py
|
Python
|
skyhook/resources/__init__.py
|
Mikfr83/skyhook
|
1af5afdd8ad77a46ea918be487b100d376642db8
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
skyhook/resources/__init__.py
|
Mikfr83/skyhook
|
1af5afdd8ad77a46ea918be487b100d376642db8
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
skyhook/resources/__init__.py
|
Mikfr83/skyhook
|
1af5afdd8ad77a46ea918be487b100d376642db8
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import os
# Absolute paths to the bundled Skyhook images, resolved relative to this
# package's directory so they work regardless of the current working dir.
_resource_dir = os.path.dirname(__file__)
logo_transparent = os.path.join(_resource_dir, "sky_hook_logo_transparent.png")
label_transparent = os.path.join(_resource_dir, "sky_hook_label_transparent.png")
| 65
| 93
| 0.820513
| 30
| 195
| 4.8
| 0.4
| 0.166667
| 0.236111
| 0.291667
| 0.625
| 0.625
| 0.625
| 0.625
| 0.625
| 0.625
| 0
| 0
| 0.046154
| 195
| 3
| 93
| 65
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0.30102
| 0.30102
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d24e3732913dedcf657630c8a290d0006d31dbcd
| 84
|
py
|
Python
|
tcex/metrics/__init__.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | 18
|
2017-01-09T22:17:49.000Z
|
2022-01-24T20:46:42.000Z
|
tcex/metrics/__init__.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | 84
|
2017-04-11T13:47:49.000Z
|
2022-03-21T20:12:57.000Z
|
tcex/metrics/__init__.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | 43
|
2017-01-05T20:40:26.000Z
|
2022-03-31T19:18:02.000Z
|
"""Metrics module for TcEx Framework"""
# flake8: noqa
from .metrics import Metrics
| 21
| 39
| 0.75
| 11
| 84
| 5.727273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 0.142857
| 84
| 3
| 40
| 28
| 0.861111
| 0.559524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d2791bb3e0498c2046e3bd00abc4a18cb0b036af
| 1,241
|
py
|
Python
|
translate/translate/models/mymodel.py
|
naoki-maeda/translate
|
21e94e8871562c2cc4ac5afc414a87658d146b0e
|
[
"MIT"
] | null | null | null |
translate/translate/models/mymodel.py
|
naoki-maeda/translate
|
21e94e8871562c2cc4ac5afc414a87658d146b0e
|
[
"MIT"
] | null | null | null |
translate/translate/models/mymodel.py
|
naoki-maeda/translate
|
21e94e8871562c2cc4ac5afc414a87658d146b0e
|
[
"MIT"
] | null | null | null |
from sqlalchemy import (
Column,
Index,
Integer,
Text,
TIMESTAMP,
Boolean
)
from .meta import Base
import datetime
class Login(Base):
    """Login credential record (email + password).

    NOTE(review): password is stored as plain Text here — confirm that
    hashing happens before rows are inserted.
    """
    __tablename__ = 'login'
    id = Column(Integer, primary_key=True)
    email = Column(Text, nullable=False)
    password = Column(Text, nullable=False)
class Users(Base):
    """Application user account with audit timestamps."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(Text, nullable=False)
    password = Column(Text, nullable=False)
    email = Column(Text, nullable=False)
    admin = Column(Boolean, nullable=False)
    # Pass the callable itself, not its result: ``datetime.datetime.now()``
    # would be evaluated once at import time, stamping every row with the
    # process start time. SQLAlchemy invokes a callable default per INSERT.
    add_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
    update_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
    # NOTE(review): update_timestamp likely also wants
    # onupdate=datetime.datetime.now so it refreshes on UPDATE — confirm
    # intent before adding.
class Language(Base):
    """Translation entry holding the same phrase in several languages."""
    __tablename__ = 'language'
    id = Column(Integer, primary_key=True)
    name = Column(Text, nullable=False)
    japanese = Column(Text, nullable=False)
    english = Column(Text, nullable=False)
    chinese = Column(Text, nullable=False)
    lock = Column(Boolean, nullable=False)
    # Pass the callable itself, not its result: ``datetime.datetime.now()``
    # would be evaluated once at import time, stamping every row with the
    # process start time. SQLAlchemy invokes a callable default per INSERT.
    add_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
    update_timestamp = Column(TIMESTAMP, nullable=False, default=datetime.datetime.now)
    # NOTE(review): update_timestamp likely also wants
    # onupdate=datetime.datetime.now so it refreshes on UPDATE — confirm
    # intent before adding.
| 30.268293
| 89
| 0.709106
| 143
| 1,241
| 6.020979
| 0.244755
| 0.226481
| 0.188153
| 0.240418
| 0.674797
| 0.636469
| 0.602787
| 0.602787
| 0.602787
| 0.504065
| 0
| 0
| 0.178082
| 1,241
| 40
| 90
| 31.025
| 0.844118
| 0
| 0
| 0.382353
| 0
| 0
| 0.014504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.058824
| 0.088235
| 0
| 0.794118
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
d285fb4a87eb55eacf3430f06f7235380d7117ac
| 100
|
py
|
Python
|
car_forum/forum/tags.py
|
ReubenHawley/Car_forum
|
dea9bfd68b3089cb75155a628a572fd6e0c3ebfc
|
[
"MIT",
"Unlicense"
] | null | null | null |
car_forum/forum/tags.py
|
ReubenHawley/Car_forum
|
dea9bfd68b3089cb75155a628a572fd6e0c3ebfc
|
[
"MIT",
"Unlicense"
] | 4
|
2021-01-19T23:43:30.000Z
|
2021-01-19T23:45:02.000Z
|
car_forum/forum/tags.py
|
ReubenHawley/Car_forum
|
dea9bfd68b3089cb75155a628a572fd6e0c3ebfc
|
[
"MIT",
"Unlicense"
] | null | null | null |
def tags(func):
    """Return a callable that surrounds its argument with <func> ... </func>.

    ``func`` is interpolated verbatim into the tags, so it is normally a
    tag-name string, e.g. ``tags("b")("hi")`` -> ``"<b> hi </b>"``.
    """
    def wrapper(msg):
        opening = f"<{func}>"
        closing = f"</{func}>"
        return f"{opening} {msg} {closing}"
    return wrapper
| 20
| 42
| 0.55
| 13
| 100
| 4.230769
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26
| 100
| 4
| 43
| 25
| 0.743243
| 0
| 0
| 0
| 0
| 0
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9631e307694c0c73438df7f099752ab83747a828
| 414
|
py
|
Python
|
uploads/core/views.py
|
1998krzysiek/chatbot
|
8dce98bc828efcb52382142e39ebe3ca31e5b623
|
[
"MIT"
] | 1
|
2021-05-09T07:30:21.000Z
|
2021-05-09T07:30:21.000Z
|
uploads/core/views.py
|
1998krzysiek/chatbot
|
8dce98bc828efcb52382142e39ebe3ca31e5b623
|
[
"MIT"
] | null | null | null |
uploads/core/views.py
|
1998krzysiek/chatbot
|
8dce98bc828efcb52382142e39ebe3ca31e5b623
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
def home(request):
    """Render the ``home.html`` template."""
    return render(request, 'home.html')
def bosak(request):
    """Render the ``bosak.html`` template."""
    return render(request, 'bosak.html')
def biedron(request):
    """Render the ``biedron.html`` template."""
    return render(request, 'biedron.html')
def duda(request):
    """Render the ``duda.html`` template."""
    return render(request, 'duda.html')
def holownia(request):
    """Render the ``holownia.html`` template."""
    return render(request, 'holownia.html')
| 17.25
| 55
| 0.727053
| 52
| 414
| 5.788462
| 0.346154
| 0.215947
| 0.315615
| 0.431894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154589
| 414
| 23
| 56
| 18
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0.128641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0.166667
| 0.416667
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
966b3fc0603dcf705de63af584ea6d7b83d57a0f
| 100
|
py
|
Python
|
useintest/modules/bissell/__init__.py
|
wtsi-hgi/startfortest
|
426343c0ff340d4d83575cdafe2c4184707e7693
|
[
"MIT"
] | 1
|
2019-06-18T20:56:42.000Z
|
2019-06-18T20:56:42.000Z
|
useintest/modules/bissell/__init__.py
|
wtsi-hgi/useintest
|
426343c0ff340d4d83575cdafe2c4184707e7693
|
[
"MIT"
] | 3
|
2017-09-21T12:14:44.000Z
|
2018-02-19T11:18:47.000Z
|
useintest/modules/bissell/__init__.py
|
wtsi-hgi/useintest
|
426343c0ff340d4d83575cdafe2c4184707e7693
|
[
"MIT"
] | null | null | null |
from useintest.modules.bissell.bissell import BissellServiceController, bissell_service_controllers
| 50
| 99
| 0.91
| 10
| 100
| 8.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 100
| 1
| 100
| 100
| 0.936842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
967a5134145546de689de251840deaca889570ab
| 167
|
py
|
Python
|
users/admin.py
|
priyanshisharma/teesco-backend
|
f6b35f3fc1f21e60e266ff3baed0e8b4f128d7cb
|
[
"Apache-2.0"
] | 10
|
2020-04-17T10:34:46.000Z
|
2021-06-08T04:08:16.000Z
|
users/admin.py
|
priyanshisharma/teesco-backend
|
f6b35f3fc1f21e60e266ff3baed0e8b4f128d7cb
|
[
"Apache-2.0"
] | 122
|
2020-04-16T18:27:12.000Z
|
2021-06-09T03:54:31.000Z
|
users/admin.py
|
priyanshisharma/teesco-backend
|
f6b35f3fc1f21e60e266ff3baed0e8b4f128d7cb
|
[
"Apache-2.0"
] | 20
|
2020-04-16T17:44:50.000Z
|
2021-02-16T16:00:33.000Z
|
from django.contrib import admin
from users.models import User, Notification
# Register your models here.
# Make the User and Notification models manageable via the Django admin.
admin.site.register(User)
admin.site.register(Notification)
| 23.857143
| 43
| 0.820359
| 23
| 167
| 5.956522
| 0.565217
| 0.131387
| 0.248175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101796
| 167
| 7
| 44
| 23.857143
| 0.913333
| 0.155689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
969368948cd77a3f2733deb7f53a4f72b3296539
| 97
|
py
|
Python
|
sparv/modules/hunpos/__init__.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 17
|
2018-09-21T07:01:45.000Z
|
2022-02-24T23:26:49.000Z
|
sparv/modules/hunpos/__init__.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 146
|
2018-11-13T19:13:25.000Z
|
2022-03-31T09:57:56.000Z
|
sparv/modules/hunpos/__init__.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 5
|
2019-02-14T00:50:38.000Z
|
2021-03-29T15:37:41.000Z
|
"""Part of Speech annotation using Hunpos."""
from . import hunpos, morphtable, morphtable_hist
| 24.25
| 49
| 0.762887
| 12
| 97
| 6.083333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134021
| 97
| 3
| 50
| 32.333333
| 0.869048
| 0.402062
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7369e89f021e0aeb51b113bdf7d25682678180db
| 3,226
|
py
|
Python
|
example/ex_2_agcu_icosahedrons/plt_individuals.py
|
loevlie/ce_expansion
|
17417b9467914dd91ee8e0325cfdc3bd19ad7f1e
|
[
"MIT"
] | 1
|
2020-11-04T08:01:22.000Z
|
2020-11-04T08:01:22.000Z
|
example/ex_2_agcu_icosahedrons/plt_individuals.py
|
loevlie/ce_expansion
|
17417b9467914dd91ee8e0325cfdc3bd19ad7f1e
|
[
"MIT"
] | 2
|
2021-04-19T23:45:54.000Z
|
2022-02-21T17:40:41.000Z
|
example/ex_2_agcu_icosahedrons/plt_individuals.py
|
loevlie/ce_expansion
|
17417b9467914dd91ee8e0325cfdc3bd19ad7f1e
|
[
"MIT"
] | 3
|
2021-05-10T14:25:28.000Z
|
2022-02-18T01:09:05.000Z
|
"""Plot excess energy vs. dopant content for bimetallic nanoparticles.

For each alloy/shape combination, queries the ce_expansion results
database and plots excess energy against dopant fraction for a series
of particle sizes, saving one PNG per alloy/shape.
"""
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np

import ce_expansion.npdb.db_inter

DEFAULT_DPI = 600  # Dots per inch
DEFAULT_SCALE = 8  # Scale in inches

# Magic-number sizes for icosahedral-family shapes; one rainbow color each.
sizes = [13, 55, 147, 309, 561, 923, 1415, 2057, 2869, 5083]
colors = cm.rainbow(np.linspace(0, 1, len(sizes)))

# Set up the state machine
alloys = ["AgCu", "AgAu", "AuCu"]
symbols = (("icosahedron", "solid", "Ico"),
           ("cuboctahedron", "dashed", "Cub"),
           ("elongated-pentagonal-bipyramid", "dotted", "EPB"))

for alloy in alloys:
    for shape, style, lbl in symbols:
        fig = plt.figure()
        fig.set_size_inches(2 * DEFAULT_SCALE, DEFAULT_SCALE)
        ax = plt.subplot(111)
        for size, coloration in zip(sizes, colors):
            # Perform the database query
            query = ce_expansion.npdb.db_inter.get_bimet_result(metals=alloy, shape=shape, num_atoms=size)
            query = sorted(query, key=lambda i: i.n_metal2)
            # Calculate the copper content and pull excess energy
            cu_content = list(map(lambda i: i.n_metal2 / size, query))
            excess_energy = list(map(lambda i: i.EE, query))
            # Make the plot
            ax.plot(cu_content, excess_energy,
                    color=coloration,
                    label=str(size) + "_" + lbl,
                    linestyle=style)
        dopant = alloy[2:]
        plt.title(alloy + "_" + lbl)
        plt.xlabel("% " + dopant)
        plt.ylabel("Excess Energy (eV)")
        plt.tight_layout()
        # Shrink the axes so the legend fits to the right of the plot.
        chartbox = ax.get_position()
        ax.set_position([chartbox.x0, chartbox.y0, chartbox.width * 0.6, chartbox.height])
        ax.legend(loc="upper left", fontsize=20, bbox_to_anchor=(0.98, 0.8), ncol=1)
        plt.savefig("Excess_Energies_" + alloy + "_" + lbl + ".png", dpi=DEFAULT_DPI)
        plt.close()

# Get cubes
# NOTE(review): this section reuses `alloy` and `dopant` left over from the
# loop above (i.e. the last alloy, "AuCu") — confirm that is intended.
shape = "fcc-cube"
lbl = "cube"
style = "dashdot"
sizes = [13, 63, 171, 365, 665, 1099, 1687, 2457, 3429, 4631, 6083, 7813, 9841, 12195]
colors = cm.rainbow(np.linspace(0, 1, len(sizes)))
fig = plt.figure()
fig.set_size_inches(2 * DEFAULT_SCALE, DEFAULT_SCALE)
ax = plt.subplot(111)
for size, coloration in zip(sizes, colors):
    # Perform the database query
    query = ce_expansion.npdb.db_inter.get_bimet_result(metals=alloy, shape=shape, num_atoms=size)
    query = sorted(query, key=lambda i: i.n_metal2)
    # Calculate the copper content and pull excess energy
    cu_content = list(map(lambda i: i.n_metal2 / size, query))
    excess_energy = list(map(lambda i: i.EE, query))
    # Make the plot
    ax.plot(cu_content, excess_energy,
            color=coloration,
            label=str(size) + "_" + lbl,
            linestyle=style)
plt.xlabel("% " + dopant)
plt.title(alloy + "_" + lbl)
plt.ylabel("Excess Energy (eV)")
plt.tight_layout()
chartbox = ax.get_position()
ax.set_position([chartbox.x0, chartbox.y0, chartbox.width * 0.6, chartbox.height])
ax.legend(loc="upper left", fontsize=20, bbox_to_anchor=(0.98, 0.8), ncol=1)
plt.savefig("Excess_Energies_" + alloy + "_" + lbl + ".png", dpi=DEFAULT_DPI)
# Bug fix: the original ended with a bare `plt.close` (attribute reference,
# never called), leaking the last figure. Call it.
plt.close()
| 37.511628
| 106
| 0.611283
| 436
| 3,226
| 4.394495
| 0.353211
| 0.050104
| 0.025052
| 0.018789
| 0.755219
| 0.72547
| 0.72547
| 0.72547
| 0.72547
| 0.688935
| 0
| 0.052983
| 0.256975
| 3,226
| 85
| 107
| 37.952941
| 0.74635
| 0.083075
| 0
| 0.677419
| 0
| 0
| 0.073659
| 0.010183
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
736ae94f9fa07351f833581d89603a8f707974c7
| 85
|
py
|
Python
|
tccli/services/waf/__init__.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | 47
|
2018-05-31T11:26:25.000Z
|
2022-03-08T02:12:45.000Z
|
tccli/services/waf/__init__.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | 23
|
2018-06-14T10:46:30.000Z
|
2022-02-28T02:53:09.000Z
|
tccli/services/waf/__init__.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | 22
|
2018-10-22T09:49:45.000Z
|
2022-03-30T08:06:04.000Z
|
# -*- coding: utf-8 -*-
from tccli.services.waf.waf_client import action_caller
| 21.25
| 55
| 0.694118
| 12
| 85
| 4.75
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.164706
| 85
| 4
| 56
| 21.25
| 0.788732
| 0.247059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7378f08b14e5a9a22c66b1c1003a0cafd8b9ae18
| 100
|
py
|
Python
|
OnibusFlexivel/main.py
|
Drawiin/estrutura-de-dados
|
41531709346c41397d7b87c9a9664401dfbd593f
|
[
"MIT"
] | 1
|
2020-03-31T00:21:14.000Z
|
2020-03-31T00:21:14.000Z
|
OnibusFlexivel/main.py
|
Drawiin/estrutura-de-dados
|
41531709346c41397d7b87c9a9664401dfbd593f
|
[
"MIT"
] | null | null | null |
OnibusFlexivel/main.py
|
Drawiin/estrutura-de-dados
|
41531709346c41397d7b87c9a9664401dfbd593f
|
[
"MIT"
] | null | null | null |
from routeCalculator import calculateBestRoute
# Demo entry point: print the best route for three sample stops.
# NOTE(review): the meaning of the (a, b) tuples depends on
# routeCalculator.calculateBestRoute — confirm against that module.
print(calculateBestRoute([(1, 2), (2, 3), (7, 1)]))
| 25
| 51
| 0.72
| 12
| 100
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 0.11
| 100
| 3
| 52
| 33.333333
| 0.741573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
738927f961bd172b6ad2bdd2ea49f3ffd201ff5a
| 21,348
|
py
|
Python
|
tests/test_max_weight_matching.py
|
darknight009/retworkx
|
f0ad15fa7e85aad73e0c2e352ec07172333571b5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_max_weight_matching.py
|
darknight009/retworkx
|
f0ad15fa7e85aad73e0c2e352ec07172333571b5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_max_weight_matching.py
|
darknight009/retworkx
|
f0ad15fa7e85aad73e0c2e352ec07172333571b5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# These tests are adapated from the networkx test cases:
# https://github.com/networkx/networkx/blob/3351206a3ce5b3a39bb2fc451e93ef545b96c95b/networkx/algorithms/tests/test_matching.py
import random
import fixtures
import networkx
import testtools
import retworkx
def match_dict_to_set(match):
    """Collapse a matching dict ``{u: v, v: u}`` into a set of pairs,
    keeping exactly one ``(u, v)`` tuple per matched edge."""
    unordered_edges = {frozenset(pair) for pair in match.items()}
    result = set()
    for edge in unordered_edges:
        u, v = edge
        result.add((u, v))
    return result
class TestMaxWeightMatching(testtools.TestCase):
def setUp(self):
super().setUp()
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
def compare_match_sets(self, rx_match, expected_match):
for (u, v) in rx_match:
if (u, v) not in expected_match and \
(v, u) not in expected_match:
self.fail("Element %s and it's reverse %s not found in "
"expected output.\nretworkx output: %s\nexpected "
"output: %s" % (
(u, v), (v, u), rx_match, expected_match))
def compare_rx_nx_sets(self, rx_graph, rx_matches, nx_matches, seed,
nx_graph):
def get_rx_weight(edge):
weight = rx_graph.get_edge_data(*edge)
if weight is None:
return 1
return weight
def get_nx_weight(edge):
weight = nx_graph.get_edge_data(*edge)
if not weight:
return 1
return weight['weight']
not_match = False
for (u, v) in rx_matches:
if (u, v) not in nx_matches:
if (v, u) not in nx_matches:
print("seed %s failed. Element %s and it's "
"reverse %s not found in networkx output.\nretworkx"
" output: %s\nnetworkx output: %s\nedge list: %s\n"
"falling back to checking for a valid solution" % (
seed, (u, v), (v, u), rx_matches,
nx_matches, list(rx_graph.weighted_edge_list())))
not_match = True
break
if not_match:
self.assertTrue(retworkx.is_matching(rx_graph, rx_matches),
"%s is not a valid matching" % rx_matches)
self.assertTrue(retworkx.is_maximal_matching(rx_graph, rx_matches),
"%s is not a maximal matching" % rx_matches)
self.assertEqual(sum(map(get_rx_weight, rx_matches)),
sum(map(get_nx_weight, nx_matches)))
def test_empty_graph(self):
graph = retworkx.PyGraph()
self.assertEqual(retworkx.max_weight_matching(graph), set())
def test_single_edge(self):
graph = retworkx.PyGraph()
graph.add_nodes_from([0, 1])
graph.add_edges_from([(0, 1, 1)])
self.compare_match_sets(
retworkx.max_weight_matching(graph, verify_optimum=True),
{(0, 1), })
def test_single_edge_no_verification(self):
graph = retworkx.PyGraph()
graph.add_nodes_from([0, 1])
graph.add_edges_from([(0, 1, 1)])
self.compare_match_sets(
retworkx.max_weight_matching(graph, verify_optimum=False),
{(0, 1), })
def test_single_self_edge(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([(0, 0, 100)])
self.assertEqual(retworkx.max_weight_matching(graph), set())
def test_small_graph(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([(1, 2, 10), (2, 3, 11)])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(2, 3), })
def test_path_graph(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list(
[(1, 2, 5), (2, 3, 11), (3, 4, 5)])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(2, 3), })
self.compare_match_sets(
retworkx.max_weight_matching(graph, True, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 2), (3, 4)})
def test_negative_weights(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 2),
(1, 3, -2),
(2, 3, 1),
(2, 4, -1),
(3, 4, -6),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 2), })
self.compare_match_sets(
retworkx.max_weight_matching(graph, True, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 3), (2, 4)})
def test_s_blossom(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(0, 1, 8),
(0, 2, 9),
(1, 2, 10),
(2, 3, 7),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(0, 1), (2, 3)})
graph.extend_from_weighted_edge_list([(0, 5, 5), (3, 4, 6)])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(0, 5), (1, 2), (3, 4)})
def test_s_t_blossom(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 9),
(1, 3, 8),
(2, 3, 10),
(1, 4, 5),
(4, 5, 4),
(1, 6, 3),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 6), (2, 3), (4, 5)})
graph.remove_edge(1, 6)
graph.remove_edge(4, 5)
graph.extend_from_weighted_edge_list([(4, 5, 3), (1, 6, 4)])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 6), (2, 3), (4, 5)})
graph.remove_edge(1, 6)
graph.add_edge(3, 6, 4)
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 2), (3, 6), (4, 5)})
def test_s_t_blossom_with_removed_nodes(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 9),
(1, 3, 8),
(2, 3, 10),
(1, 4, 5),
(4, 5, 4),
(1, 6, 3),
])
node_id = graph.add_node(None)
graph.remove_node(5)
graph.add_edge(4, node_id, 4)
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 6), (2, 3), (4, 7)})
graph.remove_edge(1, 6)
graph.remove_edge(4, 7)
graph.extend_from_weighted_edge_list([(4, node_id, 3), (1, 6, 4)])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 6), (2, 3), (4, 7)})
graph.remove_edge(1, 6)
graph.add_edge(3, 6, 4)
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 2), (3, 6), (4, 7)})
def test_nested_s_blossom(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 9),
(1, 3, 9),
(2, 3, 10),
(2, 4, 8),
(3, 5, 8),
(4, 5, 10),
(5, 6, 6),
])
expected = {(1, 3), (2, 4), (5, 6)}
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
expected)
def test_nested_s_blossom_relabel(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 10),
(1, 7, 10),
(2, 3, 12),
(3, 4, 20),
(3, 5, 20),
(4, 5, 25),
(5, 6, 10),
(6, 7, 10),
(7, 8, 8),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 2), (3, 4), (5, 6), (7, 8)})
def test_nested_s_blossom_expand(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 8),
(1, 3, 8),
(2, 3, 10),
(2, 4, 12),
(3, 5, 12),
(4, 5, 14),
(4, 6, 12),
(5, 7, 12),
(6, 7, 14),
(7, 8, 12),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 2), (3, 5), (4, 6), (7, 8)})
def test_s_blossom_relabel_expand(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 23),
(1, 5, 22),
(1, 6, 15),
(2, 3, 25),
(3, 4, 22),
(4, 5, 25),
(4, 8, 14),
(5, 7, 13),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
{(1, 6), (2, 3), (4, 8), (5, 7)})
def test_nested_s_blossom_relabel_expand(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 19),
(1, 3, 20),
(1, 8, 8),
(2, 3, 25),
(2, 4, 18),
(3, 5, 18),
(4, 5, 13),
(4, 7, 7),
(5, 6, 7),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
match_dict_to_set(
{1: 8, 2: 3, 3: 2, 4: 7, 5: 6, 6: 5, 7: 4, 8: 1}))
def test_blossom_relabel_multiple_paths(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 45),
(1, 5, 45),
(2, 3, 50),
(3, 4, 45),
(4, 5, 50),
(1, 6, 30),
(3, 9, 35),
(4, 8, 35),
(5, 7, 26),
(9, 10, 5),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
match_dict_to_set(
{1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10,
10: 9}))
def test_blossom_relabel_multiple_path_alternate(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 45),
(1, 5, 45),
(2, 3, 50),
(3, 4, 45),
(4, 5, 50),
(1, 6, 30),
(3, 9, 35),
(4, 8, 26),
(5, 7, 40),
(9, 10, 5),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
match_dict_to_set(
{1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10,
10: 9}))
def test_blossom_relabel_multiple_paths_least_slack(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 45),
(1, 5, 45),
(2, 3, 50),
(3, 4, 45),
(4, 5, 50),
(1, 6, 30),
(3, 9, 35),
(4, 8, 28),
(5, 7, 26),
(9, 10, 5),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
match_dict_to_set(
{1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10,
10: 9}))
def test_nested_blossom_expand_recursively(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 40),
(1, 3, 40),
(2, 3, 60),
(2, 4, 55),
(3, 5, 55),
(4, 5, 50),
(1, 8, 15),
(5, 7, 30),
(7, 6, 10),
(8, 10, 10),
(4, 9, 30),
])
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
match_dict_to_set(
{1: 2, 2: 1, 3: 5, 4: 9, 5: 3, 6: 7, 7: 6, 8: 10, 9: 4,
10: 8}))
def test_nested_blossom_augmented(self):
graph = retworkx.PyGraph()
graph.extend_from_weighted_edge_list([
(1, 2, 45),
(1, 7, 45),
(2, 3, 50),
(3, 4, 45),
(4, 5, 95),
(4, 6, 94),
(5, 6, 94),
(6, 7, 50),
(1, 8, 30),
(3, 11, 35),
(5, 9, 36),
(7, 10, 26),
(11, 12, 5),
])
expected = {
1: 8,
2: 3,
3: 2,
4: 6,
5: 9,
6: 4,
7: 10,
8: 1,
9: 5,
10: 7,
11: 12,
12: 11,
}
self.compare_match_sets(
retworkx.max_weight_matching(graph, weight_fn=lambda x: x,
verify_optimum=True),
match_dict_to_set(expected))
def test_gnp_random_against_networkx(self):
for i in range(1024):
# TODO: add back subTest usage on new testtools release
rx_graph = retworkx.undirected_gnp_random_graph(10, .75,
seed=42 + i)
nx_graph = networkx.Graph(list(rx_graph.edge_list()))
nx_matches = networkx.max_weight_matching(nx_graph)
rx_matches = retworkx.max_weight_matching(rx_graph,
verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches,
42 + i, nx_graph)
def test_gnp_random_against_networkx_with_weight(self):
for i in range(1024):
# TODO: add back subTest usage on new testtools release
random.seed(i)
rx_graph = retworkx.undirected_gnp_random_graph(10, .75,
seed=42 + i)
for edge in rx_graph.edge_list():
rx_graph.update_edge(*edge, random.randint(0, 5000))
nx_graph = networkx.Graph(
[(x[0], x[1],
{'weight': x[2]}) for x in rx_graph.weighted_edge_list()])
nx_matches = networkx.max_weight_matching(nx_graph)
rx_matches = retworkx.max_weight_matching(
rx_graph, weight_fn=lambda x: x, verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches,
42 + i, nx_graph)
def test_gnp_random_against_networkx_with_negative_weight(self):
for i in range(1024):
# TODO: add back subTest usage on new testtools release
random.seed(i)
rx_graph = retworkx.undirected_gnp_random_graph(10, .75,
seed=42 + i)
for edge in rx_graph.edge_list():
rx_graph.update_edge(*edge, random.randint(-5000, 5000))
nx_graph = networkx.Graph(
[(x[0], x[1],
{'weight': x[2]}) for x in rx_graph.weighted_edge_list()])
nx_matches = networkx.max_weight_matching(nx_graph)
rx_matches = retworkx.max_weight_matching(
rx_graph, weight_fn=lambda x: x, verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches,
42 + i, nx_graph)
def test_gnp_random_against_networkx_max_cardinality(self):
rx_graph = retworkx.undirected_gnp_random_graph(10, .78, seed=428)
nx_graph = networkx.Graph(list(rx_graph.edge_list()))
nx_matches = networkx.max_weight_matching(
nx_graph, maxcardinality=True)
rx_matches = retworkx.max_weight_matching(
rx_graph, max_cardinality=True, verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches, 428,
nx_graph)
def test_gnp_random_against_networkx_with_weight_max_cardinality(self):
for i in range(1024):
# TODO: add back subTest usage on new testtools release
random.seed(i)
rx_graph = retworkx.undirected_gnp_random_graph(10, .75,
seed=42 + i)
for edge in rx_graph.edge_list():
rx_graph.update_edge(*edge, random.randint(0, 5000))
nx_graph = networkx.Graph(
[(x[0], x[1],
{'weight': x[2]}) for x in rx_graph.weighted_edge_list()])
nx_matches = networkx.max_weight_matching(nx_graph,
maxcardinality=True)
rx_matches = retworkx.max_weight_matching(
rx_graph, weight_fn=lambda x: x, max_cardinality=True,
verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches,
42 + i, nx_graph)
def test_gnp_random__networkx_with_negative_weight_max_cardinality(self):
for i in range(1024):
# TODO: add back subTest usage on new testtools release
random.seed(i)
rx_graph = retworkx.undirected_gnp_random_graph(10, .75,
seed=42 + i)
for edge in rx_graph.edge_list():
rx_graph.update_edge(*edge, random.randint(-5000, 5000))
nx_graph = networkx.Graph(
[(x[0], x[1],
{'weight': x[2]}) for x in rx_graph.weighted_edge_list()])
nx_matches = networkx.max_weight_matching(nx_graph,
maxcardinality=True)
rx_matches = retworkx.max_weight_matching(
rx_graph, weight_fn=lambda x: x, max_cardinality=True,
verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches,
42 + i, nx_graph)
def test_gnm_random_against_networkx(self):
rx_graph = retworkx.undirected_gnm_random_graph(10, 13, seed=42)
nx_graph = networkx.Graph(list(rx_graph.edge_list()))
nx_matches = networkx.max_weight_matching(nx_graph)
rx_matches = retworkx.max_weight_matching(rx_graph,
verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches, 42,
nx_graph)
def test_gnm_random_against_networkx_max_cardinality(self):
rx_graph = retworkx.undirected_gnm_random_graph(10, 12, seed=42)
nx_graph = networkx.Graph(list(rx_graph.edge_list()))
nx_matches = networkx.max_weight_matching(
nx_graph, maxcardinality=True)
rx_matches = retworkx.max_weight_matching(
rx_graph, max_cardinality=True, verify_optimum=True)
self.compare_rx_nx_sets(rx_graph, rx_matches, nx_matches, 42,
nx_graph)
| 39.098901
| 127
| 0.499719
| 2,609
| 21,348
| 3.835952
| 0.08969
| 0.031475
| 0.073042
| 0.08743
| 0.780176
| 0.759193
| 0.728018
| 0.717126
| 0.71213
| 0.696443
| 0
| 0.068889
| 0.387343
| 21,348
| 545
| 128
| 39.170642
| 0.696307
| 0.045531
| 0
| 0.602062
| 0
| 0
| 0.019551
| 0
| 0
| 0
| 0
| 0.001835
| 0.010309
| 1
| 0.070103
| false
| 0
| 0.010309
| 0.002062
| 0.092784
| 0.002062
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
738d39af604cf65717cfc82c5ce2410825a47159
| 120
|
py
|
Python
|
recommendations/admin.py
|
ImmaculateObsession/nest
|
8f384e8847ea2e0d639b4defef11d4b226e44461
|
[
"MIT"
] | 1
|
2015-04-09T01:30:38.000Z
|
2015-04-09T01:30:38.000Z
|
recommendations/admin.py
|
ImmaculateObsession/nest
|
8f384e8847ea2e0d639b4defef11d4b226e44461
|
[
"MIT"
] | null | null | null |
recommendations/admin.py
|
ImmaculateObsession/nest
|
8f384e8847ea2e0d639b4defef11d4b226e44461
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from recommendations.models import Recommendation
admin.site.register(Recommendation)
| 24
| 49
| 0.866667
| 14
| 120
| 7.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 120
| 4
| 50
| 30
| 0.945455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
73faac9f2799960158df3c7cc269cc26d34693db
| 12,454
|
py
|
Python
|
CA1_plasticity/model/stimulation_protocols.py
|
tomko-neuron/CA1_plasticity
|
45e3734dbc0f404c1f85e6b766d32bd43b0d39e8
|
[
"MIT"
] | null | null | null |
CA1_plasticity/model/stimulation_protocols.py
|
tomko-neuron/CA1_plasticity
|
45e3734dbc0f404c1f85e6b766d32bd43b0d39e8
|
[
"MIT"
] | null | null | null |
CA1_plasticity/model/stimulation_protocols.py
|
tomko-neuron/CA1_plasticity
|
45e3734dbc0f404c1f85e6b766d32bd43b0d39e8
|
[
"MIT"
] | null | null | null |
"""
Title: stimulation_protocols.py
Author: Matus Tomko
Mail: matus.tomko __at__ fmph.uniba.sk
"""
import numpy as np
from neuron import h
from CA1_plasticity.model.utils import Synapse
class StimulationProtocol:
"""
A class used to set a stimulation protocol
...
Attributes
----------
setting : dict
the setting dictionary
net_cons : list
the list of neuron.hoc.NetCons
ppStims : list
the list of neuron.hoc.spGen2s
vec_stims : list
the list of neuron.hoc.VecStims
Methods
-------
create_VecStim(t_vec, synapse)
Creates a vector of stimulus times.
set_Dong_sequential_stimulation(synapses)
Sets the sequential stimulation stimulation protocol using Vecstim objects for Dong et al experiments.
set_Pavlowsky_Alarcon_HFS(synapses)
Sets the HFS stimulation protocol using Vecstim objects for Pavlowsky & Alarcon experiments.
set_Pavlowsky_Alarcon_LFS(synapses)
Sets the LFS stimulation protocol using Vecstim objects for Pavlowsky & Alarcon experiments.
set_Pavlowsky_Alarcon_PP(synapses)
Sets the paired-pulses stimulation protocol using Vecstim objects for Pavlowsky & Alarcon experiments.
set_ppStim(synapses)
Sets the paired-pulses stimulation protocol using spGen2 objects.
set_square_pulse(synapses)
Sets the square pulse stimulation protocol using Vecstim objects.
set_theta_burst(synapses)
Sets the theta burst stimulation protocol using Vecstim objects.
"""
def __init__(self, setting):
"""
Parameters
----------
setting : dict
the dictionary containing setting
"""
self.setting = setting
self.net_cons = []
self.ppStims = []
self.vec_stims = []
def create_VecStim(self, t_vec, synapse, weight):
"""
Creates a vector stream of events for given synapse.
Parameters
----------
t_vec : numpy.ndarray
the time vector
synapse : Synapse
synapse
weight : float
the stimulus weight
"""
vec = h.Vector(t_vec)
vec_stim = h.VecStim()
vec_stim.play(vec)
self.vec_stims.append(vec_stim)
nc = h.NetCon(vec_stim, synapse.synapse, 0, 0, weight)
nc.record(synapse.input_spikes)
self.net_cons.append(nc)
def set_Dong_sequential_stimulation(self, synapses):
"""
Sets the sequential stimulation stimulation protocol using Vecstim objects for Dong et al experiments.
Parameters
----------
synapses : dict
the dictionary containing synapses
"""
for sec in synapses:
for syn in synapses[sec]:
if np.random.rand() < self.setting['protocol']['Dong_SSt']['DONG_STIMULATED_PERC']:
if syn.pathway == 'SCH':
t_start = self.setting['protocol']['Dong_SSt']['DONG_SCH_START'] + np.random.rand()
elif syn.pathway == 'COM':
t_start = self.setting['protocol']['Dong_SSt']['DONG_COM_START'] + np.random.rand()
else:
continue
t_stop = t_start + self.setting['protocol']['Dong_SSt']['DONG_PULSES_NUM'] * \
self.setting['protocol']['Dong_SSt']['DONG_INTERPULSE_INTERVAL']
t_vec = np.arange(t_start, t_stop, self.setting['protocol']['Dong_SSt']['DONG_INTERPULSE_INTERVAL'])
self.create_VecStim(t_vec=t_vec,
synapse=syn,
weight=self.setting['protocol']['Dong_SSt']['DONG_WEIGHT'])
syn.stimulated = True
else:
continue
def set_Pavlowsky_Alarcon_HFS(self, synapses):
"""
Sets the HFS stimulation protocol using Vecstim objects for Pavlowsky & Alarcon experiments.
Parameters
----------
synapses : dict
the dictionary containing synapses
"""
for sec in synapses:
for syn in synapses[sec]:
if np.random.rand() < self.setting['protocol']['Pavlowsky_Alarcon']['HFS_STIMULATED_PERC']:
t_start = self.setting['protocol']['Pavlowsky_Alarcon']['HFS_START'] + np.random.rand()
t_vec = np.zeros(0)
for i in range(self.setting['protocol']['Pavlowsky_Alarcon']['HFS_TRAINS_NUM']):
vec = np.arange(t_start, t_start + 1000, 10)
t_vec = np.concatenate((t_vec, vec), axis=0)
t_start = t_start + 1000 + self.setting['protocol']['Pavlowsky_Alarcon']['HFS_INTERTRAIN']
self.create_VecStim(t_vec=t_vec,
synapse=syn,
weight=self.setting['protocol']['Pavlowsky_Alarcon']['HFS_WEIGHT'])
syn.stimulated = True
else:
continue
def set_Pavlowsky_Alarcon_LFS(self, synapses):
"""
Sets the LFS stimulation protocol using Vecstim objects for Pavlowsky & Alarcon experiments.
Parameters
----------
synapses : dict
the dictionary containing synapses
"""
for sec in synapses:
for syn in synapses[sec]:
if np.random.rand() < self.setting['protocol']['Pavlowsky_Alarcon']['LFS_STIMULATED_PERC']:
t_start = self.setting['protocol']['Pavlowsky_Alarcon']['LFS_START'] + np.random.rand()
t_vec = np.zeros(0)
vec = np.arange(t_start,
t_start + self.setting['protocol']['Pavlowsky_Alarcon']['LFS_STIM_LEN'],
1000)
t_vec = np.concatenate((t_vec, vec), axis=0)
self.create_VecStim(t_vec=t_vec,
synapse=syn,
weight=self.setting['protocol']['Pavlowsky_Alarcon']['LFS_WEIGHT'])
syn.stimulated = True
else:
continue
def set_Pavlowsky_Alarcon_PP(self, synapses):
"""
Sets the paired-pulses stimulation protocol using Vecstim objects for Pavlowsky & Alarcon experiments.
Parameters
----------
synapses : dict
the dictionary containing synapses
"""
for sec in synapses:
for syn in synapses[sec]:
if np.random.rand() < self.setting['protocol']['Pavlowsky_Alarcon']['PP_STIMULATED_PERC']:
t_start = self.setting['protocol']['Pavlowsky_Alarcon']['PP_START'] + np.random.rand()
t_vec = np.zeros(0)
for i in range(self.setting['protocol']['Pavlowsky_Alarcon']['PP_NUM']):
vec = [t_start, t_start + 50]
t_vec = np.concatenate((t_vec, vec), axis=0)
t_start = t_start + 1000
self.create_VecStim(t_vec=t_vec,
synapse=syn,
weight=self.setting['protocol']['Pavlowsky_Alarcon']['PP_WEIGHT'])
syn.stimulated = True
else:
continue
def set_ppStim(self, synapses):
"""
Sets the paired-pulses stimulation protocol using spGen2 objects.
Parameters
----------
synapses : dict
the dictionary containing synapses
"""
for sec in synapses:
for syn in synapses[sec]:
ppStim = h.SpGen2(0.5)
ppStim.APinburst = self.setting['protocol']['theta_burst']['PP_STIM_APINBURST']
ppStim.t01 = self.setting['protocol']['theta_burst']['PP_STIM_T01']
self.ppStims.append(ppStim)
if syn.receptor == 'AMPA':
nc = h.NetCon(ppStim, syn.synapse, 0, 0, self.setting['protocol']['theta_burst']['PP_WEIGHT'])
self.net_cons.append(nc)
syn.weight_vec.record(nc._ref_weight[1], self.setting['simulation']['RECORDING_STEP'])
nc.record(syn.input_spikes)
elif syn.receptor == 'NMDA':
nc = h.NetCon(ppStim, syn.synapse, 0, 0, self.setting['protocol']['theta_burst']['PP_WEIGHT'])
self.net_cons.append(nc)
nc.record(syn.input_spikes)
syn.stimulated = True
def set_square_pulse(self, synapses):
"""
Sets the square pulse stimulation protocol using Vecstim objects.
Parameters
----------
synapses : dict
the dictionary containing synapses
"""
for sec in synapses:
for syn in synapses[sec]:
t_start = self.setting['protocol']['square_pulses']['SQ_START'] + np.random.rand()
t_vec = np.zeros(0)
for i in range(self.setting['protocol']['square_pulses']['SQ_PULSES_NUM']):
pulse_vec = np.arange(t_start,
self.setting['protocol']['square_pulses']['SQ_INTERSPIKE_INTERVAL'] *
self.setting['protocol']['square_pulses']['SQ_STIMULI_NUM'] + t_start,
self.setting['protocol']['square_pulses']['SQ_INTERSPIKE_INTERVAL'])
t_vec = np.concatenate((t_vec, pulse_vec), axis=0)
t_start = t_start + self.setting['protocol']['square_pulses']['SQ_INTERSPIKE_INTERVAL'] * \
self.setting['protocol']['square_pulses']['SQ_STIMULI_NUM'] + \
self.setting['protocol']['square_pulses']['SQ_INTERPULSE_INTERVAL']
self.create_VecStim(t_vec=t_vec,
synapse=syn,
weight=self.setting['protocol']['square_pulses']['SQ_WEIGHT'])
syn.stimulated = True
def set_theta_burst(self, synapses):
"""
Sets the theta burst stimulation protocol using Vecstim objects.
Parameters
----------
synapses : dict
the dictionary containing synapses
"""
for sec in synapses:
for syn in synapses[sec]:
t_start = self.setting['protocol']['theta_burst']['TB_START'] + np.random.rand()
t_vec = np.zeros(0)
for i in range(self.setting['protocol']['theta_burst']['TB_BURSTS_NUM']):
t_stop = t_start + 1 + (self.setting['protocol']['theta_burst']['TB_STIMULI_NUM'] - 1) * \
self.setting['protocol']['theta_burst']['TB_INTERSPIKE_INTERVAL']
burst_vec = np.arange(t_start,
t_stop,
self.setting['protocol']['theta_burst']['TB_INTERSPIKE_INTERVAL'])
t_vec = np.concatenate((t_vec, burst_vec), axis=0)
t_start = t_start + (
self.setting['protocol']['theta_burst']['TB_STIMULI_NUM'] - 1) * \
self.setting['protocol']['theta_burst']['TB_INTERSPIKE_INTERVAL'] + \
self.setting['protocol']['theta_burst']['TB_INTERBURST_INTERVAL']
vec = h.Vector(t_vec)
vec_stim = h.VecStim()
vec_stim.play(vec)
self.vec_stims.append(vec_stim)
if syn.receptor == 'AMPA':
nc = h.NetCon(vec_stim, syn.synapse, 0, 0, self.setting['protocol']['theta_burst']['TB_WEIGHT'])
self.net_cons.append(nc)
# syn.weight_vec.record(nc._ref_weight[1], self.setting['simulation']['RECORDING_STEP'])
nc.record(syn.input_spikes)
elif syn.receptor == 'NMDA':
nc = h.NetCon(vec_stim, syn.synapse, 0, 0, self.setting['protocol']['theta_burst']['TB_WEIGHT'])
self.net_cons.append(nc)
nc.record(syn.input_spikes)
syn.stimulated = True
| 44.960289
| 120
| 0.539345
| 1,301
| 12,454
| 4.960799
| 0.114527
| 0.080105
| 0.126588
| 0.052061
| 0.818872
| 0.806632
| 0.767586
| 0.745119
| 0.690425
| 0.654633
| 0
| 0.006932
| 0.351293
| 12,454
| 276
| 121
| 45.123188
| 0.79193
| 0.222419
| 0
| 0.5
| 0
| 0
| 0.173277
| 0.024817
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060811
| false
| 0
| 0.02027
| 0
| 0.087838
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
73fb6ddaf6c57935ea47212fd3335479769084c0
| 39
|
py
|
Python
|
test/lmp/script/__init__.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/script/__init__.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/script/__init__.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
"""Test :py:mod:`lmp.script` entry."""
| 19.5
| 38
| 0.589744
| 6
| 39
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.638889
| 0.820513
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fb5688ef43fb6f9010d86b9c0fe03f593d0e3062
| 2,023
|
py
|
Python
|
book/migrations/0012_auto_20220308_1322.py
|
Hamed-D3/Mahi-Goli
|
7cfea8a84e70a5f0c8140e9e8fe97d3b3eec204c
|
[
"MIT"
] | null | null | null |
book/migrations/0012_auto_20220308_1322.py
|
Hamed-D3/Mahi-Goli
|
7cfea8a84e70a5f0c8140e9e8fe97d3b3eec204c
|
[
"MIT"
] | null | null | null |
book/migrations/0012_auto_20220308_1322.py
|
Hamed-D3/Mahi-Goli
|
7cfea8a84e70a5f0c8140e9e8fe97d3b3eec204c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.10 on 2022-03-08 09:52
import book.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0011_auto_20220226_2222'),
]
operations = [
migrations.AlterField(
model_name='audiobook',
name='book_file',
field=models.FileField(blank=True, null=True, upload_to='audiobook_files/', validators=[book.models.audiobook_file_format_validator]),
),
migrations.AlterField(
model_name='audiobook',
name='picture',
field=models.ImageField(blank=True, upload_to='images/books/', verbose_name='تصویر جلد'),
),
migrations.AlterField(
model_name='author',
name='avatar',
field=models.ImageField(blank=True, upload_to='images/person/', verbose_name='عکس پروفایل'),
),
migrations.AlterField(
model_name='electronicbook',
name='book_file',
field=models.FileField(blank=True, null=True, upload_to='electronicbook_files/', validators=[book.models.ebook_file_format_validator]),
),
migrations.AlterField(
model_name='electronicbook',
name='picture',
field=models.ImageField(blank=True, upload_to='images/books/', verbose_name='تصویر جلد'),
),
migrations.AlterField(
model_name='physicalbook',
name='picture',
field=models.ImageField(blank=True, upload_to='images/books/', verbose_name='تصویر جلد'),
),
migrations.AlterField(
model_name='teller',
name='avatar',
field=models.ImageField(blank=True, upload_to='images/person/', verbose_name='عکس پروفایل'),
),
migrations.AlterField(
model_name='translator',
name='avatar',
field=models.ImageField(blank=True, upload_to='images/person/', verbose_name='عکس پروفایل'),
),
]
| 36.781818
| 147
| 0.608008
| 204
| 2,023
| 5.857843
| 0.289216
| 0.133891
| 0.167364
| 0.194142
| 0.761506
| 0.761506
| 0.698745
| 0.61841
| 0.61841
| 0.61841
| 0
| 0.02152
| 0.264953
| 2,023
| 54
| 148
| 37.462963
| 0.782112
| 0.022739
| 0
| 0.708333
| 1
| 0
| 0.173165
| 0.022278
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fb82329c0c2b86d8605552e2150de44ca3272306
| 51
|
py
|
Python
|
tinyurl_ga/__init__.py
|
tinyurl-ga/tinyurl-ga.py
|
3fd49c59b58abeb449fe549b5b6a566d2edbd7ef
|
[
"MIT"
] | null | null | null |
tinyurl_ga/__init__.py
|
tinyurl-ga/tinyurl-ga.py
|
3fd49c59b58abeb449fe549b5b6a566d2edbd7ef
|
[
"MIT"
] | null | null | null |
tinyurl_ga/__init__.py
|
tinyurl-ga/tinyurl-ga.py
|
3fd49c59b58abeb449fe549b5b6a566d2edbd7ef
|
[
"MIT"
] | null | null | null |
from .main import create
from .console import main
| 17
| 25
| 0.803922
| 8
| 51
| 5.125
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 26
| 25.5
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fb85746eef45b50cd307300ee0000373159e11e1
| 2,621
|
py
|
Python
|
Sleep_Dataset.py
|
hebinalee/sleep_AI_challenge_2021
|
47cd8b825abb4c712c536605ead8b0f51ff19ac0
|
[
"FSFAP"
] | null | null | null |
Sleep_Dataset.py
|
hebinalee/sleep_AI_challenge_2021
|
47cd8b825abb4c712c536605ead8b0f51ff19ac0
|
[
"FSFAP"
] | null | null | null |
Sleep_Dataset.py
|
hebinalee/sleep_AI_challenge_2021
|
47cd8b825abb4c712c536605ead8b0f51ff19ac0
|
[
"FSFAP"
] | null | null | null |
#######################################################
## READ DATA FROM FILENAMES AND SAVE IMAGE AND LABELS
## Sleep_Dataset : for train dataset
## Sleep_Test_Dataset: for test dataset
#######################################################
import os
import random
import pandas as pd
import numpy as np
import cv2
from PIL import Image
import torch
from torch.utils.data import Dataset
from utils import crop_image
class Sleep_Dataset(Dataset):
def __init__(self, csv_file, transform,mode='origin', data_root_dir='/DATA/'):
self.mode = mode
self.data_root_dir = data_root_dir
self.transform = transform
self.data = pd.read_csv(csv_file, header=None)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
file_path = self.data_root_dir + self.data[0][idx] +'/'+ self.data[1][idx]
target = self.data[2][idx]
#print(file_path, target)
target = self._target_label(target)
if not os.path.exists(file_path):
print('dose not exist '+file_path)
image = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)
image = crop_image(image, mode = self.mode)
sample = {'image':image, 'label':target}
if self.transform:
sample['image'] = self.transform(sample['image'])
return sample
def _target_label(self,target):
if target == 'Wake' : return 0
if target == 'N1' : return 1
if target == 'N2' : return 2
if target == 'N3' : return 3
if target == 'REM' : return 4
class Sleep_Test_Dataset(Dataset):
def __init__(self, csv_file, transform, mode='origin', data_root_dir='/DATA/'):
self.mode= mode
self.data_root_dir = data_root_dir
self.transform = transform
self.data = pd.read_csv(csv_file, header=None)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
file_path = self.data_root_dir + self.data[0][idx] +'/'+ self.data[1][idx]
#target = self.data[2][idx]
#print(file_path, target)
#target = self._target_label(target)
if not os.path.exists(file_path):
print('dose not exist '+file_path)
image = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)
image = crop_image(image, mode = self.mode)
sample = {'image':image, 'code':self.data[0][idx], 'num':self.data[1][idx]}
if self.transform:
sample['image'] = self.transform(sample['image'])
return sample
| 30.835294
| 84
| 0.580694
| 332
| 2,621
| 4.364458
| 0.201807
| 0.088337
| 0.060732
| 0.041408
| 0.709455
| 0.709455
| 0.709455
| 0.709455
| 0.709455
| 0.709455
| 0
| 0.010909
| 0.265548
| 2,621
| 84
| 85
| 31.202381
| 0.741818
| 0.08966
| 0
| 0.603774
| 0
| 0
| 0.050894
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0
| 0.169811
| 0.037736
| 0.415094
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fb8b6494ad78fb90b0ba695190ae36197b0dc3f3
| 168
|
py
|
Python
|
evaluators/dialog/state/__init__.py
|
kaniblu/vhda
|
35941097ef552568c29f66cc55d8ce1927f34978
|
[
"MIT"
] | 3
|
2021-01-12T05:43:20.000Z
|
2021-03-05T17:03:06.000Z
|
evaluators/dialog/state/__init__.py
|
kaniblu/vhda
|
35941097ef552568c29f66cc55d8ce1927f34978
|
[
"MIT"
] | null | null | null |
evaluators/dialog/state/__init__.py
|
kaniblu/vhda
|
35941097ef552568c29f66cc55d8ce1927f34978
|
[
"MIT"
] | null | null | null |
from .entropy import StateEntropyEvaluator
from .distinct import DistinctStateEvaluator
from .count import StateCountEvaluator
from .novel import StateNoveltyEvaluator
| 33.6
| 44
| 0.880952
| 16
| 168
| 9.25
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 168
| 4
| 45
| 42
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fb99a333d2d39d355bb4818b10147a45129ed2a2
| 72
|
py
|
Python
|
fixture/navigation.py
|
TestQA14/python_training
|
7b1c8e6925b274ed05d154a3d434c5ea704595bb
|
[
"Apache-2.0"
] | null | null | null |
fixture/navigation.py
|
TestQA14/python_training
|
7b1c8e6925b274ed05d154a3d434c5ea704595bb
|
[
"Apache-2.0"
] | null | null | null |
fixture/navigation.py
|
TestQA14/python_training
|
7b1c8e6925b274ed05d154a3d434c5ea704595bb
|
[
"Apache-2.0"
] | null | null | null |
class PageHelper:
def __init__(self, app):
self.app = app
| 12
| 28
| 0.597222
| 9
| 72
| 4.333333
| 0.666667
| 0.358974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.305556
| 72
| 5
| 29
| 14.4
| 0.78
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
fb9c0270288c89c5db40e99d932432773f34e160
| 217
|
py
|
Python
|
lib/oeqa/runtime/cases/rubygems_rubygems_google_apis_core.py
|
tuxable-ltd/meta-rubygems
|
e80630e79b64e1be8339e1add0ab07644ec99425
|
[
"BSD-2-Clause"
] | null | null | null |
lib/oeqa/runtime/cases/rubygems_rubygems_google_apis_core.py
|
tuxable-ltd/meta-rubygems
|
e80630e79b64e1be8339e1add0ab07644ec99425
|
[
"BSD-2-Clause"
] | 141
|
2021-02-04T16:22:13.000Z
|
2022-03-27T08:29:40.000Z
|
lib/oeqa/runtime/cases/rubygems_rubygems_google_apis_core.py
|
tuxable-ltd/meta-rubygems
|
e80630e79b64e1be8339e1add0ab07644ec99425
|
[
"BSD-2-Clause"
] | 3
|
2021-02-04T14:02:01.000Z
|
2022-02-02T16:46:52.000Z
|
from rubygems_utils import RubyGemsTestUtils
class RubyGemsTestrubygems_google_apis_core(RubyGemsTestUtils):
def test_gem_list_rubygems_google_apis_core(self):
self.gem_is_installed("google-apis-core")
| 27.125
| 63
| 0.829493
| 27
| 217
| 6.222222
| 0.62963
| 0.178571
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110599
| 217
| 7
| 64
| 31
| 0.870466
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
fb9ff1dd029299b8da9dcbf6d6b7b19daf046a96
| 138
|
py
|
Python
|
leetcode/231-Power-of-Two/PowofTwo_003.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1
|
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/231-Power-of-Two/PowofTwo_003.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1
|
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/231-Power-of-Two/PowofTwo_003.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2
|
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
# @param {integer} n
# @return {boolean}
def isPowerOfTwo(self, n):
return n > 0 and (n & n - 1 is 0)
| 23
| 41
| 0.550725
| 20
| 138
| 3.8
| 0.7
| 0.184211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 0.318841
| 138
| 5
| 42
| 27.6
| 0.776596
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fb9ffd857183433887413e3e53fb05652351a0aa
| 51
|
py
|
Python
|
exunit/exunit_print.py
|
Dania02525/TestExUnit
|
874aeb299d34ce7471a3a3552fcc00827c91d547
|
[
"MIT"
] | null | null | null |
exunit/exunit_print.py
|
Dania02525/TestExUnit
|
874aeb299d34ce7471a3a3552fcc00827c91d547
|
[
"MIT"
] | null | null | null |
exunit/exunit_print.py
|
Dania02525/TestExUnit
|
874aeb299d34ce7471a3a3552fcc00827c91d547
|
[
"MIT"
] | null | null | null |
def exunit_print(*args):
print("ExUnit:", *args)
| 17
| 25
| 0.666667
| 7
| 51
| 4.714286
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 51
| 2
| 26
| 25.5
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
fba915f4c4e1f41eae528bcc4a153040106e626a
| 74
|
py
|
Python
|
grid/__init__.py
|
cthorey/Grid
|
f86c117fda882a8c36073cfd872e542bb8fbe07f
|
[
"Apache-2.0"
] | 1
|
2018-03-09T20:49:48.000Z
|
2018-03-09T20:49:48.000Z
|
grid/__init__.py
|
jvmancuso/Grid
|
ce202add2a066eb6a1421d0391646b50e2d7f306
|
[
"Apache-2.0"
] | 1
|
2018-03-28T09:08:28.000Z
|
2018-03-28T09:08:28.000Z
|
grid/__init__.py
|
jvmancuso/Grid
|
ce202add2a066eb6a1421d0391646b50e2d7f306
|
[
"Apache-2.0"
] | null | null | null |
from . import clients
from .channels import torch_listen_for_obj_callback
| 24.666667
| 51
| 0.864865
| 11
| 74
| 5.454545
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 52
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.