hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8485e5b2da6eb4386bce2a0728a86df99eb9e765
| 116
|
py
|
Python
|
example/myapp/ajax.py
|
snogaraleal/adjax
|
01b1a9ee70589c7f2a5a9be3b6b0ae3d1f85c365
|
[
"MIT"
] | 11
|
2015-06-02T16:48:37.000Z
|
2021-07-06T17:58:21.000Z
|
example/myapp/ajax.py
|
chr1043086360/adjax
|
db3b106e201408f4d7746f422c8966152d1e8c5d
|
[
"MIT"
] | null | null | null |
example/myapp/ajax.py
|
chr1043086360/adjax
|
db3b106e201408f4d7746f422c8966152d1e8c5d
|
[
"MIT"
] | 3
|
2016-07-30T18:29:47.000Z
|
2019-11-20T01:15:39.000Z
|
import sys
if sys.version_info >= (3, 0):
from .ajax3 import * # noqa
else:
from .ajax2 import * # noqa
| 14.5
| 32
| 0.603448
| 17
| 116
| 4.058824
| 0.705882
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.275862
| 116
| 7
| 33
| 16.571429
| 0.77381
| 0.077586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
848fcda0dc21e083917965dc6633b0d2e4509016
| 6,622
|
py
|
Python
|
MNIST/MNISTReader.py
|
ShenJinglong/WFL
|
582b92d818febf363ef0e1fd3c08f6c8b93dc164
|
[
"Apache-2.0"
] | 3
|
2021-11-10T07:49:05.000Z
|
2021-11-19T09:26:12.000Z
|
MNIST/MNISTReader.py
|
ShenJinglong/WFL
|
582b92d818febf363ef0e1fd3c08f6c8b93dc164
|
[
"Apache-2.0"
] | null | null | null |
MNIST/MNISTReader.py
|
ShenJinglong/WFL
|
582b92d818febf363ef0e1fd3c08f6c8b93dc164
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
author: Jalen Shen
"""
import numpy as np
import imageio
class MNISTImageReader():
"""
brief: read image data from .idx3-ubyte file as numpy array
use cases:
# case 1
with MNISTImageReader('t10k-images.idx3-ubyte') as reader:
# the reader was designed as an iterable object.
for index, image in reader:
...
# case 2
reader = MNISTImageReader('t10k-images.idx3-ubyte')
reader.open()
# read 10 images from source file.
# there will be two returned value, the first one is an index list corresponding to returned images,
# the second one is a multi-dimensional numpy array which hold the image data.
index, images = reader.read(10)
reader.close()
# case 3
with MNISTImageReader('t10k-images.idx3-ubyte') as reader:
index, images = reader.read(10) # Of course, you can access images using read() within 'with' context.
"""
_expected_magic = 2051
_current_index = 0
def __init__(self, path):
if not path.endswith('.idx3-ubyte'):
raise NameError("File must be a '.idx3-ubyte' extension")
self.__path = path
self.__file_object = None
def __enter__(self):
self.__file_object = open(self.__path, 'rb')
magic_number = int.from_bytes(self.__file_object.read(4), byteorder='big')
if magic_number != self._expected_magic:
raise TypeError("The File is not a properly formatted .idx3-ubyte file!")
self.__num_of_images = int.from_bytes(self.__file_object.read(4), byteorder='big')
print(f'Total {self.__num_of_images} images ...')
self.__num_of_rows = int.from_bytes(self.__file_object.read(4), byteorder='big')
self.__num_of_cols = int.from_bytes(self.__file_object.read(4), byteorder='big')
return self
def __exit__(self, type, val, tb):
self.__file_object.close()
def __iter__(self):
return self
def __next__(self):
raw_image_data = self.__file_object.read(self.__num_of_rows * self.__num_of_cols)
if self.__file_object is None or raw_image_data == b'':
raise StopIteration
else:
self._current_index += 1
return self._current_index, np.frombuffer(raw_image_data, dtype=np.uint8).reshape((self.__num_of_rows, self.__num_of_cols))
def read(self, num):
feasible_num = num if self.__num_of_images - self._current_index >= num else self.__num_of_images - self._current_index
raw_image_data = self.__file_object.read(self.__num_of_rows * self.__num_of_cols * feasible_num)
index = range(self._current_index + 1, self._current_index + feasible_num + 1)
return index, np.frombuffer(raw_image_data, dtype=np.uint8).reshape((feasible_num, self.__num_of_rows, self.__num_of_cols))
def open(self):
self.__file_object = open(self.__path, 'rb')
magic_number = int.from_bytes(self.__file_object.read(4), byteorder='big')
if magic_number != self._expected_magic:
raise TypeError("The File is not a properly formatted .idx3-ubyte file!")
self.__num_of_images = int.from_bytes(self.__file_object.read(4), byteorder='big')
print(f'Total {self.__num_of_images} images ...')
self.__num_of_rows = int.from_bytes(self.__file_object.read(4), byteorder='big')
self.__num_of_cols = int.from_bytes(self.__file_object.read(4), byteorder='big')
def close(self):
self.__file_object.close()
class MNISTLabelReader():
"""
brief: read label data from .idx1-ubyte file as integer (0, 1, ..., 9)
use cases:
# case 1
with MNISTLabelReader('t10k-labels.idx1-ubyte') as reader:
# the reader was designed as an iterable object.
for index, label in reader:
...
# case 2
reader = MNISTLabelReader('t10k-labels.idx1-ubyte')
reader.open()
# read 10 labels from source file.
# there will be two returned value, the first one is an index list corresponding to returned labels,
# the second one is a numpy array which hold the label data.
index, labels = reader.read(10)
reader.close()
# case 3
with MNISTImageReader('t10k-images.idx3-ubyte') as reader:
index, labels = reader.read(10) # Of course, you can access labels using read() within 'with' context.
"""
_expected_magic = 2049
_current_index = 0
def __init__(self, path):
if not path.endswith('.idx1-ubyte'):
raise NameError("File must be a '.idx1-ubyte' extension")
self.__file_path = path
self.__file_object = None
def __enter__(self):
self.__file_object = open(self.__file_path, 'rb')
magic_number = int.from_bytes(self.__file_object.read(4), byteorder='big')
if magic_number != self._expected_magic:
raise TypeError("The File is not a properly formatted .idx1-ubyte file!")
self.__num_of_labels = int.from_bytes(self.__file_object.read(4), byteorder='big')
print(f'Total {self.__num_of_labels} labels ...')
return self
def __exit__(self, *args, **kwargs):
self.__file_object.close()
def __iter__(self):
return self
def __next__(self):
raw_label = self.__file_object.read(1)
if self.__file_object is None or raw_label == b'':
raise StopIteration
else:
self._current_index += 1
return self._current_index, int.from_bytes(raw_label, byteorder='big')
def read(self, num):
feasible_num = num if self.__num_of_labels - self._current_index >= num else self.__num_of_labels - self._current_index
raw_label_data = self.__file_object.read(feasible_num)
index = range(self._current_index + 1, self._current_index + feasible_num + 1)
return index, np.frombuffer(raw_label_data, dtype=np.uint8).reshape((feasible_num,))
def open(self):
self.__file_object = open(self.__file_path, 'rb')
magic_number = int.from_bytes(self.__file_object.read(4), byteorder='big')
if magic_number != self._expected_magic:
raise TypeError("The File is not a properly formatted .idx1-ubyte file!")
self.__num_of_labels = int.from_bytes(self.__file_object.read(4), byteorder='big')
print(f'Total {self.__num_of_labels} labels ...')
def close(self):
self.__file_object.close()
| 39.416667
| 135
| 0.646481
| 893
| 6,622
| 4.43449
| 0.147816
| 0.062626
| 0.09899
| 0.072727
| 0.890152
| 0.796212
| 0.796212
| 0.720202
| 0.662879
| 0.64899
| 0
| 0.016449
| 0.247206
| 6,622
| 167
| 136
| 39.652695
| 0.777934
| 0.252794
| 0
| 0.704545
| 0
| 0
| 0.108477
| 0.018464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.022727
| 0.022727
| 0.363636
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ca18f0c907f54cbe036d41899a0ddb575e60a81c
| 37
|
py
|
Python
|
mmdet/changeDetection/model/__init__.py
|
LokeZhou/mydetection
|
516cc5d9839ea78bfd859bd0733f61dd586184b8
|
[
"Apache-2.0"
] | null | null | null |
mmdet/changeDetection/model/__init__.py
|
LokeZhou/mydetection
|
516cc5d9839ea78bfd859bd0733f61dd586184b8
|
[
"Apache-2.0"
] | null | null | null |
mmdet/changeDetection/model/__init__.py
|
LokeZhou/mydetection
|
516cc5d9839ea78bfd859bd0733f61dd586184b8
|
[
"Apache-2.0"
] | null | null | null |
from .cd_mask_rcnn import CDMaskRCNN
| 18.5
| 36
| 0.864865
| 6
| 37
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ca5aad8557b1274a2356a5cdf9f50abbcc7e1040
| 108
|
py
|
Python
|
yaag_mme/errors.py
|
Cutewarriorlover/yaag-mme
|
c2601c0352df120c2252e2f562c40b7f1d244a70
|
[
"MIT"
] | null | null | null |
yaag_mme/errors.py
|
Cutewarriorlover/yaag-mme
|
c2601c0352df120c2252e2f562c40b7f1d244a70
|
[
"MIT"
] | null | null | null |
yaag_mme/errors.py
|
Cutewarriorlover/yaag-mme
|
c2601c0352df120c2252e2f562c40b7f1d244a70
|
[
"MIT"
] | null | null | null |
class PlayerAlreadyHasItemError(Exception):
pass
class PlayerNotInitializedError(Exception):
pass
| 15.428571
| 43
| 0.796296
| 8
| 108
| 10.75
| 0.625
| 0.302326
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 108
| 6
| 44
| 18
| 0.934783
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
0468deb6d4c0ee8f40565f48e7045caf287460c2
| 135
|
py
|
Python
|
release/scripts/presets/camera/Sony_A55.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365
|
2015-02-10T15:10:55.000Z
|
2022-03-03T15:50:51.000Z
|
release/scripts/presets/camera/Sony_A55.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45
|
2015-01-09T15:34:20.000Z
|
2021-10-05T14:44:23.000Z
|
release/scripts/presets/camera/Sony_A55.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172
|
2015-01-25T15:16:53.000Z
|
2022-01-31T08:25:36.000Z
|
import bpy
bpy.context.camera.sensor_width = 23.4
bpy.context.camera.sensor_height = 15.6
bpy.context.camera.sensor_fit = 'HORIZONTAL'
| 27
| 44
| 0.8
| 22
| 135
| 4.772727
| 0.590909
| 0.285714
| 0.457143
| 0.628571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 0.081481
| 135
| 4
| 45
| 33.75
| 0.798387
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0470ed48fec70b4562401ab1844230f92ab1fcc3
| 188
|
py
|
Python
|
commons/utils/Utils.py
|
GitHubSeyhun/Data-Analytics
|
28e4630e611df96774db2237677fa11a090dc5ca
|
[
"Apache-2.0"
] | null | null | null |
commons/utils/Utils.py
|
GitHubSeyhun/Data-Analytics
|
28e4630e611df96774db2237677fa11a090dc5ca
|
[
"Apache-2.0"
] | null | null | null |
commons/utils/Utils.py
|
GitHubSeyhun/Data-Analytics
|
28e4630e611df96774db2237677fa11a090dc5ca
|
[
"Apache-2.0"
] | null | null | null |
import re
class Utils():
def __init__(self, COMMA_DELIMITER):
COMMA_DELIMITER = re.compile(''',(?=(?:[^"]*"[^"]*")*[^"]*$)''')
self.COMMA_DELIMITER = COMMA_DELIMITER
| 23.5
| 72
| 0.56383
| 18
| 188
| 5.444444
| 0.555556
| 0.571429
| 0.367347
| 0.469388
| 0.653061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18617
| 188
| 7
| 73
| 26.857143
| 0.640523
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 0.148936
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
04ae4c0f87be140c3d28cbc55039b2a077b12785
| 3,040
|
py
|
Python
|
test-framework/test-suites/integration/tests/set/test_set_bootaction_args.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 123
|
2015-05-12T23:36:45.000Z
|
2017-07-05T23:26:57.000Z
|
test-framework/test-suites/integration/tests/set/test_set_bootaction_args.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 177
|
2015-06-05T19:17:47.000Z
|
2017-07-07T17:57:24.000Z
|
test-framework/test-suites/integration/tests/set/test_set_bootaction_args.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 32
|
2015-06-07T02:25:03.000Z
|
2017-06-23T07:35:35.000Z
|
import json
from textwrap import dedent
class TestSetBootactionArgs:
def test_no_args(self, host):
result = host.run('stack set bootaction args')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "action" argument is required
{action} {args=string} [os=string] [type=string]
''')
def test_multiple_args(self, host):
result = host.run('stack set bootaction args test foo')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "action" argument must be unique
{action} {args=string} [os=string] [type=string]
''')
def test_invalid_action(self, host):
result = host.run('stack set bootaction args test type=os args=test')
assert result.rc == 255
assert result.stderr == 'error - action "test" does not exist\n'
def test_no_type(self, host):
result = host.run('stack set bootaction args memtest')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "type" parameter is required
{action} {args=string} [os=string] [type=string]
''')
def test_invalid_type(self, host):
result = host.run('stack set bootaction args memtest type=foo')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "type" parameter must be "os" or "install"
{action} {args=string} [os=string] [type=string]
''')
def test_no_args_parameter(self, host):
result = host.run('stack set bootaction args memtest type=os')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "args" parameter is required
{action} {args=string} [os=string] [type=string]
''')
def test_with_os(self, host):
# Add a test bootaction with an OS
result = host.run('stack add bootaction test type=os os=ubuntu kernel=""')
assert result.rc == 0
# Make sure the action got added
result = host.run('stack list bootaction test output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'args': None,
'bootaction': 'test',
'kernel': None,
'os': 'ubuntu',
'ramdisk': None,
'type': 'os'
}
]
# Set the bootaction args with a specified os
result = host.run(f'stack set bootaction args test type=os os=ubuntu args="test_args"')
assert result.rc == 0
# Make sure the args got set
result = host.run('stack list bootaction test output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'args': 'test_args',
'bootaction': 'test',
'kernel': None,
'os': 'ubuntu',
'ramdisk': None,
'type': 'os'
}
]
def test_os_is_null(self, host):
# Set the bootaction args with a null os
result = host.run('stack set bootaction args memtest type=os args="test_args"')
assert result.rc == 0
# Make sure the action got added
result = host.run('stack list bootaction memtest output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'args': 'test_args',
'bootaction': 'memtest',
'kernel': 'kernel memtest',
'os': None,
'ramdisk': None,
'type': 'os'
}
]
| 28.679245
| 89
| 0.658224
| 422
| 3,040
| 4.689573
| 0.156398
| 0.109146
| 0.078828
| 0.100051
| 0.820616
| 0.796362
| 0.771097
| 0.734209
| 0.734209
| 0.690248
| 0
| 0.009852
| 0.198684
| 3,040
| 105
| 90
| 28.952381
| 0.802545
| 0.067105
| 0
| 0.534884
| 0
| 0
| 0.43761
| 0
| 0
| 0
| 0
| 0
| 0.244186
| 1
| 0.093023
| false
| 0
| 0.023256
| 0
| 0.127907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6cad54eca2b52e98020381cbaaa966ce552f76c
| 21,466
|
py
|
Python
|
FSChartParser.py
|
osteele/pyfsa
|
58a44106d3e3918a17a5a106584d1a91636f9d52
|
[
"Artistic-1.0-Perl"
] | 7
|
2015-11-25T10:52:43.000Z
|
2018-09-11T21:35:25.000Z
|
FSChartParser.py
|
osteele/pyfsa
|
58a44106d3e3918a17a5a106584d1a91636f9d52
|
[
"Artistic-1.0-Perl"
] | null | null | null |
FSChartParser.py
|
osteele/pyfsa
|
58a44106d3e3918a17a5a106584d1a91636f9d52
|
[
"Artistic-1.0-Perl"
] | 7
|
2015-12-23T05:22:20.000Z
|
2021-07-13T19:17:32.000Z
|
# Module FSChartParser -- finite-state chart parser and grammar compilation utilities
"""ChartParser is a chart parser that uses finite-state automata to
recognize grammar rules.
ChartParser is initialized with a grammar, represented as a list of
rules. Each rule is either a categorizing automaton (defined below),
or a tuple (lhs, automaton), where each lhs is a category and each
automaton recognizes a language over terminals, and nonterminals. In
the latter case, the tuple is compiled to a categorizing automaton.
A categorizing automaton is an automaton which also maps each final
state to a list of categories, which index the languages that
categorize a sequence that leads to that final state. A categorizing
automaton can be used to simultaneously apply a number of regular
grammars to a single input sequence, and categorize each subsequence
according to each grammar. Categorizing automata are represented by
instances of class CategorizingAutomaton, and created by
compileCategorizingAutomaton, which takes a list of (lhs, automaton)
rules and constructs a single categorizing automaton which categorizes
inputs according to all the rules simultaneously.
The chart parser operates on instances of Constituent, which has a
category, a start index, an end index, and a list of children, which
are also constituents.
Example
--------
>>> RULES = map(lambda (lhs, rhs):(lhs, FSA.compileRE(rhs, multichar=1)), [
('S', 'NP VP'),
('NP', "det? adj* noun+"),
('NP', 'noun of noun'),
('VP', 'verb NP')])
>>> parser = ChartParser(compileRules(RULES))
>>> print parser.parseString('noun verb noun', multichar=1).constituents()
[S[NP[noun] VP[verb NP[noun]]]]
>>> print parser.parseString('det adj noun noun verb adj noun', multichar=1).constituents()
[S[NP[det adj noun noun] VP[verb NP[adj noun]]]]
>>> parser = ChartParser(compileRules(RULES, optimize=1))
>>> print parser.parseString('noun verb noun', multichar=1)
[S[NP[noun] VP[verb NP[noun]]]]
"""
__author__ = "Oliver Steele", 'steele@osteele.com'
import FSA
from types import ListType, StringType, TupleType
class ChartParser:
TRACE = 0
TRACE_MATCHES = 0
TRACE_CONSTITUENTS = 0
#
# Initialization
#
def __init__(self, rules):
"""Each rule is a sequence of f, fsa; where f is applied to (fsa,
state, children, start, end) to create a constituent or None, and fsa
is a finite-state automaton as defined in FSA."""
self.automata = rules
self.firstConstituentsOnly = 1
def initializeChart(self, n):
self.constituentMaps = map(lambda n:{}, range(n))
self.edges = map(lambda n:[], range(n + 1))
#for automaton in self.automata:
# for i in range(n):
# self.addEdge((automaton, automaton.initialState, i, i, []))
#
# Parsing
#
def parse(self, tokens):
"""Preterminals should be a sequence of tokens. Parse it. If
there's a set of spanning parses whose categories match the categories
in the first grammar rule, return them. For more general queries
(partial parses), use the query methods on the parser."""
self.tokens = tokens
self.initializeChart(len(tokens))
self.finalIndex = len(tokens)
for i in range(len(tokens)):
for automaton in self.automata:
self.addEdge(automaton, automaton.initialState, i, i, [])
return self
def parseString(self, sentence, multichar=0):
"""Parse the string argument, with one letter per preterminal
constituent. If multichar is true, the string is split at whitespace
and the intervening tokens are used as preterminals instead."""
if multichar:
import string
tokens = string.split(sentence)
else:
tokens = sentence
return self.parse(tokens)
#
# Queries
#
def constituents(self, categories=None, complete=0):
"""Return a list of all the non-preterminal constituents in the chart.
If categories is not false, include only constituents with a category
in categories."""
results = []
for constituentMap in self.constituentMaps:
constituents = constituentMap.values()
if categories:
constituents = filter(lambda c,cats=categories:c.category in cats, constituents)
constituents.sort(lambda a, b:-cmp(a.length(), b.length()))
results.extend(constituents)
if complete:
results = filter(lambda c, final=self.finalIndex: c.start == 0 and c.end == final, results)
return results
def constituentsAt(self, index):
"""Return a list of constituents at index."""
return filter(self.constituentMaps[index].values())
def longestDisjointConstituents(self, categories=None):
"""Return a sequence of disjoint constituents such that each
constituent is one of the longest constituents at its index position.
The algorithm is myopic and locally greedy: it won't choose a shorter
constituent in order to get the whole sequence to come out longer, and
it won't even choose between two constituents of equal length in order
to get a longer overall sequence."""
results = []
index = 0
while index < len(self.constituentMaps):
constituents = self.constituentsAt(index)
if categories:
constituents = filter(lambda c,cats=categories:c.category in cats, constituents)
if constituents:
constituents.sort(lambda a, b:-cmp(a.end, b.end))
best = constituents[0]
results.append(best)
index = best.end
else:
index = index + 1
return results
#
# Chart manipulation
#
def addConstituent(self, constituent, start, end):
if self.TRACE or self.TRACE_CONSTITUENTS:
print 'adding', constituent
if self.firstConstituentsOnly:
key = constituent.category, start, end
if self.constituentMaps[start].get(key):
return
self.constituentMaps[start][key] = constituent
for (automaton, state, left, right, children) in self.edges[start]:
assert right == start
successors = automaton.nextStates(state, constituent)
if successors and self.TRACE_MATCHES:
print '\tmatched:'
print '\t\t', automaton.atStateString(state), '->'
elif self.TRACE:
if successors:
print '\tmatched: \n\t\t%s -> %s' % (automaton.atStateString(state), successors)
else:
assert automaton.atStateString(state)
print '\tdidn\'t match: \n\t\t%s' % (automaton.atStateString(state))
for successor in successors:
if self.TRACE_MATCHES:
print '\t\t', automaton.atStateString(successor)
self.addEdge(automaton, successor, left, end, children + [constituent])
def addEdge(self, automaton, state, start, end, children):
if automaton.stateMatchesConstituents(state):
edge = automaton, state, start, end, children
self.edges[end].append(edge)
for category in automaton.getStateCategories(state):
constituent = Constituent(category, children, start, end)
try:
constituent.source = automaton
except AttributeError:
pass
self.addConstituent(constituent, start, end)
if end < len(self.tokens):
token = self.tokens[end]
constituents = self.constituentMaps[end].values()
for successor in automaton.nextStates(state, token):
self.addEdge(automaton, successor, start, end + 1, children + [token])
for constituent in constituents:
for successor in automaton.nextStates(state, constituent):
self.addEdge(automaton, successor, start, constituent.end, children + [constituent])
#
# Presentation
#
def toDotString(self, includeActiveEdges=0):
"""Returns a string that can be printed by the DOT tool at
http://www.research.att.com/sw/tools/graphviz/ ."""
import string
output = []
output.append('digraph finite_state_machine {');
output.append('\t0 [style = bold];' )
output.append('\tnode [shape = doublecircle]; ' + `self.finalIndex` + ';' );
output.append('\tnode [shape = circle];');
output.append('\trankdir=LR;');
if includeActiveEdges:
for edges in self.edges:
for (automaton, state, start, end, children) in edges:
label = string.replace(automaton.atStateString(state, wrap=20), '\n', '\\n')
output.append('\t%s -> %s [style=dotted,label="%s"];' % (start, end, label) )
for constituents in self.constituentMaps:
items = map(lambda c:(c.start, c.end, c), constituents.values())
items.sort()
for item in items:
output.append('\t%s -> %s [label="%s"];' % item)
output.append('}');
return string.join(output, '\n')
def view(self, includeActiveEdges=1):
FSA.view(self.toDotString(includeActiveEdges=includeActiveEdges))
#
# Chart classes
#
class Constituent:
def __init__(self, category, children, start, end):
self.category = category
self.children = children
self.start = start
self.end = end
def leaves(self):
if self.children:
leaves = []
for child in self.children:
if hasattr(child, 'leaves'):
leaves.extend(child.leaves())
else:
leaves.append(child)
return leaves
else:
return [self]
def __repr__(self):
import string
if self.children:
def tokenStr(token):
return (hasattr(token, 'token') and str(token.token)) or str(token)
if 1: #flatten the printed representation
return self.category + '[' + string.join(map(tokenStr, self.leaves())) + ']'
else:
return self.category + '[' + string.join(map(str, self.children or []), ' ') + ']'
else:
return self.category
def length(self):
return self.end - self.start
#
# Class CategorizingAutomaton
#
class CategorizingAutomaton(FSA.FSA):
    """A categorizing automaton is a finite-state automaton that additionally
    maps each final state into a set of categories. A categorizing automaton
    can be used to simultaneously recognize and categorize sequences according
    to a number of languages."""
    def __init__(*args, **keys):
        # NOTE: deliberately declared without an explicit 'self' so the
        # instance reaches the base constructor untouched through *args.
        apply(FSA.FSA.__init__, args, keys)
        self = args[0]
        # Start with an empty state->categories mapping.
        self.setStateCategoriesMapping({})
    def coerce(self, klass):
        # Coerce to another FSA subclass, carrying the per-state category
        # mapping over to the coerced copy.
        coercion = FSA.FSA.coerce(self, klass)
        coercion.setStateCategoriesMapping(self.getStateCategoriesMapping())
        return coercion
    #
    # Predicates
    #
    # Class-level flag tested elsewhere via getattr(obj, 'isCategorizingAutomaton', 0).
    isCategorizingAutomaton = 1
    #
    # State categories
    #
    def categories(self):
        """Return a list of categories that this automaton will categorize
        into (the union of every state's categories, without duplicates)."""
        categories = []
        # 'set' here is just a loop variable over category lists (this is
        # pre-builtin-set code); it shadows nothing that this method uses.
        for set in self.getStateCategoriesMapping().values():
            for category in set:
                if category not in categories:
                    categories.append(category)
        return categories
    def getStateCategories(self, state):
        # Categories attached to a single state (a list, possibly empty).
        return self.stateCategories[state]
    def addStateCategory(self, state, category):
        # Append without duplicating; rebinds a fresh list rather than
        # mutating, so states sharing the default entry are unaffected.
        categories = self.stateCategories[state]
        if category not in categories:
            self.stateCategories[state] = categories + [category]
    def getStateCategoriesMapping(self):
        # Materialize {state: categories} for every state.
        mapping = {}
        for state in self.states:
            mapping[state] = self.getStateCategories(state)
        return mapping
    def setStateCategoriesMapping(self, mapping):
        # Reset the table (makeStateTable presumably comes from the FSA
        # base class — TODO confirm) and fill in the supplied entries.
        self.stateCategories = self.makeStateTable([])
        for state, categories in mapping.items():
            self.stateCategories[state] = categories
    def setFinalCategory(self, category):
        """Set all the final states to categorize to this category."""
        # NOTE: this discards any previously assigned state categories.
        self.stateCategories = self.makeStateTable([])
        for state in self.finalStates:
            self.addStateCategory(state, category)
    #
    # Accessors
    #
    def computeStateMatchesConstituents(self, state):
        # Default: every state may match constituents; subclasses can
        # presumably refine this — TODO confirm intended overrides.
        return 1
    def stateMatchesConstituents(self, state):
        # Lazily build and cache a state-indexed table of
        # computeStateMatchesConstituents results on first use.
        try:
            isConstituentTestMap = self.isConstituentTestMap
        except AttributeError:
            isConstituentTestMap = [None] * (reduce(max, self.states) + 1)
            for state in self.states:
                isConstituentTestMap[state] = self.computeStateMatchesConstituents(state)
            self.isConstituentTestMap = isConstituentTestMap
        return isConstituentTestMap[state]
    #
    # Presentation template overrides
    #
    def additionalTransitionInfoString(self, transition):
        # Append the destination state's categories (repr form) to the
        # base class's transition description, if there are any.
        result = FSA.FSA.additionalTransitionInfoString(self, transition)
        categories = self.getStateCategories(transition[1])
        if categories:
            import string
            result = (result and result + ' ' or '') + `categories`#string.join(map(str, categories), ', ')
        return result
    def stateLabelString(self, state):
        # overrides the method in FSA, to include categorizing states in dot diagrams
        # NOTE(review): categoriesFor() is not defined in this class —
        # presumably inherited from FSA; confirm. Implicitly returns None
        # when the state has no categories.
        if self.categoriesFor(state):
            import string
            return `state` + '\n' + string.join(map(str, self.categoriesFor(state)), ', ')
    def atStateString(self, state, wrap=None):
        # Human-readable "automaton at state" string; uses REUtils for a
        # regular-expression rendering when available, else a plain form.
        # (The local name 'str' shadows the builtin within this method.)
        try:
            import REUtils
            str = REUtils.decompileRE(self, dottedStates=[state], wrap=wrap, sep=self.tokenSeparator())
        except ImportError:
            str = "%s @ %s" % (self, state)
        if len(self.categories()) == 1:
            str = '%s => %s' % (self.categories()[0], str)
        return str
    def tokenSeparator(self):
        # Separator used between tokens in printed representations.
        return ' '
    #
    # Conversion
    #
    def toFSA(self, labelConstructor=lambda s:'=>' + s):
        """Return an FSA that corresponds to the categorizing automaton that is the
        argument, except that final state categories have been replaced by
        transitions labeled with a transformation of those categories.

        Every (state, category) pair becomes a transition from that state to
        a single fresh final state, labeled labelConstructor(category)."""
        states, alphabet, transitions, initial, finals = self.tuple()
        newFinal = self.nextAvailableState()
        transitions = transitions[:]
        for state, categories in self.getStateCategoriesMapping().items():
            for category in categories:
                transitions.append((state, newFinal, labelConstructor(category)))
        return self.copy(states + [newFinal], alphabet, transitions, initial, [newFinal])
    #
    # Decision Functions
    #
    def buildDecisionTree(self, pairs):
        # Build a binary decision tree from (test, state) pairs.  Each node
        # is (term, positive-subtree, negative-subtree); a (None, s, s) node
        # is a leaf deciding state s.  Implicitly returns None for empty
        # 'pairs'.
        if pairs:
            test, state = pairs[0]
            if test.isUnconditional():
                return (None, state, state)
            # Split on the first term of the first test; strip the decided
            # term (or its complement) from each test before recursing.
            term = test.terms()[0]
            complement = term.complement()
            positives, negatives = [], []
            for test, state in pairs:
                if term in test.terms():
                    positives.append((test.build(filter(lambda x, term=term:x != term, test.terms())), state))
                else:
                    negatives.append((test.build(filter(lambda x, term=complement:x != term, test.terms())), state))
            return (term, self.buildDecisionTree(positives), self.buildDecisionTree(negatives))
    def buildDecisionTreeDecider(self, pairs):
        # Return a closure that walks the decision tree for a constituent;
        # the tree is built once and bound as a default argument.
        def decisionTreeDecider(constituent, tree=self.buildDecisionTree(pairs)):
            while tree:
                test, positive, negative = tree
                if test:
                    if test.matches(constituent):
                        tree = positive
                    else:
                        tree = negative
                else:
                    # Leaf node: both branches hold the decided state.
                    return positive
        return decisionTreeDecider
    def buildSerialDecider(self, pairs):
        # Return a closure that tries each (test, state) pair in order and
        # returns the first matching state (None if nothing matches).
        if pairs:
            def serialDecider(constituent, pairs=pairs):
                for test, state in pairs:
                    if test.matches(constituent):
                        return state
            return serialDecider
        else:
            return lambda constituent:None
    def buildDecisionFunctions(self):
        # Precompute a per-state decision function and switch transition
        # lookup over to them.  Only valid on a determinized automaton.
        # NOTE(review): buildDecisionFunction (singular) is not defined in
        # this chunk — presumably provided elsewhere; confirm.
        assert getattr(self, '_isDeterminized', 0)
        decisionFunctions = [None] * (reduce(max, self.states) + 1)
        for state in self.states:
            decisionFunctions[state] = self.buildDecisionFunction(state)
        self.decisionFunctions = decisionFunctions
        self.nextState = self.nextStateUsingDecisionFunctions
        self.nextStates = self.nextStatesUsingDecisionFunctions
    def nextStateUsingDecisionFunctions(self, state, input):
        # Single-successor lookup via the precomputed decision functions.
        successor = self.decisionFunctions[state](input)
        return successor
    def nextStatesUsingDecisionFunctions(self, state, input):
        # Same lookup, but wrapped as a (possibly empty) successor list.
        successor = self.decisionFunctions[state](input)
        return successor is not None and [successor] or []
    #
    # Accepting
    #
    def labelMatches(self, label, constituent):
        """Override the implementation in FSA, so that strings can be used as
        labels that match the constituent's categories."""
        # A string label matches the constituent itself or its .category
        # attribute; anything else defers to the base implementation.
        if type(label) == StringType:
            return label == constituent or hasattr(constituent, 'category') and label == constituent.category
        else:
            return FSA.FSA.labelMatches(self, label, constituent)
#
# Grammar compilation
#
def compileRule(rule, defaultCategory='S'):
    """Compile a single grammar rule into a CategorizingAutomaton.

    The rule may be (a) an existing CategorizingAutomaton, returned as is;
    (b) a plain FSA, which is coerced and whose final states are tagged
    with defaultCategory; or (c) an (lhs, rhs) tuple, where rhs is an
    automaton or a list of labels (compiled via FSA.sequence) and lhs
    names the category assigned to its final states.

    Raises TypeError for any other argument.
    """
    if getattr(rule, 'isCategorizingAutomaton', 0):
        automaton = rule
    elif getattr(rule, 'isFSA', 0):
        automaton = rule.coerce(CategorizingAutomaton)
        automaton.setFinalCategory(defaultCategory)
    elif type(rule) == TupleType:
        lhs, rhs = rule
        if type(rhs) == ListType:
            rhs = FSA.sequence(rhs)
        automaton = rhs.coerce(CategorizingAutomaton)
        automaton.setFinalCategory(lhs)
    else:
        # Bug fix: was a string exception ("raise 'rule must be...'"),
        # which is invalid in Python 2.6+; raise a real exception type.
        raise TypeError('rule must be a (lhs, automaton) or an automaton')
    return automaton
def compileRules(rules, optimize=0, labelConstructor=None):
    """Compile a list of rules into a list of CategorizingAutomata.

    Rules is either a list of CategorizingFSAs or (lhs, rhs) pairs, where
    each rhs is either a list or an automaton.  Each pair is turned into a
    CategorizingAutomaton by coercing it and setting the categories of its
    final states to the lhs.  When optimize is true, all the rules are
    merged into a single combined automaton (see combineRules).
    """
    automata = map(compileRule, rules)
    if optimize:
        # Bug fix: labelConstructor was accepted but silently dropped;
        # forward it so callers' custom label constructors take effect.
        automata = [combineRules(rules, labelConstructor=labelConstructor)]
    return automata
def combineRules(rules, labelConstructor=None):
    """Create a categorizing automaton from a list of rules. Each rule is a
    tuple (lhs, rhs), where lhs is the category for sequences recognized by
    rhs, which is an automaton. labelConstructor is a function that
    converts a category into a label that can be intersected with the labels in
    the rule automata (the intersection of a category label with any rhs
    automaton label or with any other category label should be None); it
    defaults to a function that turns the category 'C' into '=>C'."""
    lhsMap = {}
    def construct(rule, labelConstructor=labelConstructor or (lambda s:'=>' + s), lhsMap=lhsMap):
        # Compile the rule, remember which synthetic label maps back to
        # which category, and convert to a plain FSA whose categories are
        # encoded as labeled transitions to a fresh final state (toFSA).
        automaton = compileRule(rule)
        for category in automaton.categories():
            lhsMap[labelConstructor(category)] = category
        return automaton.toFSA(labelConstructor=labelConstructor)
    automata = map(construct , rules)
    # Union the per-rule FSAs and minimize; the synthetic category
    # transitions survive because their labels are keys of lhsMap and are
    # assumed disjoint from ordinary input labels (see docstring).
    fsa = apply(FSA.union, automata).minimized()
    states0, alpha, transitions0, initial, finals0 = fsa.tuple()
    # Partition the transitions: ordinary input transitions vs. the
    # synthetic category-labeled ones (recognized via lhsMap lookup).
    transitions = filter(lambda (s0,s1,label), f=lhsMap.get: not f(label), transitions0)
    finalTransitions= filter(lambda (s0,s1,label), f=lhsMap.get: f(label), transitions0)
    # Keep only states that participate in ordinary transitions.
    states = []
    for s0, s1, _ in transitions:
        if s0 not in states: states.append(s0)
        if s1 not in states: states.append(s1)
    # A state is final iff it is the source of a category transition.
    finals = map(lambda (s0,s1,_): s0, finalTransitions)
    fsa = automata[0].copy(states, alpha, transitions, initial, finals)
    fsa._isDeterminized = 1
    # Re-attach each category to the state that emitted its transition.
    for state, _, label in finalTransitions:
        fsa.addStateCategory(state, lhsMap[label])
    return fsa
"""
RULES = map(lambda (lhs, rhs):(lhs, FSA.compileRE(rhs, multichar=1)), [
('S', 'NP VP'),
('NP', "det? adj* noun+"),
('NP', 'noun of noun'),
('VP', 'verb NP')])
parser = ChartParser(compileRules(RULES))
print parser.parseString('noun verb noun', multichar=1).constituents(complete=1)
parser = ChartParser(compileRules(RULES, optimize=1))
print parser.parseString('noun verb noun', multichar=1).constituents(complete=1)
print parser.parseString('det adj noun noun verb adj noun', multichar=1).constituents(complete=1)
print parser.toDotString()
print parser.toDotString(includeActiveEdges=1)
p.view()
"""
| 21,466
| 21,466
| 0.625547
| 2,320
| 21,466
| 5.772414
| 0.182328
| 0.005824
| 0.004704
| 0.00448
| 0.173536
| 0.125821
| 0.105436
| 0.091398
| 0.080197
| 0.080197
| 0
| 0.004265
| 0.279139
| 21,466
| 1
| 21,466
| 21,466
| 0.861187
| 0.997298
| 0
| 0.151235
| 0
| 0.012346
| 0.027234
| 0.003208
| 0
| 0
| 0
| 2
| 0.009259
| 0
| null | null | 0.003086
| 0.027778
| null | null | 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6d67c07daea3c71e549a4a2de8e30ae22e60225
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/distutils/command/build_py.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/distutils/command/build_py.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/distutils/command/build_py.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/5e/22/d9/d9dfed98213cb86e55014e4e2b6ce5cd06b205f798e25f0514496d6e28
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6db7fcd72bac0d5a7b7a48cf8b5e024f0b4080f
| 315
|
py
|
Python
|
variable-length-arguments/args.py
|
mantoshkumar1/python3_practice
|
d10ebf7c632d725408a4ebe761961bc09e86d47c
|
[
"MIT"
] | 1
|
2020-05-28T19:14:55.000Z
|
2020-05-28T19:14:55.000Z
|
variable-length-arguments/args.py
|
mantoshkumar1/python3_practice
|
d10ebf7c632d725408a4ebe761961bc09e86d47c
|
[
"MIT"
] | null | null | null |
variable-length-arguments/args.py
|
mantoshkumar1/python3_practice
|
d10ebf7c632d725408a4ebe761961bc09e86d47c
|
[
"MIT"
] | null | null | null |
def func(*args):
print(args)
print(*args)
a = [1, 2, 3]
# here "a" is passed as tuple of list: ([1, 2, 3],)
func(a)
# print(args) -> ([1, 2, 3],)
# print(*args) -> [1, 2, 3]
# here "a" is passed as tuple of integers: (1, 2, 3)
func(*a)
# print(args) -> (1, 2, 3)
# print(*args) -> 1 2 3
| 18.529412
| 53
| 0.485714
| 57
| 315
| 2.684211
| 0.280702
| 0.091503
| 0.137255
| 0.287582
| 0.72549
| 0.72549
| 0.72549
| 0.72549
| 0.72549
| 0.72549
| 0
| 0.092105
| 0.27619
| 315
| 16
| 54
| 19.6875
| 0.578947
| 0.638095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.166667
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8e19363a32af599f7539f4ac6fe45523c33f40d5
| 145
|
py
|
Python
|
Decryptor.py
|
XerosLab/Xeransomware
|
12948ace0562997bf2c6a1978a1cdaccf9d0fd8f
|
[
"MIT"
] | 1
|
2021-08-20T16:28:55.000Z
|
2021-08-20T16:28:55.000Z
|
Decryptor.py
|
XerosLab/Xeransomware
|
12948ace0562997bf2c6a1978a1cdaccf9d0fd8f
|
[
"MIT"
] | null | null | null |
Decryptor.py
|
XerosLab/Xeransomware
|
12948ace0562997bf2c6a1978a1cdaccf9d0fd8f
|
[
"MIT"
] | null | null | null |
# import pyAesCrypt
# pyAesCrypt.decryptFile(os.path.join(baseDir, "data.txt.aes"), os.path.join(baseDir, "data.txt.dec"), password, bufferSize)
| 72.5
| 124
| 0.751724
| 20
| 145
| 5.45
| 0.65
| 0.110092
| 0.183486
| 0.311927
| 0.440367
| 0.440367
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 145
| 2
| 124
| 72.5
| 0.807407
| 0.965517
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8e37066a568e58e2dbd4ec8902db4fb66e3aa9b0
| 76
|
py
|
Python
|
app/api/base.py
|
shun99/blog-server-python
|
b61a6d81bcbe1bdf499cc724012def2954d7f848
|
[
"Apache-2.0"
] | null | null | null |
app/api/base.py
|
shun99/blog-server-python
|
b61a6d81bcbe1bdf499cc724012def2954d7f848
|
[
"Apache-2.0"
] | 1
|
2019-01-14T11:13:40.000Z
|
2019-01-14T11:13:40.000Z
|
app/api/base.py
|
shun99/blog-server-python
|
b61a6d81bcbe1bdf499cc724012def2954d7f848
|
[
"Apache-2.0"
] | null | null | null |
from flask_restful import Resource
class BaseResource(Resource):
pass
| 12.666667
| 34
| 0.789474
| 9
| 76
| 6.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171053
| 76
| 5
| 35
| 15.2
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
8e39b378aa03a8931704ca6ec7c2b5125433bf6c
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/console/logging/formatters/formatter.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/console/logging/formatters/formatter.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/console/logging/formatters/formatter.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/f8/a7/8b/459b7d53107b95fe33194b8b2ac6b687319c24771330a43eaa63b3e2cb
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4375
| 0
| 96
| 1
| 96
| 96
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8e4a676473c1554a45a484948ba97c06de09b12f
| 13
|
py
|
Python
|
Chapter 01/Chap01_Example1.146.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.146.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.146.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
print((r1*2))
| 13
| 13
| 0.615385
| 3
| 13
| 2.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 13
| 1
| 13
| 13
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
f3e79d6e05091a32bd78eb822c2157a1764107db
| 468
|
py
|
Python
|
code/chapter_02/listing_02_05.py
|
guinslym/python_earth_science_book
|
f4dd0115dbbce140c6713989f630a71238daa72c
|
[
"MIT"
] | 80
|
2021-04-19T10:03:57.000Z
|
2022-03-30T15:34:47.000Z
|
code/chapter_02/listing_02_05.py
|
guinslym/python_earth_science_book
|
f4dd0115dbbce140c6713989f630a71238daa72c
|
[
"MIT"
] | null | null | null |
code/chapter_02/listing_02_05.py
|
guinslym/python_earth_science_book
|
f4dd0115dbbce140c6713989f630a71238daa72c
|
[
"MIT"
] | 23
|
2021-04-25T03:50:07.000Z
|
2022-03-22T03:06:19.000Z
|
print('a sequence from 0 to 2')
for i in range(3):
print(i)
print('----------------------')
print('a sequence from 2 to 4')
for i in range(2, 5):
print(i)
print('----------------------')
print('a sequence from 2 to 8 with a step of 2')
for i in range(2, 9, 2):
print(i)
'''
Output:
a sequence from 0 to 2
0
1
2
----------------------
a sequence from 2 to 4
2
3
4
----------------------
a sequence from 2 to 8 with a step of 2
2
4
6
8
'''
| 14.625
| 48
| 0.478632
| 86
| 468
| 2.604651
| 0.255814
| 0.241071
| 0.348214
| 0.25
| 0.861607
| 0.705357
| 0.473214
| 0.473214
| 0.473214
| 0.258929
| 0
| 0.084034
| 0.237179
| 468
| 32
| 49
| 14.625
| 0.543417
| 0
| 0
| 0.454545
| 0
| 0
| 0.42053
| 0.145695
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.727273
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6d02f09727cb192e1b757e9c5468c3747a68160f
| 207
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/sale/tests/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/sale/tests/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/sale/tests/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import test_sale_to_invoice
import test_sale_order
from . import test_product_id_change
import test_sale_ui
| 29.571429
| 74
| 0.801932
| 34
| 207
| 4.588235
| 0.764706
| 0.25641
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005587
| 0.135266
| 207
| 6
| 75
| 34.5
| 0.865922
| 0.454106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6d1ec445d0d152cdcbdf8ebbef8e6370b7a5c9b9
| 130
|
py
|
Python
|
open_mafia_engine/util/classes.py
|
open-mafia/open_mafia_engine
|
19296748757a4a18d395a940d30aa48aaac9dd7a
|
[
"Apache-2.0"
] | 9
|
2018-08-19T21:47:00.000Z
|
2021-11-30T20:46:09.000Z
|
open_mafia_engine/util/classes.py
|
open-mafia/open_mafia_engine
|
19296748757a4a18d395a940d30aa48aaac9dd7a
|
[
"Apache-2.0"
] | 2
|
2021-05-16T00:12:39.000Z
|
2021-05-16T18:36:47.000Z
|
open_mafia_engine/util/classes.py
|
open-mafia/open_mafia_engine
|
19296748757a4a18d395a940d30aa48aaac9dd7a
|
[
"Apache-2.0"
] | 2
|
2020-11-28T06:13:10.000Z
|
2021-05-16T22:23:22.000Z
|
from typing import Type
def class_name(cls: Type[object]) -> str:
"""Returns the class name."""
return cls.__qualname__
| 18.571429
| 41
| 0.684615
| 18
| 130
| 4.666667
| 0.777778
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 130
| 6
| 42
| 21.666667
| 0.8
| 0.176923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6d43c48e0a0e839427af653d80a400f8f8366cf6
| 92
|
py
|
Python
|
Mock-JOI2018/Q1.py
|
taiki-okano/Competitive-Programming
|
7df76273740ae743ff04e9ed1ae8ffd6562d288d
|
[
"MIT"
] | 1
|
2016-01-23T13:33:05.000Z
|
2016-01-23T13:33:05.000Z
|
Mock-JOI2018/Q1.py
|
taiki-okano/algorithm
|
7df76273740ae743ff04e9ed1ae8ffd6562d288d
|
[
"MIT"
] | null | null | null |
Mock-JOI2018/Q1.py
|
taiki-okano/algorithm
|
7df76273740ae743ff04e9ed1ae8ffd6562d288d
|
[
"MIT"
] | null | null | null |
N = int(input())
print(int((((((N - 10) * 3 + 10) / 2) + 15) * 3 * (2 / 9) - N) * 3 + 47))
| 23
| 73
| 0.358696
| 17
| 92
| 1.941176
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215385
| 0.293478
| 92
| 3
| 74
| 30.666667
| 0.292308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ed9cc87d0a4a2b6529ac87dee0fea1d9f571db41
| 359
|
py
|
Python
|
src/process/models/base/secret/__init__.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 14
|
2020-12-19T15:06:13.000Z
|
2022-01-12T19:52:17.000Z
|
src/process/models/base/secret/__init__.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 43
|
2021-01-06T22:05:22.000Z
|
2022-03-10T10:30:30.000Z
|
src/process/models/base/secret/__init__.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 4
|
2020-12-18T23:10:09.000Z
|
2021-04-02T13:03:12.000Z
|
from models.base.secret.SecretBase import SecretBase
from models.base.secret.SecretTypeBase import SecretTypeBase
from models.base.secret.SecretSourceBase import SecretSourceBase
from models.base.secret.SecretSourceBasicAuthenticationBase import SecretSourceBasicAuthenticationBase
from models.base.secret.AuthenticationTypeBase import AuthenticationTypeBase
| 59.833333
| 102
| 0.902507
| 35
| 359
| 9.257143
| 0.285714
| 0.154321
| 0.216049
| 0.308642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05571
| 359
| 5
| 103
| 71.8
| 0.955752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b64ad9ec8469be0354d3bee9ac76c77903d9d7ee
| 35
|
py
|
Python
|
backprop/models/hf_seq2seq_tg_model/__init__.py
|
lucky7323/backprop
|
4daa756f3a46600d4dfa0631bb3607237df1fed6
|
[
"Apache-2.0"
] | 200
|
2021-03-22T17:29:46.000Z
|
2022-03-20T21:58:31.000Z
|
backprop/models/hf_seq2seq_tg_model/__init__.py
|
lucky7323/backprop
|
4daa756f3a46600d4dfa0631bb3607237df1fed6
|
[
"Apache-2.0"
] | 6
|
2021-04-15T06:48:32.000Z
|
2021-12-21T08:07:49.000Z
|
backprop/models/hf_seq2seq_tg_model/__init__.py
|
lucky7323/backprop
|
4daa756f3a46600d4dfa0631bb3607237df1fed6
|
[
"Apache-2.0"
] | 15
|
2021-03-25T05:25:43.000Z
|
2022-01-04T08:12:29.000Z
|
from .model import HFSeq2SeqTGModel
| 35
| 35
| 0.885714
| 4
| 35
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.085714
| 35
| 1
| 35
| 35
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b663d237b615991d169382ad076d2fe7f1174375
| 7,653
|
py
|
Python
|
build/build.py
|
felix3008/PkSploit
|
8c9ba99abb7018dfde181bdd1257941da2c41cd8
|
[
"MIT"
] | 16
|
2017-09-28T23:06:04.000Z
|
2021-07-16T20:23:08.000Z
|
build/build.py
|
felix3008/PkSploit
|
8c9ba99abb7018dfde181bdd1257941da2c41cd8
|
[
"MIT"
] | 4
|
2018-03-20T22:37:56.000Z
|
2021-12-26T17:35:07.000Z
|
build/build.py
|
felix3008/PkSploit
|
8c9ba99abb7018dfde181bdd1257941da2c41cd8
|
[
"MIT"
] | 1
|
2018-02-09T05:39:03.000Z
|
2018-02-09T05:39:03.000Z
|
#!python2
import os
import sys
import shutil
import ConfigParser
print "PkSploit Build Script - Revision 24-11-17"
if not os.path.exists("config.ini"):
print "No config.ini file! Rename and edit sample_config.ini in this folder!"
sys.exit(1)
Config = ConfigParser.ConfigParser()
Config.read("config.ini")
#ConfigParser Helper function from the Python wiki
#https://wiki.python.org/moin/ConfigParserExamples
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
choice=""
everything=5
if len(sys.argv) > 1:
if sys.argv[1] == "full":
choice=everything
odir=ConfigSectionMap("General")["outputdir"]
while choice not in range(1,6):
print "---------------------------"
print "What do you want to build?"
print ""
print "1. Assemble Gameboy ASM"
print "2. Assemble Gameboy ASM (Patch Savefile for quick start)"
print "3. Prepare Arduino Project"
print "4. Build and Upload Arduino Project"
print "5. Everything (run \"py build.py full\" to skip this menu in the future)"
print ""
print "9. Exit"
try:
choice = int(raw_input('--> '))
if choice == 9:
sys.exit(0)
except SystemExit:
sys.exit(0)
except:
print "Invalid Number"
choice = 0
def assemble():
print "Assembling Gameboy Code with trade offsets"
if os.path.exists(odir+"/gb_asm_trade/"):
shutil.rmtree(odir+"/gb_asm_trade/")
shutil.copytree("../gb_asm/",odir+"/gb_asm_trade/")
fa=open(odir+"/gb_asm_trade/main.asm","rb")
code=fa.read()
fa.close()
fa=open(odir+"/gb_asm_trade/main.asm","wb")
fa.write("rOFFSET EQUS \"$c486\"\n\rrEXTRA EQUS \"\""+code)
fa.close()
os.system("rgbasm.exe -o "+odir+"temp.o "+odir+"gb_asm_trade/main.asm")
os.system("rgblink.exe -o "+odir+"temp.gb "+odir+"temp.o")
fi = open(odir+"temp.gb","rb")
gb_rom = fi.read()
fi.close()
fo = open(odir+"main.bin", "wb")
fo.write(gb_rom[0x150:0x214])
fo.close()
print "Done Assembling Gameboy Code"
return
def makesave():
print "Assembling Gameboy Code with save file offsets"
if os.path.exists(odir+"/gb_asm_save/"):
shutil.rmtree(odir+"/gb_asm_save/")
shutil.copytree("../gb_asm/",odir+"/gb_asm_save/")
fa=open(odir+"/gb_asm_save/main.asm","rb")
code=fa.read()
fa.close()
fa=open(odir+"/gb_asm_save/main.asm","wb")
fa.write("rOFFSET EQUS \"$d280\"\n\rrEXTRA EQUS \"jr .turnoff\""+code)
fa.close()
os.system("rgbasm.exe -o "+odir+"temp_save.o "+odir+"gb_asm_save/main.asm")
os.system("rgblink.exe -o "+odir+"temp_save.gb "+odir+"temp_save.o")
fi = open(odir+"temp_save.gb","rb")
gb_rom = fi.read()
fi.close()
fo = open(odir+"main_save.bin", "wb")
fo.write(gb_rom[0x150:0x214])
fo.close()
print "Done Assembling Gameboy Code"
fsg=open("../savefile_templates/pokemon_blue_german.sav","rb")
save=fsg.read()
fsg.close()
fp = open(odir+"main_save.bin","rb")
code=fp.read()
fp.close()
save=save[:9847]+code+save[10043:]
#generate valid checksum
checksum=0
save_data = map(ord, save[9624:13602])
for num,bb in enumerate(save_data):
checksum=checksum+bb
flip=0xFF
checksum=chr((checksum%256)^flip)
save=save[:13603]+checksum+save[13604:]
fsgs = open(odir+"pokemon_blue_german_pksploit.sav", "wb")
fsgs.write(save)
fsgs.close()
return
def prepare():
print "Preparing Arduino Project"
if not os.path.exists(odir+"main.bin"):
print "Gameboy Code not assembled yet, doing that now..."
assemble()
if os.path.exists(odir+ConfigSectionMap("Arduino")['projectname']+"/"):
shutil.rmtree(odir+ConfigSectionMap("Arduino")['projectname']+"/")
shutil.copytree("../arduino/"+ConfigSectionMap("Arduino")["projectname"]+"/", odir+ConfigSectionMap("Arduino")["projectname"]+"/")
#Data Preperation Code By
#Esteban Fuentealba
# load program to run
fp = open(odir+"main.bin","rb")
program_str = fp.read()
fp.close()
program = map(ord, program_str)
data = []
# seed
data += [182, 147, 113, 81, 51, 23, 228, 205, 184, 165]
# preamble
data += [253, 253, 253, 253, 253, 253, 253, 253]
# party (bootstrap)
party = [248, 0, 54, 253, 1, 62, 88, 197, 195, 0xd6, 0xc5, 6, 21, 21, 21, 21, 21, 21, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 206, 227, 227, 255, 33, 160, 195, 1, 136, 1, 62, 0, 205, 224, 54, 17, 24, 218, 33, 89, 196, 205, 85, 25, 195, 21, 218, 139, 142, 128, 131, 136, 141, 134, 232, 232, 232, 80, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 64, 0, 0]
data += party
# preamble
data += [253, 253, 253, 253, 253]
# patchlist (196 bytes total)
patchlist = [255, 255] + program + ([0] * 200)
patchlist = patchlist[:196]
data += patchlist
party_and_patchlist = ", ".join(map(str, party + [253, 253, 253, 253, 253] + patchlist))
fileo = open(odir+ConfigSectionMap("Arduino")["projectname"]+"/data.h","wb")
fileo.write("unsigned const char DATA_BLOCK[] PROGMEM = {" + party_and_patchlist + "};")
fileo.close()
print "Done Preparing Arduino Project"
return
def buildarduino():
print "Building and uploading Arduino Project"
if not os.path.exists(odir+ConfigSectionMap("Arduino")["projectname"]+"/"):
print "Arduino Project not prepared yet, doing that now..."
prepare()
os.system("\""+ConfigSectionMap("Arduino")['path']+"/arduino_debug.exe\" -v --upload "+odir+ConfigSectionMap("Arduino")["projectname"]+"/"+ConfigSectionMap("Arduino")["projectname"]+".ino --board "+ConfigSectionMap("Arduino")["board"]+" --port "+ConfigSectionMap("Arduino")["port"])
print "Done Building and uploading Arduino Project"
return
if not os.path.exists(odir):
os.makedirs(odir)
if choice == 1 or choice == everything:
assemble()
if choice == 2 or choice == everything:
makesave()
if choice == 3 or choice == everything:
prepare()
if choice == 4 or choice == everything:
buildarduino()
| 37.150485
| 2,040
| 0.65164
| 1,217
| 7,653
| 4.05341
| 0.204601
| 0.420839
| 0.62761
| 0.834381
| 0.484492
| 0.403406
| 0.386986
| 0.337726
| 0.298804
| 0.285019
| 0
| 0.214497
| 0.161767
| 7,653
| 205
| 2,041
| 37.331707
| 0.554482
| 0.033974
| 0
| 0.223684
| 0
| 0
| 0.230999
| 0.028587
| 0
| 0
| 0.004335
| 0
| 0
| 0
| null | null | 0
| 0.026316
| null | null | 0.157895
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6642a202bb13ee1e29ef92339b442dc2b75e2f9
| 12,159
|
py
|
Python
|
Net640/apps/user_profile/tests/test_forms.py
|
86Ilya/net640kb
|
6724f3da3b678b637e0e776ee0d4953753ee2e05
|
[
"MIT"
] | 1
|
2019-06-18T09:50:29.000Z
|
2019-06-18T09:50:29.000Z
|
Net640/apps/user_profile/tests/test_forms.py
|
86Ilya/net640kb
|
6724f3da3b678b637e0e776ee0d4953753ee2e05
|
[
"MIT"
] | 10
|
2019-12-24T07:05:29.000Z
|
2022-02-10T07:42:44.000Z
|
Net640/apps/user_profile/tests/test_forms.py
|
86Ilya/net640kb
|
6724f3da3b678b637e0e776ee0d4953753ee2e05
|
[
"MIT"
] | null | null | null |
from uuid import uuid1
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import timezone
from Net640.apps.user_profile.models import User
from Net640.apps.user_profile.forms import UserForm, UserUpdateForm,\
UserRequestPasswordResetForm, UserPasswordUpdateForm
from Net640.testing.helpers import create_test_image
from Net640.settings import DATE_FORMAT, MAX_PAGE_SIZE
class TestUserForm(TestCase):
    """Validation tests for the registration form (``UserForm``)."""

    def setUp(self):
        self.random_name = str(uuid1())
        self.email = self.random_name + '@m.ru'
        self.password = '12345678'

    def _form_data(self, **overrides):
        # Baseline valid payload; individual tests override single fields.
        data = {'username': self.random_name,
                'email': self.email,
                'password': self.password,
                'password_again': self.password,
                }
        data.update(overrides)
        return data

    def test_user_form(self):
        image, content_type = create_test_image()
        avatar = SimpleUploadedFile('myavatar.bmp', image.read(), content_type)
        form = UserForm(self._form_data(), {'avatar': avatar})
        self.assertTrue(form.is_valid())

    def test_user_form_when_username_is_too_long(self):
        form = UserForm(self._form_data(username='X' * 121))
        self.assertFalse(form.is_valid())
        self.assertIn('Ensure this value has at most 120 characters', form.errors['username'][0])

    def test_user_form_when_username_is_too_short(self):
        form = UserForm(self._form_data(username='XX'))
        self.assertFalse(form.is_valid())
        self.assertIn('Ensure this value has at least 3 characters', form.errors['username'][0])

    def test_user_form_when_username_had_wrong_symbols(self):
        # A dot is outside the allowed character set for usernames.
        form = UserForm(self._form_data(username='X.X'))
        self.assertFalse(form.is_valid())
        self.assertIn('Username should contain only letters, digits, underscores, and dashes',
                      form.errors['username'][0])

    def test_user_form_when_email_is_incorrect(self):
        # Missing '@' separator makes the address invalid.
        form = UserForm(self._form_data(email=self.random_name + 'm.ru'))
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['email'][0], 'Enter a valid email address.')

    def test_user_form_when_password_is_incorrect(self):
        form = UserForm(self._form_data(password='123456789', password_again='12345678'))
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'][0], 'Passwords mismatch')

    def test_user_form_when_avatar_is_too_large(self):
        # 3000 presumably selects an image above the 640Kb budget — the
        # exact unit is defined by create_test_image.
        image, content_type = create_test_image(3000)
        avatar = SimpleUploadedFile('myavatar.bmp', image.read(), content_type)
        form = UserForm(self._form_data(), {'avatar': avatar})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'][0], 'You have only 640Kb for all purposes!')
class TestUserUpdateForm(TestCase):
    """Tests for ``UserUpdateForm``: profile text fields, avatar replacement
    and password changes on an existing ``User``."""

    def setUp(self):
        random_name = str(uuid1())
        img_file, content_type = create_test_image()
        avatar = SimpleUploadedFile('myavatar.bmp', img_file.read(), content_type)
        self.user = User(username=random_name, email=random_name + '@m.ru', avatar=avatar)
        self.user.set_password('12345678')
        self.user.firstname = 'user firstname'
        self.user.lastname = 'user lastname'
        self.user.patronymic = 'user patronymic'
        self.user.birth_date = timezone.datetime(year=1986, month=4, day=10)
        self.user.save()

    def test_update_basic_user_text_data(self):
        newfirstname = 'new firstname'
        newlastname = 'new lastname'
        newpatronymic = 'new patronymic'
        newbirth_date = '10.04.1986'
        oldpass = self.user.password
        update_form = UserUpdateForm({'firstname': newfirstname,
                                      'lastname': newlastname,
                                      'patronymic': newpatronymic,
                                      'birth_date': newbirth_date},
                                     instance=self.user)
        # Fixed: is_valid() used to be called twice in a row; one call is enough.
        self.assertTrue(update_form.is_valid())
        update_form.save()
        self.user.refresh_from_db()
        self.assertEqual(self.user.firstname, newfirstname)
        self.assertEqual(self.user.lastname, newlastname)
        self.assertEqual(self.user.patronymic, newpatronymic)
        # A plain profile update must leave the password hash untouched.
        self.assertEqual(self.user.password, oldpass)
        self.assertEqual(timezone.datetime.strftime(self.user.birth_date, DATE_FORMAT), newbirth_date)

    def test_update_user_avatar(self):
        img_file, content_type = create_test_image(100)
        avatar = {'avatar': SimpleUploadedFile('newavatar.bmp', img_file.read(), content_type)}
        update_form = UserUpdateForm({}, avatar, instance=self.user)
        self.assertTrue(update_form.is_valid())
        update_form.save()
        self.assertEqual(avatar['avatar'].size, update_form.cleaned_data['avatar'].size)
        self.assertEqual('newavatar.bmp', update_form.cleaned_data['avatar'].name)

    def test_update_user_avatar_when_pic_is_too_large(self):
        img_file, content_type = create_test_image(3000)
        avatar = {'avatar': SimpleUploadedFile('newavatar.bmp', img_file.read(), content_type)}
        update_form = UserUpdateForm({}, avatar, instance=self.user)
        self.assertFalse(update_form.is_valid())
        self.assertEqual(update_form.errors['__all__'][0], 'You have only 640Kb for all purposes!')

    def test_update_user_password(self):
        newpass = 'qweasdzxc'
        update_form = UserUpdateForm({'password': newpass,
                                      'password_again': newpass},
                                     instance=self.user)
        self.assertTrue(update_form.is_valid())
        update_form.save()
        self.user.refresh_from_db()
        self.assertTrue(self.user.check_password(newpass))

    def test_update_user_password_when_passwords_are_differ(self):
        newpass = 'qweasdzxc'
        update_form = UserUpdateForm({'password': newpass,
                                      'password_again': newpass + "occasional symbols"},
                                     instance=self.user)
        self.assertFalse(update_form.is_valid())
        self.assertEqual(update_form.errors['__all__'][0], 'Passwords mismatch')

    def test_update_user_password_when_password_is_short(self):
        newpass = 'x' * 7
        update_form = UserUpdateForm({'password': newpass,
                                      'password_again': newpass},
                                     instance=self.user)
        self.assertFalse(update_form.is_valid())
        self.assertEqual(update_form.errors['__all__'][0], 'Password length must be at least 8 symbols')

    def test_update_user_password_when_password_is_too_large(self):
        newpass = 'x' * (MAX_PAGE_SIZE + 1)
        update_form = UserUpdateForm({'password': newpass,
                                      'password_again': newpass},
                                     instance=self.user)
        self.assertFalse(update_form.is_valid())
        self.assertEqual(update_form.errors['__all__'][0], 'You have only 640Kb for all purposes!')
class TestUserRequestPasswordResetForm(TestCase):
    """Email validation tests for the password-reset request form."""

    def _bound_form(self, email):
        # Bind the form to a single candidate address.
        return UserRequestPasswordResetForm({'email': email})

    def test_request_password_reset_form_when_email_is_correct(self):
        self.assertTrue(self._bound_form("xxx@m.ru").is_valid())

    def test_request_password_reset_form_when_email_is_too_long(self):
        form = self._bound_form('X' * 252 + "@m.ru")
        self.assertFalse(form.is_valid())
        self.assertIn('Ensure this value has at most 256 characters', form.errors['email'][0])

    def test_request_password_reset_form_when_email_had_wrong_symbols(self):
        # Non-ASCII (Cyrillic) letter inside the local part.
        form = self._bound_form("xФxx@m.ru")
        self.assertFalse(form.is_valid())
        self.assertIn('Enter a valid email address.', form.errors['email'][0])

    def test_request_password_reset_form_when_email_is_simple_string(self):
        form = self._bound_form("abc")
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['email'][0], 'Enter a valid email address.')
class TestUserPasswordUpdateForm(TestCase):
    """Validation and persistence tests for ``UserPasswordUpdateForm``."""

    def setUp(self):
        name = str(uuid1())
        self.user = User(username=name, email=name + '@m.ru')
        self.user.set_password('12345678')
        self.user.firstname = 'user firstname'
        self.user.lastname = 'user lastname'
        self.user.patronymic = 'user patronymic'
        self.user.birth_date = timezone.datetime(year=1986, month=4, day=10)
        self.user.save()

    def _password_form(self, password, password_again):
        # Bind both password fields against the fixture user.
        return UserPasswordUpdateForm({'password': password,
                                       'password_again': password_again},
                                      instance=self.user)

    def test_user_password_update_form(self):
        form = self._password_form('qweasdzxc', 'qweasdzxc')
        self.assertTrue(form.is_valid())
        form.save()
        self.user.refresh_from_db()
        self.assertTrue(self.user.check_password('qweasdzxc'))

    def test_user_password_update_form_when_passwords_are_differ(self):
        form = self._password_form('qweasdzxc', 'qweasdzxc' + "occasional symbols")
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'][0], 'Passwords mismatch')

    def test_user_password_update_form_when_password_is_short(self):
        short = 'x' * 7
        form = self._password_form(short, short)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'][0], 'Password length must be at least 8 symbols')

    def test_user_password_update_form_when_password_is_too_large(self):
        oversized = 'x' * (MAX_PAGE_SIZE + 1)
        form = self._password_form(oversized, oversized)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'][0], 'You have only 640Kb for all purposes!')
| 47.311284
| 104
| 0.614689
| 1,309
| 12,159
| 5.411765
| 0.12605
| 0.046302
| 0.035714
| 0.035997
| 0.804489
| 0.756917
| 0.74633
| 0.733343
| 0.698899
| 0.660785
| 0
| 0.0157
| 0.282342
| 12,159
| 256
| 105
| 47.496094
| 0.796127
| 0
| 0
| 0.60181
| 0
| 0
| 0.129369
| 0
| 0
| 0
| 0
| 0
| 0.21267
| 1
| 0.113122
| false
| 0.316742
| 0.036199
| 0
| 0.167421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
b695ecf15abe5add173a5043a06f9d1f0fcef128
| 129
|
py
|
Python
|
challenges/admin.py
|
SuperLeet-CTF/SuperLeet-CTF
|
3af085d310a8303ef3aff376ba930649586d5993
|
[
"MIT"
] | 4
|
2017-10-09T21:53:44.000Z
|
2020-12-02T19:11:08.000Z
|
challenges/admin.py
|
SuperLeet-CTF/SuperLeet-CTF
|
3af085d310a8303ef3aff376ba930649586d5993
|
[
"MIT"
] | null | null | null |
challenges/admin.py
|
SuperLeet-CTF/SuperLeet-CTF
|
3af085d310a8303ef3aff376ba930649586d5993
|
[
"MIT"
] | 1
|
2020-09-02T06:02:31.000Z
|
2020-09-02T06:02:31.000Z
|
from django.contrib import admin
from .models import Challenge, ChallengeAdmin
# Expose Challenge in the Django admin with its custom ChallengeAdmin options.
# NOTE(review): ChallengeAdmin is defined in models.py rather than here —
# unusual placement; confirm it subclasses admin.ModelAdmin.
admin.site.register(Challenge, ChallengeAdmin)
| 18.428571
| 46
| 0.829457
| 15
| 129
| 7.133333
| 0.666667
| 0.429907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108527
| 129
| 6
| 47
| 21.5
| 0.930435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fcdfbba710783b70611c3dcf3046802aca1143eb
| 109
|
py
|
Python
|
nri/nri/models/decoders/__init__.py
|
take-cheeze/models
|
3ded8fd062c57f20f6154cac2dd0d998181de755
|
[
"MIT"
] | 112
|
2018-04-18T07:13:03.000Z
|
2022-03-11T03:36:34.000Z
|
nri/nri/models/decoders/__init__.py
|
take-cheeze/models
|
3ded8fd062c57f20f6154cac2dd0d998181de755
|
[
"MIT"
] | 16
|
2018-05-11T11:41:08.000Z
|
2021-04-24T03:50:54.000Z
|
nri/nri/models/decoders/__init__.py
|
take-cheeze/models
|
3ded8fd062c57f20f6154cac2dd0d998181de755
|
[
"MIT"
] | 45
|
2018-04-18T07:13:06.000Z
|
2021-12-22T03:46:18.000Z
|
from nri.models.decoders.mlp_decoder import MLPDecoder
from nri.models.decoders.rnn_decoder import RNNDecoder
| 54.5
| 54
| 0.880734
| 16
| 109
| 5.875
| 0.625
| 0.148936
| 0.276596
| 0.446809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06422
| 109
| 2
| 55
| 54.5
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1e00f3d6ffc9e102f59668c641f2bbe3d94be3c5
| 260
|
py
|
Python
|
hwtHls/scheduler/errors.py
|
Nic30/hwtHls
|
1fac6ed128318e698d51e15e9871249ddf243e1c
|
[
"MIT"
] | 8
|
2018-09-25T03:28:11.000Z
|
2021-12-15T07:44:38.000Z
|
hwtHls/scheduler/errors.py
|
Nic30/hwtHls
|
1fac6ed128318e698d51e15e9871249ddf243e1c
|
[
"MIT"
] | 1
|
2020-12-21T10:56:44.000Z
|
2020-12-21T10:56:44.000Z
|
hwtHls/scheduler/errors.py
|
Nic30/hwtHls
|
1fac6ed128318e698d51e15e9871249ddf243e1c
|
[
"MIT"
] | 2
|
2018-09-25T03:28:18.000Z
|
2021-12-15T10:28:35.000Z
|
class UnresolvedChild(Exception):
    """Raised when a node's children must be lazy-loaded before it can be used."""
    # The redundant `pass` after the docstring was removed; the docstring
    # alone is a valid class body.
class TimeConstraintError(Exception):
    """Raised when it is not possible to satisfy the timing constraints."""
    # Fixed docstring typo ("possble") and dropped the redundant `pass`.
| 20
| 73
| 0.688462
| 27
| 260
| 6.62963
| 0.740741
| 0.201117
| 0.268156
| 0.312849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242308
| 260
| 12
| 74
| 21.666667
| 0.908629
| 0.488462
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
1e016da072aa58cd89dab909bf6e6b57e9af98ca
| 89
|
py
|
Python
|
gym-matris/Matris/kezmenu/__init__.py
|
yipsang/tetris-rl
|
d8d8e13eabee1321c54e941725b788d7b26b04d0
|
[
"MIT"
] | 3
|
2020-12-15T18:08:22.000Z
|
2020-12-18T06:09:49.000Z
|
gym-matris/Matris/kezmenu/__init__.py
|
yipsang/tetris-rl
|
d8d8e13eabee1321c54e941725b788d7b26b04d0
|
[
"MIT"
] | null | null | null |
gym-matris/Matris/kezmenu/__init__.py
|
yipsang/tetris-rl
|
d8d8e13eabee1321c54e941725b788d7b26b04d0
|
[
"MIT"
] | null | null | null |
from .kezmenu import KezMenu, runTests
from .kezmenu import __version__, __description__
| 29.666667
| 49
| 0.842697
| 10
| 89
| 6.7
| 0.6
| 0.328358
| 0.507463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 89
| 2
| 50
| 44.5
| 0.848101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1e0c8fa359dd6bb82555ee018c7ce0d12d41fb97
| 41
|
py
|
Python
|
Neis_API/service/request/__init__.py
|
Piop2/Neis-API
|
517041ab3ba21b3d5d147e2d708cd7ee0dd25fcb
|
[
"MIT"
] | 3
|
2021-11-06T08:41:03.000Z
|
2022-01-13T16:02:29.000Z
|
Neis_API/service/request/__init__.py
|
Piop2/Neis-API
|
517041ab3ba21b3d5d147e2d708cd7ee0dd25fcb
|
[
"MIT"
] | null | null | null |
Neis_API/service/request/__init__.py
|
Piop2/Neis-API
|
517041ab3ba21b3d5d147e2d708cd7ee0dd25fcb
|
[
"MIT"
] | 1
|
2022-01-25T14:59:08.000Z
|
2022-01-25T14:59:08.000Z
|
import Neis_API.service.request.request
| 13.666667
| 39
| 0.853659
| 6
| 41
| 5.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 2
| 40
| 20.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1e130b6d4b2aa9d208332b7c6e19640612d0470c
| 61
|
py
|
Python
|
tests/test.py
|
floodsung/raisimGym
|
f0162d4f31e653e27f0048448d5745c7d7c12369
|
[
"MIT"
] | 2
|
2019-08-18T03:18:53.000Z
|
2021-02-02T08:16:03.000Z
|
tests/test.py
|
floodsung/raisimGym
|
f0162d4f31e653e27f0048448d5745c7d7c12369
|
[
"MIT"
] | null | null | null |
tests/test.py
|
floodsung/raisimGym
|
f0162d4f31e653e27f0048448d5745c7d7c12369
|
[
"MIT"
] | null | null | null |
# Smoke test: importing raisim_gym and resolving the attribute chain would
# raise ImportError/AttributeError if the extension failed to build.
import raisim_gym
print(raisim_gym.env.ANYmal.AnymalVecEnv)
| 15.25
| 41
| 0.852459
| 9
| 61
| 5.555556
| 0.777778
| 0.36
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 61
| 4
| 41
| 15.25
| 0.877193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
1e25887b2bec1ab9f61a4638f513fb25ebaabc2b
| 150
|
py
|
Python
|
apps/life_sci/python/dgllife/__init__.py
|
LunaBlack/dgl
|
bd1e48a51e348b0e8e25622325adeb5ddea1c0ea
|
[
"Apache-2.0"
] | 2
|
2021-12-09T12:36:13.000Z
|
2022-03-01T21:22:36.000Z
|
apps/life_sci/python/dgllife/__init__.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | null | null | null |
apps/life_sci/python/dgllife/__init__.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | 2
|
2020-12-07T09:34:01.000Z
|
2020-12-13T06:18:58.000Z
|
"""DGL-based package for applications in life science."""
from . import data
from . import model
from . import utils
from .libinfo import __version__
| 25
| 57
| 0.766667
| 21
| 150
| 5.285714
| 0.714286
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153333
| 150
| 5
| 58
| 30
| 0.874016
| 0.34
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1e4f75e717744e15d2c98e6269c58182b4d5c92d
| 5,722
|
py
|
Python
|
tests/test_prandtl_meyer_relations.py
|
Rigel09/CompAero
|
79a2902880c5bf6030794d585a48fbbf0c7df344
|
[
"MIT"
] | 1
|
2022-03-29T23:59:16.000Z
|
2022-03-29T23:59:16.000Z
|
tests/test_prandtl_meyer_relations.py
|
Rigel09/CompAero
|
79a2902880c5bf6030794d585a48fbbf0c7df344
|
[
"MIT"
] | 7
|
2022-01-15T15:38:45.000Z
|
2022-01-22T16:32:16.000Z
|
tests/test_prandtl_meyer_relations.py
|
Rigel09/CompAero
|
79a2902880c5bf6030794d585a48fbbf0c7df344
|
[
"MIT"
] | null | null | null |
import math
from numpy import isnat
from pytest import approx
import pytest
from CompAero.PrandtlMeyer import PrandtlMeyer as pm
from CompAero.internal import InvalidOptionCombinationError
#########################################################################################
# Test the static functions of the class
#########################################################################################
class TestPrandtlMeyerClassFuncs:
    """Checks for the static helpers ``calc_nu`` and ``calc_mach_from_nu``."""

    gamma = 1.4

    def test_subsonic_nu_from_mach(self):
        # Subsonic input yields nu == 0.
        assert pm.calc_nu(0.5, self.gamma) == 0

    def test_supersonic_nu_from_mach(self):
        assert pm.calc_nu(1.5, self.gamma) == approx(11.9052, rel=1e-4)

    def test_subsonic_mach_from_nu(self):
        # nu == 0 maps back to exactly Mach 1.
        assert pm.calc_mach_from_nu(0, self.gamma) == 1.0

    def test_supersonic_mach_from_nu(self):
        assert pm.calc_mach_from_nu(11.9052, self.gamma) == approx(1.5, rel=1e-1)
#########################################################################################
# Test the different construction methods of the class
#########################################################################################
class TestPrandtlMeyerClassSubsonic:
    """Subsonic construction: every expansion quantity comes back as NaN."""

    gamma = 1.4

    def test_subsonic_construction_given_mach(self):
        state = pm(self.gamma, mach=0.5)
        assert state.gamma == approx(self.gamma, rel=1e-1)
        assert state.mach == approx(0.5, rel=1e-1)
        # All derived/downstream quantities are NaN below Mach 1.
        for attr in ('nu', 'mu', 'deflectionAngle',
                     'dwmStrm_nu', 'dwmStrm_mu', 'dwmStrm_mach'):
            assert math.isnan(getattr(state, attr))
class TestPrandtlMeyerClassSupersonic:
    """Supersonic construction from each accepted combination of inputs.

    Every case describes the same flow state (M = 1.5, gamma = 1.4), so the
    expected values are shared via the helper checks below.
    """

    gamma = 1.4

    def _check_upstream(self, state):
        # Upstream state common to every construction path.
        assert state.gamma == approx(self.gamma, rel=1e-1)
        assert state.mach == approx(1.5, rel=1e-1)
        assert state.nu == approx(11.9052, rel=1e-4)
        assert state.mu == approx(41.81031, rel=1e-4)

    def _check_downstream_nan(self, state):
        # No deflection angle given: downstream quantities stay NaN.
        assert math.isnan(state.deflectionAngle)
        assert math.isnan(state.dwmStrm_nu)
        assert math.isnan(state.dwmStrm_mu)
        assert math.isnan(state.dwmStrm_mach)

    def _check_downstream(self, state):
        # Downstream state for a 10-degree deflection.
        assert state.deflectionAngle == approx(10)
        assert state.dwmStrm_nu == approx(21.90521, rel=1e-4)
        assert state.dwmStrm_mu == approx(32.9008, rel=1e-4)
        assert state.dwmStrm_mach == approx(1.84099, rel=1e-4)

    def test_supersonic_construction_given_mach(self):
        state = pm(self.gamma, mach=1.5)
        self._check_upstream(state)
        self._check_downstream_nan(state)

    def test_supersonic_construction_given_nu(self):
        state = pm(self.gamma, nu=11.9052)
        self._check_upstream(state)
        self._check_downstream_nan(state)

    def test_supersonic_construction_given_mu(self):
        state = pm(self.gamma, mu=41.81031)
        self._check_upstream(state)
        self._check_downstream_nan(state)

    def test_supersonic_construction_given_deflection_dwnstrm_mach(self):
        state = pm(self.gamma, deflectionAngle=10, dwnStreamMach=1.84099)
        self._check_upstream(state)
        self._check_downstream(state)

    def test_supersonic_construction_given_deflection_dwnstrm_mu(self):
        state = pm(self.gamma, deflectionAngle=10, dwnStreamMu=32.9008)
        self._check_upstream(state)
        self._check_downstream(state)

    def test_supersonic_construction_given_deflection_dwnstrm_nu(self):
        state = pm(self.gamma, deflectionAngle=10, dwnstreamNu=21.90521)
        self._check_upstream(state)
        self._check_downstream(state)

    def test_supersonic_construction_given_defelction_angle_radians(self):
        # NOTE(review): method name misspells "deflection"; kept unchanged
        # to avoid churn in test reports.
        state = pm(self.gamma, deflectionAngle=math.radians(10),
                   inDegrees=False, dwnstreamNu=21.90521)
        self._check_upstream(state)
        self._check_downstream(state)

    def test_supersonic_invalid_construction(self):
        # A deflection angle alone does not pin down the flow state.
        with pytest.raises(InvalidOptionCombinationError):
            pm(self.gamma, deflectionAngle=10)
| 43.679389
| 102
| 0.629151
| 791
| 5,722
| 4.436157
| 0.087231
| 0.131091
| 0.046167
| 0.075235
| 0.823027
| 0.801083
| 0.750356
| 0.714734
| 0.698775
| 0.673696
| 0
| 0.078261
| 0.196085
| 5,722
| 130
| 103
| 44.015385
| 0.684565
| 0.015904
| 0
| 0.621359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.660194
| 1
| 0.126214
| false
| 0
| 0.058252
| 0
| 0.242718
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e57932b115711c3c675a96f47758d730ff7509d
| 19,452
|
py
|
Python
|
tests/system/action/mediafile/test_update.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | null | null | null |
tests/system/action/mediafile/test_update.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | 19
|
2021-11-22T16:25:54.000Z
|
2021-11-25T13:38:13.000Z
|
tests/system/action/mediafile/test_update.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | null | null | null |
from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class MediafileUpdateActionTest(BaseActionTestCase):
    """System tests for the ``mediafile.update`` action: how changes to
    ``access_group_ids`` propagate ``inherited_access_group_ids`` and
    ``is_public`` between parent and child mediafiles."""

    def setUp(self) -> None:
        super().setUp()
        # Not used by the tests visible here — presumably consumed by shared
        # permission-test helpers elsewhere in this class; verify before removing.
        self.permission_test_model = {
            "group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
            "mediafile/111": {"title": "title_srtgb123", "meeting_id": 1},
        }
def test_update_correct(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"mediafile/111": {"title": "title_srtgb123", "meeting_id": 1},
}
)
response = self.request(
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("title") == "title_Xcdfgee"
assert model.get("access_group_ids") == [7]
assert model.get("inherited_access_group_ids") == [7]
assert model.get("is_public") is False
def test_update_children(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"mediafile/110": {
"title": "title_ekxORNiV",
"child_ids": [111],
"is_public": False,
"inherited_access_group_ids": [7],
"access_group_ids": [7],
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 110, "title": "title_Xcdfgee", "access_group_ids": [7]},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("title") == "title_srtgb123"
assert model.get("inherited_access_group_ids") == [7]
assert model.get("is_public") is False
def test_update_parent(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("title") == "title_Xcdfgee"
assert model.get("access_group_ids") == [7]
assert model.get("inherited_access_group_ids") == [7]
assert model.get("is_public") is False
def test_update_parent_inherited_list(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"group/8": {"name": "group_sdfafd", "user_ids": [], "meeting_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"inherited_access_group_ids": [8],
"is_public": False,
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("title") == "title_Xcdfgee"
assert model.get("access_group_ids") == [7]
assert model.get("inherited_access_group_ids") == []
assert model.get("is_public") is False
def test_update_parent_case1(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"access_group_ids": [],
"inherited_access_group_ids": [],
"is_public": True,
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": []},
)
self.assert_status_code(response, 200)
model_child = self.get_model("mediafile/111")
assert model_child.get("access_group_ids") == []
assert model_child.get("inherited_access_group_ids") == []
assert model_child.get("is_public") is True
def test_update_parent_case2(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/2": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"group/4": {"name": "group_sdfafd", "user_ids": [], "meeting_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"inherited_access_group_ids": [2, 4],
"access_group_ids": [2, 4],
"is_public": False,
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": []},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("access_group_ids") == []
assert model.get("inherited_access_group_ids") == [2, 4]
assert model.get("is_public") is False
def test_update_parent_case3(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/3": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"group/6": {"name": "group_sdfafd", "user_ids": [], "meeting_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"inherited_access_group_ids": [],
"access_group_ids": [],
"is_public": True,
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{
"id": 111,
"title": "title_Xcdfgee",
"access_group_ids": [3, 6],
},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("access_group_ids") == [3, 6]
assert model.get("inherited_access_group_ids") == [3, 6]
assert model.get("is_public") is False
def test_update_parent_case4(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/1": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"group/2": {"name": "group_sdfafd", "user_ids": [], "meeting_id": 1},
"group/3": {"name": "group_ghjeei", "user_ids": [], "meeting_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"inherited_access_group_ids": [1, 2],
"access_group_ids": [1, 2],
"is_public": False,
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{
"id": 111,
"title": "title_Xcdfgee",
"access_group_ids": [2, 3],
},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("access_group_ids") == [2, 3]
assert model.get("inherited_access_group_ids") == [2]
assert model.get("is_public") is False
def test_update_parent_case5(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"group/1": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
"group/2": {"name": "group_sdfafd", "user_ids": [], "meeting_id": 1},
"group/3": {"name": "group_ghjeei", "user_ids": [], "meeting_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"inherited_access_group_ids": [1, 2],
"access_group_ids": [1, 2],
"is_public": False,
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": [3]},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("access_group_ids") == [3]
assert model.get("inherited_access_group_ids") == []
assert model.get("is_public") is False
def test_update_parent_inherited_true(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"child_ids": [111],
"inherited_access_group_ids": [],
"access_group_ids": [],
"is_public": False,
"meeting_id": 1,
},
"mediafile/111": {
"title": "title_srtgb123",
"parent_id": 110,
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": []},
)
self.assert_status_code(response, 200)
model = self.get_model("mediafile/111")
assert model.get("access_group_ids") == []
assert model.get("inherited_access_group_ids") == []
assert model.get("is_public") is False
def test_update_wrong_id(self) -> None:
self.create_model(
"mediafile/111",
{"title": "title_srtgb123"},
)
response = self.request(
"mediafile.update", {"id": 112, "title": "title_Xcdfgee"}
)
self.assert_status_code(response, 400)
model = self.get_model("mediafile/111")
assert model.get("title") == "title_srtgb123"
    def test_update_parent_and_children(self) -> None:
        """Access groups set on a mid-level node propagate to its direct child.

        Node 111 gets access group [7]; both 111 and its child 112 end up
        with inherited group [7] and are non-public.
        """
        self.set_models(
            {
                "meeting/1": {"is_active_in_organization_id": 1},
                "group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
                "mediafile/110": {
                    "title": "title_srtgb199",
                    "child_ids": [111],
                    "meeting_id": 1,
                },
                "mediafile/111": {
                    "title": "title_srtgb123",
                    "parent_id": 110,
                    "child_ids": [112],
                    "meeting_id": 1,
                },
                "mediafile/112": {
                    "title": "title_srtgb123",
                    "parent_id": 111,
                    "access_group_ids": [7],
                    "meeting_id": 1,
                },
            }
        )
        response = self.request(
            "mediafile.update",
            {"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
        )
        self.assert_status_code(response, 200)
        model = self.get_model("mediafile/111")
        assert model.get("title") == "title_Xcdfgee"
        assert model.get("access_group_ids") == [7]
        assert model.get("inherited_access_group_ids") == [7]
        assert model.get("is_public") is False
        child = self.get_model("mediafile/112")
        assert child.get("access_group_ids") == [7]
        assert child.get("inherited_access_group_ids") == [7]
        assert child.get("is_public") is False
    def test_update_parent_and_children_2(self) -> None:
        """Access groups set on a node propagate to all of its direct children.

        Same as test_update_parent_and_children, but node 111 has two
        children (112 and 113); both receive inherited group [7].
        """
        self.set_models(
            {
                "meeting/1": {"is_active_in_organization_id": 1},
                "group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
                "mediafile/110": {
                    "title": "title_srtgb199",
                    "child_ids": [111],
                    "meeting_id": 1,
                },
                "mediafile/111": {
                    "title": "title_srtgb123",
                    "parent_id": 110,
                    "child_ids": [112, 113],
                    "meeting_id": 1,
                },
                "mediafile/112": {
                    "title": "title_srtgb123",
                    "parent_id": 111,
                    "access_group_ids": [7],
                    "meeting_id": 1,
                },
                "mediafile/113": {
                    "title": "title_srtgb123",
                    "parent_id": 111,
                    "access_group_ids": [7],
                    "meeting_id": 1,
                },
            }
        )
        response = self.request(
            "mediafile.update",
            {"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
        )
        self.assert_status_code(response, 200)
        model = self.get_model("mediafile/111")
        assert model.get("title") == "title_Xcdfgee"
        assert model.get("access_group_ids") == [7]
        assert model.get("inherited_access_group_ids") == [7]
        assert model.get("is_public") is False
        child = self.get_model("mediafile/112")
        assert child.get("access_group_ids") == [7]
        assert child.get("inherited_access_group_ids") == [7]
        assert child.get("is_public") is False
        child = self.get_model("mediafile/113")
        assert child.get("access_group_ids") == [7]
        assert child.get("inherited_access_group_ids") == [7]
        assert child.get("is_public") is False
    def test_update_parent_and_children_3(self) -> None:
        """Access groups propagate recursively through grandchildren.

        Node 111 gets access group [7]; the chain 111 -> 112 -> 113 all end
        up with inherited group [7] and are non-public.
        """
        self.set_models(
            {
                "meeting/1": {"is_active_in_organization_id": 1},
                "group/7": {"name": "group_LxAHErRs", "user_ids": [], "meeting_id": 1},
                "mediafile/110": {
                    "title": "title_srtgb199",
                    "child_ids": [111],
                    "meeting_id": 1,
                },
                "mediafile/111": {
                    "title": "title_srtgb123",
                    "parent_id": 110,
                    "child_ids": [112],
                    "meeting_id": 1,
                },
                "mediafile/112": {
                    "title": "title_srtgb123",
                    "parent_id": 111,
                    "access_group_ids": [7],
                    "child_ids": [113],
                    "meeting_id": 1,
                },
                "mediafile/113": {
                    "title": "title_srtgb123",
                    "parent_id": 112,
                    "access_group_ids": [7],
                    "meeting_id": 1,
                },
            }
        )
        response = self.request(
            "mediafile.update",
            {"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
        )
        self.assert_status_code(response, 200)
        model = self.get_model("mediafile/111")
        assert model.get("title") == "title_Xcdfgee"
        assert model.get("access_group_ids") == [7]
        assert model.get("inherited_access_group_ids") == [7]
        assert model.get("is_public") is False
        child = self.get_model("mediafile/112")
        assert child.get("access_group_ids") == [7]
        assert child.get("inherited_access_group_ids") == [7]
        assert child.get("is_public") is False
        child = self.get_model("mediafile/113")
        assert child.get("access_group_ids") == [7]
        assert child.get("inherited_access_group_ids") == [7]
        assert child.get("is_public") is False
def test_update_filename_error(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"mediafile/110": {
"title": "title_srtgb199",
"filename": "testfile.txt",
"meeting_id": 1,
},
}
)
response = self.request(
"mediafile.update",
{"id": 110, "filename": "testfile.txt2"},
)
self.assert_status_code(response, 400)
self.assertIn(
"data must not contain {'filename'} properties", response.json["message"]
)
def test_update_no_permissions(self) -> None:
self.base_permission_test(
self.permission_test_model,
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
)
def test_update_permissions(self) -> None:
self.base_permission_test(
self.permission_test_model,
"mediafile.update",
{"id": 111, "title": "title_Xcdfgee", "access_group_ids": [7]},
Permissions.Mediafile.CAN_MANAGE,
)
| 39.138833
| 87
| 0.473833
| 1,868
| 19,452
| 4.633833
| 0.050857
| 0.088956
| 0.113216
| 0.06585
| 0.932994
| 0.921557
| 0.90677
| 0.893022
| 0.883087
| 0.873498
| 0
| 0.052478
| 0.384793
| 19,452
| 496
| 88
| 39.217742
| 0.670845
| 0
| 0
| 0.675052
| 0
| 0
| 0.292412
| 0.054904
| 0
| 0
| 0
| 0
| 0.161426
| 1
| 0.037736
| false
| 0
| 0.004193
| 0
| 0.044025
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e707b6fa9b69855859890bbdf3539cc7ee849ff
| 228
|
py
|
Python
|
Examples/test.py
|
juancotrino/Hapi
|
db65313a333f9b763c1021f84ef656835fb2f855
|
[
"MIT"
] | null | null | null |
Examples/test.py
|
juancotrino/Hapi
|
db65313a333f9b763c1021f84ef656835fb2f855
|
[
"MIT"
] | null | null | null |
Examples/test.py
|
juancotrino/Hapi
|
db65313a333f9b763c1021f84ef656835fb2f855
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 16:34:44 2020
@author: mofarrag
"""
# Import the Hapi package with a fallback chain: try the installed name,
# then the alternative all-caps spelling, and finally add the current
# working directory to sys.path so the example can run straight from a
# source checkout without installing the package.
try:
    import Hapi
except ImportError:
    try:
        import HAPI
    except ImportError:
        import sys
        sys.path.append(".")
        import Hapi
| 14.25
| 35
| 0.627193
| 31
| 228
| 4.612903
| 0.709677
| 0.20979
| 0.181818
| 0.265734
| 0.41958
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075145
| 0.241228
| 228
| 15
| 36
| 15.2
| 0.751445
| 0.337719
| 0
| 0.666667
| 0
| 0
| 0.006993
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1e9c97b363555034dfdfbf385e30547184709b7f
| 1,317
|
py
|
Python
|
tabs.py
|
hugo-barros/AI_sudoku_solutions
|
df537254496c5a843dbfcf2bb737ab4624927877
|
[
"MIT"
] | null | null | null |
tabs.py
|
hugo-barros/AI_sudoku_solutions
|
df537254496c5a843dbfcf2bb737ab4624927877
|
[
"MIT"
] | null | null | null |
tabs.py
|
hugo-barros/AI_sudoku_solutions
|
df537254496c5a843dbfcf2bb737ab4624927877
|
[
"MIT"
] | null | null | null |
# Pre-set 9x9 Sudoku boards ("tabuleiro" = board in Portuguese), each a list
# of nine row lists. 0 marks an empty cell to be filled by a solver.
# Names suggest increasing difficulty — easy / med / hard.

# Nearly complete board: only a handful of cells are empty.
tabuleiro_easy_1 = [
    [4, 0, 1, 8, 3, 9, 5, 2, 0],
    [3, 0, 9, 2, 7, 5, 1, 4, 6],
    [5, 2, 7, 6, 0, 1, 9, 8, 0],
    [0, 5, 8, 1, 0, 7, 3, 9, 4],
    [0, 7, 3, 9, 8, 4, 2, 5, 0],
    [9, 1, 4, 5, 2, 3, 6, 7, 8],
    [7, 4, 0, 3, 0, 6, 8, 1, 2],
    [8, 0, 6, 4, 1, 2, 7, 3, 5],
    [1, 3, 2, 7, 5, 8, 4, 0, 9],
]
# Easy puzzle with roughly half the cells given.
tabuleiro_easy_2 = [
    [0, 6, 1, 8, 0, 0, 0, 0, 7],
    [0, 8, 9, 2, 0, 5, 0, 4, 0],
    [0, 0, 0, 0, 4, 0, 9, 0, 3],
    [2, 0, 0, 1, 6, 0, 3, 0, 0],
    [6, 7, 0, 0, 0, 0, 0, 5, 1],
    [0, 0, 4, 0, 2, 3, 0, 0, 8],
    [7, 0, 5, 0, 9, 0, 0, 0, 0],
    [0, 9, 0, 4, 0, 2, 7, 3, 0],
    [1, 0, 0, 0, 0, 8, 4, 6, 0],
]
# Medium puzzle: fewer givens than the easy boards.
tabuleiro_med = [
    [0, 5, 0, 3, 6, 0, 0, 0, 0],
    [2, 8, 0, 7, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 8, 0, 9, 0],
    [6, 0, 0, 0, 0, 0, 0, 8, 3],
    [0, 0, 4, 0, 0, 0, 2, 0, 0],
    [8, 9, 0, 0, 0, 0, 0, 0, 6],
    [0, 7, 0, 5, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 1, 0, 3, 9],
    [0, 0, 0, 0, 4, 3, 0, 6, 0],
]
# Hard puzzle: sparsest board of the set.
tabuleiro_hard = [
    [0, 7, 0, 0, 0, 0, 0, 9, 0],
    [0, 0, 0, 0, 5, 0, 4, 0, 2],
    [0, 0, 0, 0, 0, 0, 0, 3, 0],
    [6, 0, 0, 0, 1, 3, 2, 0, 0],
    [0, 0, 9, 0, 8, 0, 0, 0, 0],
    [0, 3, 1, 0, 0, 6, 0, 0, 0],
    [4, 6, 0, 0, 0, 0, 0, 0, 1],
    [0, 0, 8, 0, 0, 4, 6, 0, 0],
    [0, 0, 0, 0, 3, 5, 0, 0, 0],
]
| 21.241935
| 33
| 0.290812
| 334
| 1,317
| 1.128743
| 0.041916
| 0.535809
| 0.549072
| 0.498674
| 0.435013
| 0.299735
| 0.228117
| 0.116711
| 0.106101
| 0.106101
| 0
| 0.412137
| 0.399393
| 1,317
| 61
| 34
| 21.590164
| 0.064475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1ecbca40de2281a37d2ebaca8351cfa08f576597
| 45
|
py
|
Python
|
src/hani/__init__.py
|
define16/Bot
|
30a1e05ce5e53c88a1cad0bac92c359f180287f7
|
[
"Apache-2.0"
] | null | null | null |
src/hani/__init__.py
|
define16/Bot
|
30a1e05ce5e53c88a1cad0bac92c359f180287f7
|
[
"Apache-2.0"
] | null | null | null |
src/hani/__init__.py
|
define16/Bot
|
30a1e05ce5e53c88a1cad0bac92c359f180287f7
|
[
"Apache-2.0"
] | null | null | null |
# Hankyoreh newspaper (한겨레 신문) — re-export the handler for this source.
from hani.handler import HaniHandler
| 22.5
| 36
| 0.822222
| 7
| 45
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 45
| 2
| 36
| 22.5
| 0.948718
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ed6089435c6825a5a52fb672a19f1dbc46d0cb0
| 3,011
|
py
|
Python
|
plugins/active_directory_ldap/unit_test/test_action_query_group_membership.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/active_directory_ldap/unit_test/test_action_query_group_membership.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/active_directory_ldap/unit_test/test_action_query_group_membership.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
from unittest import TestCase, mock
from komand.exceptions import PluginException
from komand_active_directory_ldap.actions.query_group_membership import QueryGroupMembership
from komand_active_directory_ldap.actions.query_group_membership.schema import Input, Output
from unit_test.common import MockServer
from unit_test.common import MockConnection
from unit_test.common import default_connector
class TestActionQueryGroupMembership(TestCase):
    """Unit tests for the QueryGroupMembership action.

    ldap3's ``Server`` and ``Connection`` are patched with mocks, so no real
    LDAP server is contacted; ``MockConnection`` selects its canned response
    based on the search base that is used.
    """

    def _assert_group_not_found(self, action, search_base):
        """Run the action with *search_base* and assert the not-found error.

        The three failure scenarios (empty search, bad response, no response)
        all surface as the same PluginException; this helper deduplicates the
        shared assertions.
        """
        with self.assertRaises(PluginException) as context:
            action.run({Input.SEARCH_BASE: search_base, Input.GROUP_NAME: "Users"})
        self.assertEqual("The specified group was not found.", context.exception.cause)
        self.assertEqual(
            "Please check that the provided group name and search base are correct and try again.",
            context.exception.assistance,
        )

    @mock.patch("ldap3.Server", mock.MagicMock(return_value=MockServer))
    @mock.patch("ldap3.Connection", mock.MagicMock(return_value=MockConnection()))
    @default_connector(action=QueryGroupMembership())
    def test_query_group(self, action):
        # Happy path: exactly one matching group member, returned with its DN.
        actual = action.run({Input.SEARCH_BASE: "CN=Users,DC=example,DC=com", Input.GROUP_NAME: "Users"})
        expected = {Output.COUNT: 1, Output.RESULTS: [{"dn": "DN=user"}]}
        self.assertEqual(actual, expected)

    @mock.patch("ldap3.Server", mock.MagicMock(return_value=MockServer))
    @mock.patch("ldap3.Connection", mock.MagicMock(return_value=MockConnection()))
    @default_connector(action=QueryGroupMembership())
    def test_query_group_false(self, action):
        # Empty search result -> group not found.
        self._assert_group_not_found(action, "CN=empty_search,DC=example,DC=com")

    @mock.patch("ldap3.Server", mock.MagicMock(return_value=MockServer))
    @mock.patch("ldap3.Connection", mock.MagicMock(return_value=MockConnection()))
    @default_connector(action=QueryGroupMembership())
    def test_query_group_bad_response(self, action):
        # Malformed server response -> group not found.
        self._assert_group_not_found(action, "CN=bad_response,DC=example,DC=com")

    @mock.patch("ldap3.Server", mock.MagicMock(return_value=MockServer))
    @mock.patch("ldap3.Connection", mock.MagicMock(return_value=MockConnection()))
    @default_connector(action=QueryGroupMembership())
    def test_query_group_no_response(self, action):
        # No response from the server -> group not found.
        self._assert_group_not_found(action, "CN=no_response,DC=example,DC=com")
| 51.913793
| 107
| 0.732647
| 361
| 3,011
| 5.972299
| 0.224377
| 0.033395
| 0.051948
| 0.089054
| 0.851577
| 0.818182
| 0.806122
| 0.806122
| 0.790816
| 0.738868
| 0
| 0.003555
| 0.159083
| 3,011
| 57
| 108
| 52.824561
| 0.847946
| 0
| 0
| 0.5625
| 0
| 0
| 0.20558
| 0.041182
| 0
| 0
| 0
| 0
| 0.208333
| 1
| 0.083333
| false
| 0
| 0.145833
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
94def3975c2f875a310e6842dd894c01c192cdf2
| 778
|
py
|
Python
|
user_credentials.py
|
michael-muga/Password_Locker
|
f4d690574a59581bab748b03782d009e5ccdb0f1
|
[
"MIT"
] | null | null | null |
user_credentials.py
|
michael-muga/Password_Locker
|
f4d690574a59581bab748b03782d009e5ccdb0f1
|
[
"MIT"
] | null | null | null |
user_credentials.py
|
michael-muga/Password_Locker
|
f4d690574a59581bab748b03782d009e5ccdb0f1
|
[
"MIT"
] | null | null | null |
class Usercredentials:
    """A single stored credential: a site name plus its password.

    Every saved instance lives in the shared class-level list
    ``user_credential_list``.
    """

    # Shared store for all saved credentials.
    user_credential_list = []

    def __init__(self, site_name, password):
        """Create a credential for *site_name* with the given *password*."""
        self.site_name = site_name
        self.password = password

    def save_credentials(self):
        """Append this credential to the shared credential list."""
        Usercredentials.user_credential_list.append(self)

    def delete_credentials(self):
        """Remove this credential from the shared credential list."""
        Usercredentials.user_credential_list.remove(self)

    @classmethod
    def display_credentials(cls):
        """Return the shared list of saved credentials."""
        return cls.user_credential_list
| 24.3125
| 65
| 0.642674
| 83
| 778
| 5.807229
| 0.433735
| 0.145228
| 0.186722
| 0.205394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.284062
| 778
| 32
| 66
| 24.3125
| 0.86535
| 0.277635
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0.083333
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
a21c75a71ca19b195335a39f1652f7a4639e174a
| 43
|
py
|
Python
|
RPi/GPIO/definitions/PWM/__init__.py
|
Def4l71diot/RPi.GPIO-def
|
ed5f93bf8aa59e41df59001ba74691b396101983
|
[
"MIT"
] | 8
|
2018-08-24T03:34:40.000Z
|
2022-01-05T11:10:34.000Z
|
RPi/GPIO/definitions/PWM/__init__.py
|
Def4l71diot/RPi.GPIO-def
|
ed5f93bf8aa59e41df59001ba74691b396101983
|
[
"MIT"
] | 1
|
2018-09-14T17:33:55.000Z
|
2018-09-14T17:33:55.000Z
|
RPi/GPIO/definitions/PWM/__init__.py
|
Def4l71diot/RPi.GPIO-def
|
ed5f93bf8aa59e41df59001ba74691b396101983
|
[
"MIT"
] | 4
|
2017-02-04T11:29:12.000Z
|
2020-12-29T20:26:27.000Z
|
from RPi.GPIO.definitions.PWM.PWM import *
| 21.5
| 42
| 0.790698
| 7
| 43
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bf405fc48d000a33552e377fcbf360efe86532ea
| 130
|
py
|
Python
|
python-sdk/nuscenes/eval/detection/evaluate.py
|
tanjiangyuan/Classification_nuScence
|
b94c4b0b6257fc1c048a676e3fd9e71183108d53
|
[
"Apache-2.0"
] | null | null | null |
python-sdk/nuscenes/eval/detection/evaluate.py
|
tanjiangyuan/Classification_nuScence
|
b94c4b0b6257fc1c048a676e3fd9e71183108d53
|
[
"Apache-2.0"
] | null | null | null |
python-sdk/nuscenes/eval/detection/evaluate.py
|
tanjiangyuan/Classification_nuScence
|
b94c4b0b6257fc1c048a676e3fd9e71183108d53
|
[
"Apache-2.0"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:7546974e223fcd1a2e44b290634ee472eb29668307530fa1a727f94508188aab
size 13639
| 32.5
| 75
| 0.884615
| 13
| 130
| 8.846154
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.443548
| 0.046154
| 130
| 3
| 76
| 43.333333
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf4b1abbefc34c61d10d3afb8911d9940761dce5
| 21,951
|
py
|
Python
|
pyannote/algorithms/segmentation/hmm.py
|
pyannote/pyannote-algorithms
|
5a646fdaf3250527569e3f9a1c666d6397e53ce4
|
[
"MIT"
] | 5
|
2015-04-13T19:59:10.000Z
|
2020-09-13T23:58:22.000Z
|
pyannote/algorithms/segmentation/hmm.py
|
pyannote/pyannote-algorithms
|
5a646fdaf3250527569e3f9a1c666d6397e53ce4
|
[
"MIT"
] | 7
|
2015-03-12T16:53:31.000Z
|
2018-09-03T11:36:23.000Z
|
pyannote/algorithms/segmentation/hmm.py
|
pyannote/pyannote-algorithms
|
5a646fdaf3250527569e3f9a1c666d6397e53ce4
|
[
"MIT"
] | 7
|
2015-03-11T09:40:08.000Z
|
2021-01-07T10:39:05.000Z
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014-2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import unicode_literals
import six
import numpy as np
from ..utils.viterbi import viterbi_decoding, \
VITERBI_CONSTRAINT_NONE, \
VITERBI_CONSTRAINT_MANDATORY, \
VITERBI_CONSTRAINT_FORBIDDEN
from pyannote.core import Annotation, Scores
from pyannote.core.util import pairwise
from ..utils.sklearn import SKLearnMixin, LabelConverter
from ..classification.gmm import \
SKLearnGMMClassification, SKLearnGMMUBMClassification
class SKLearnGMMSegmentation(SKLearnGMMClassification):
    """
    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.
    tol : float, optional
        Convergence threshold.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. the best results is kept
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    calibration : string, optional
        Controls how log-likelihoods are calibrated into log-likelihood ratios.
        Must be one of 'naive_bayes' (for Gaussian naive Bayes) or 'isotonic'
        for isotonic regression. Defaults to no calibration.
    lbg : boolean, optional
        Controls whether to use the LBG algorithm for training.
        Defaults to False.
    equal_priors : boolean, optional
        Defaults to False
    """
    # Number of classes seen during fit (set by the parent classifier).
    def _n_classes(self,):
        K = len(self.classes_)
        return K
    # Estimate the HMM structure (initial- and transition-state log
    # probabilities) by counting label occurrences over the training
    # sequences in y_iter.
    def _fit_structure(self, y_iter):
        K = self._n_classes()
        initial = np.zeros((K, ), dtype=float)
        transition = np.zeros((K, K), dtype=float)
        for y in y_iter:
            # first label of each sequence contributes to the initial counts
            initial[y[0]] += 1
            # consecutive label pairs contribute to the transition counts
            for n, m in pairwise(y):
                transition[n, m] += 1
        # log-probabilities
        self.initial_ = np.log(initial / np.sum(initial))
        self.transition_ = np.log(transition.T / np.sum(transition, axis=1)).T
        return self
    # Fit the per-class GMMs on all stacked features, then learn the HMM
    # structure from the label sequences.
    def fit(self, X_iter, y_iter):
        # materialize y_iter because it is consumed twice (hstack + structure)
        y_iter = list(y_iter)
        super(SKLearnGMMSegmentation, self).fit(
            np.vstack([X for X in X_iter]),
            np.hstack([y for y in y_iter]))
        self._fit_structure(y_iter)
        return self
    def predict(self, X, consecutive=None, constraint=None):
        """
        Parameters
        ----------
        X : array-like, shape (N, D)
        consecutive : array-like, shape (K, )
        constraint : array-like, shape (N, K)
        N is the number of samples.
        D is the features dimension.
        K is the number of classes (including the rejection class as the last
        class, when appropriate).
        """
        # Emission scores: raw log-likelihoods when uncalibrated, calibrated
        # log-posteriors otherwise; then Viterbi-decode the best state path.
        if self.calibration is None:
            emission = self.predict_log_likelihood(X)
        else:
            emission = self.predict_log_proba(X)
        sequence = viterbi_decoding(
            emission, self.transition_,
            initial=self.initial_,
            consecutive=consecutive, constraint=constraint)
        return sequence
class SKLearnGMMUBMSegmentation(SKLearnGMMUBMClassification):
    """
    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.
    tol : float, optional
        Convergence threshold.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. the best results is kept
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    precomputed_ubm : GMM, optional
        When provided, class GMMs are adapted from this UBM.
    adapt_params : string, optional
        Controls which parameters are updated in the adaptation
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'm'.
    adapt_iter : int, optional
        Number of EM iterations to perform during adaptation.
    calibration : string, optional
        Controls how raw GMM scores are calibrated into log-likelihood ratios.
        Must be one of 'naive_bayes' (for Gaussian naive Bayes) or 'isotonic'
        for isotonic regression. Defaults to no calibration.
    lbg : boolean, optional
        Controls whether to use the LBG algorithm for training.
        Defaults to False.
    """
    # Number of classes; in the open-set case one extra "rejection" class is
    # appended as the last class.
    def _n_classes(self,):
        K = len(self.classes_)
        if self.open_set_:
            K = K + 1
        return K
    # Estimate HMM structure (initial/transition log-probabilities) by
    # counting label occurrences across the training sequences, same as
    # SKLearnGMMSegmentation._fit_structure.
    def _fit_structure(self, y_iter):
        K = self._n_classes()
        initial = np.zeros((K, ), dtype=float)
        transition = np.zeros((K, K), dtype=float)
        for y in y_iter:
            initial[y[0]] += 1
            for n, m in pairwise(y):
                transition[n, m] += 1
        # log-probabilities
        self.initial_ = np.log(initial / np.sum(initial))
        self.transition_ = np.log(transition.T / np.sum(transition, axis=1)).T
        return self
    # Fit the GMM-UBM classifier on all stacked features, then learn the HMM
    # structure from the label sequences.
    def fit(self, X_iter, y_iter):
        # materialize y_iter because it is consumed twice (hstack + structure)
        y_iter = list(y_iter)
        super(SKLearnGMMUBMSegmentation, self).fit(
            np.vstack([X for X in X_iter]),
            np.hstack([y for y in y_iter]))
        self._fit_structure(y_iter)
        return self
    def predict(self, X, consecutive=None, constraint=None):
        """
        Parameters
        ----------
        X : array-like, shape (N, D)
        consecutive : array-like, shape (K, )
        constraint : array-like, shape (N, K)
        N is the number of samples.
        D is the features dimension.
        K is the number of classes (including the rejection class as the last
        class, when appropriate).
        """
        K = self._n_classes()
        N, D = X.shape
        # assert consecutive.shape == (K, )
        # assert constraint.shape == (N, K)
        posteriors = self.predict_proba(X)
        if self.open_set_:
            # rejection-class posterior = whatever probability mass is left
            unknown_posterior = 1. - np.sum(posteriors, axis=1)
            posteriors = np.vstack([posteriors.T, unknown_posterior.T]).T
        sequence = viterbi_decoding(
            np.log(posteriors), self.transition_,
            initial=self.initial_,
            consecutive=consecutive, constraint=constraint)
        if self.open_set_:
            # map the (last) rejection class back to the -1 "unknown" label
            sequence[sequence == (K - 1)] = -1
        return sequence
class GMMSegmentation(SKLearnMixin):
    """
    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.
    tol : float, optional
        Convergence threshold.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. the best results is kept
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    calibration : string, optional
        Controls how raw GMM scores are calibrated into log-likelihood ratios.
        Must be one of 'naive_bayes' (for Gaussian naive Bayes) or 'isotonic'
        for isotonic regression. Defaults to no calibration.
    lbg : boolean, optional
        Controls whether to use the LBG algorithm for training.
        Defaults to False.
    equal_priors : boolean, optional
        Defaults to False.
    """
    # Store all hyper-parameters; the actual classifier is built lazily in fit().
    def __init__(self, n_jobs=1, n_components=1, covariance_type='diag',
                 random_state=None, tol=1e-2, min_covar=1e-3,
                 n_iter=10, n_init=1, params='wmc', init_params='wmc',
                 calibration=None, lbg=False, equal_priors=False):
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.random_state = random_state
        self.tol = tol
        self.min_covar = min_covar
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.calibration = calibration
        self.n_jobs = n_jobs
        self.lbg = lbg
        self.equal_priors = equal_priors
    # Build the underlying SKLearnGMMSegmentation, convert (features,
    # annotation) pairs into (X, y) arrays, encode labels as integers and fit.
    def fit(self, features_iter, annotation_iter):
        self.classifier_ = SKLearnGMMSegmentation(
            n_jobs=self.n_jobs,
            n_components=self.n_components,
            covariance_type=self.covariance_type,
            random_state=self.random_state,
            tol=self.tol,
            min_covar=self.min_covar,
            n_iter=self.n_iter,
            n_init=self.n_init,
            params=self.params,
            init_params=self.init_params,
            calibration=self.calibration,
            lbg=self.lbg,
            equal_priors=self.equal_priors
        )
        X_iter, y_iter = list(zip(*list(
            self.Xy_iter(features_iter, annotation_iter, unknown='unique'))))
        # map arbitrary labels to integer classes (and back, at predict time)
        self.label_converter_ = LabelConverter()
        self.label_converter_.fit(np.hstack(y_iter))
        encoded_y_iter = [self.label_converter_.transform(y) for y in y_iter]
        self.classifier_.fit(X_iter, encoded_y_iter)
        return self
    # Build the (N frames x K classes) Viterbi constraint matrix from either a
    # Scores object (per-label constraint values) or an Annotation (labels are
    # mandatory inside their segments and forbidden everywhere else).
    def _constraint(self, constraint, features):
        N = features.getNumber()
        K = self.classifier_._n_classes()
        mapping = self.label_converter_.mapping()
        sliding_window = features.sliding_window
        # defaults to no constraint
        constraint_ = VITERBI_CONSTRAINT_NONE * np.ones((N, K), dtype=int)
        if isinstance(constraint, Scores):
            for segment, _, label, value in constraint.itervalues():
                t, dt = sliding_window.segmentToRange(segment)
                constraint_[t:t + dt, mapping[label]] = value
        if isinstance(constraint, Annotation):
            # forbidden everywhere...
            for label in constraint.labels():
                constraint_[:, mapping[label]] = VITERBI_CONSTRAINT_FORBIDDEN
            # ... but in labeled segments
            for segment, _, label in constraint.itertracks(label=True):
                t, dt = sliding_window.segmentToRange(segment)
                constraint_[t:t + dt, mapping[label]] = \
                    VITERBI_CONSTRAINT_MANDATORY
        return constraint_
    # Translate minimum durations (seconds, scalar or per-label dict) into
    # per-class minimum numbers of consecutive frames for Viterbi decoding.
    def _consecutive(self, min_duration, features):
        K = self.classifier_._n_classes()
        consecutive = np.ones((K, ), dtype=int)
        sliding_window = features.sliding_window
        if isinstance(min_duration, float):
            # same minimum duration for every class
            consecutive[:] = sliding_window.durationToSamples(min_duration)
        if isinstance(min_duration, dict):
            mapping = self.label_converter_.mapping()
            for label, duration in six.iteritems(min_duration):
                consecutive[mapping[label]] = \
                    sliding_window.durationToSamples(duration)
        return consecutive
    def predict(self, features, min_duration=None, constraint=None):
        """
        Parameters
        ----------
        min_duration : float or dict, optional
            Minimum duration for each label, in seconds.
        constraint : Annotation or Scores, optional
        """
        constraint_ = self._constraint(constraint, features)
        consecutive = self._consecutive(min_duration, features)
        X = self.X(features, unknown='keep')
        sliding_window = features.sliding_window
        converted_y = self.classifier_.predict(
            X, consecutive=consecutive, constraint=constraint_)
        # Turn the frame-level class sequence back into an Annotation: find
        # change points, convert each constant run into a segment, and map
        # integer classes back to the original labels.
        annotation = Annotation()
        diff = list(np.where(np.diff(converted_y))[0])
        diff = [-1] + diff + [len(converted_y)]
        for t, T in pairwise(diff):
            segment = sliding_window.rangeToSegment(t, T - t)
            # label of the run is read just after the change point at t
            annotation[segment] = converted_y[t + 1]
        translation = self.label_converter_.inverse_mapping()
        return annotation.translate(translation)
    @classmethod
    def resegment(cls, features, annotation,
                  equal_priors=True, calibration=None,
                  min_duration=None, constraint=None,
                  **segmenter_args):
        # Convenience one-shot: fit on a single (features, annotation) pair
        # and immediately re-predict the segmentation of the same features.
        segmenter = cls(
            equal_priors=equal_priors,
            calibration=calibration,
            **segmenter_args)
        segmenter.fit([features], [annotation])
        return segmenter.predict(
            features, min_duration=min_duration, constraint=constraint)
class GMMUBMSegmentation(SKLearnMixin):
    """GMM/UBM supervised segmentation.

    Trains one Gaussian mixture model per label (optionally MAP-adapted
    from a precomputed universal background model) and decodes a label
    sequence over feature frames with constrained Viterbi.

    Parameters
    ----------
    n_jobs : int, optional
        Number of parallel jobs forwarded to the underlying estimator.
        Defaults to 1.
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.
    tol : float, optional
        Convergence threshold.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. The best result is kept.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    precomputed_ubm : GMM, optional
        When provided, class GMMs are adapted from this UBM.
    adapt_params : string, optional
        Controls which parameters are updated in the adaptation
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'm'.
    adapt_iter : int, optional
        Number of EM iterations to perform during adaptation.
    calibration : string, optional
        Controls how raw GMM scores are calibrated into log-likelihood ratios.
        Must be one of 'naive_bayes' (for Gaussian naive Bayes) or 'isotonic'
        for isotonic regression. Defaults to no calibration.
    lbg : boolean, optional
        Controls whether to use the LBG algorithm for training.
        Defaults to False.
    """

    def __init__(self, n_jobs=1, n_components=1, covariance_type='diag',
                 random_state=None, tol=1e-2, min_covar=1e-3,
                 n_iter=10, n_init=1, params='wmc', init_params='wmc',
                 precomputed_ubm=None, adapt_iter=10, adapt_params='m',
                 calibration=None, lbg=False):
        # scikit-learn convention: store hyper-parameters verbatim, no
        # validation or computation in __init__.
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.random_state = random_state
        self.tol = tol
        self.min_covar = min_covar
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.precomputed_ubm = precomputed_ubm
        self.adapt_iter = adapt_iter
        self.adapt_params = adapt_params
        self.calibration = calibration
        self.lbg = lbg
        self.n_jobs = n_jobs

    def fit(self, features_iter, annotation_iter):
        """Train the per-label GMMs from (features, annotation) pairs.

        Parameters
        ----------
        features_iter : iterable
            Feature objects, one per training file.
        annotation_iter : iterable
            Matching reference annotations.

        Returns
        -------
        self
        """
        # Forward every stored hyper-parameter to the underlying estimator.
        self.classifier_ = SKLearnGMMUBMSegmentation(
            n_jobs=self.n_jobs,
            n_components=self.n_components,
            covariance_type=self.covariance_type,
            random_state=self.random_state,
            tol=self.tol,
            min_covar=self.min_covar,
            n_iter=self.n_iter,
            n_init=self.n_init,
            params=self.params,
            init_params=self.init_params,
            precomputed_ubm=self.precomputed_ubm,
            adapt_iter=self.adapt_iter,
            adapt_params=self.adapt_params,
            calibration=self.calibration,
            lbg=self.lbg
        )
        # Xy_iter (inherited from SKLearnMixin) pairs frame features with
        # frame labels; unknown='unique' presumably assigns unlabeled
        # regions their own unique labels -- TODO confirm against the mixin.
        X_iter, y_iter = list(zip(*list(
            self.Xy_iter(features_iter, annotation_iter, unknown='unique'))))
        # Encode arbitrary labels as integer class indices.
        self.label_converter_ = LabelConverter()
        self.label_converter_.fit(np.hstack(y_iter))
        encoded_y_iter = [self.label_converter_.transform(y) for y in y_iter]
        self.classifier_.fit(X_iter, encoded_y_iter)
        return self

    def _constraint(self, constraint, features):
        """Build the (n_frames, n_classes) Viterbi constraint matrix."""
        N = features.getNumber()           # number of feature frames
        K = self.classifier_._n_classes()  # number of target classes
        mapping = self.label_converter_.mapping()
        sliding_window = features.sliding_window
        # Default everywhere: no constraint.
        constraint_ = VITERBI_CONSTRAINT_NONE * np.ones((N, K), dtype=int)
        if constraint is not None:
            # NOTE(review): itervalues() is a Python 2 idiom; each item is
            # assumed to unpack as (segment, track, label, value) -- confirm.
            for segment, _, label, value in constraint.itervalues():
                # Apply the constraint to every frame the segment covers.
                t, dt = sliding_window.segmentToRange(segment)
                constraint_[t:t + dt, mapping[label]] = value
        return constraint_

    def _consecutive(self, min_duration, features):
        """Convert minimum durations (seconds) into per-class frame counts."""
        K = self.classifier_._n_classes()
        consecutive = np.ones((K, ), dtype=int)
        sliding_window = features.sliding_window
        if isinstance(min_duration, float):
            # One global minimum duration shared by every class.
            # NOTE(review): an int min_duration is silently ignored here.
            consecutive[:] = sliding_window.durationToSamples(min_duration)
        if isinstance(min_duration, dict):
            # Per-label minimum durations keyed by original label.
            mapping = self.label_converter_.mapping()
            for label, duration in six.iteritems(min_duration):
                consecutive[mapping[label]] = \
                    sliding_window.durationToSamples(duration)
        return consecutive

    def predict(self, features, min_duration=None, constraint=None):
        """Decode a label sequence for `features` into an Annotation.

        Parameters
        ----------
        min_duration : float or dict, optional
            Minimum duration for each label, in seconds.
        constraint : optional
            Segment-level Viterbi constraints (see `_constraint`).

        Returns
        -------
        Annotation
            Decoded segmentation carrying the original labels.
        """
        constraint_ = self._constraint(constraint, features)
        consecutive = self._consecutive(min_duration, features)
        X = self.X(features, unknown='keep')
        sliding_window = features.sliding_window
        converted_y = self.classifier_.predict(
            X, consecutive=consecutive, constraint=constraint_)
        annotation = Annotation()
        # Run-length decode: boundaries are the frames after which the
        # predicted class changes; sentinel -1 / len() close the first and
        # last runs.
        diff = list(np.where(np.diff(converted_y))[0])
        diff = [-1] + diff + [len(converted_y)]
        for t, T in pairwise(diff):
            # NOTE(review): the first run passes t=-1 to rangeToSegment --
            # presumably intended by the SlidingWindow API; confirm.
            segment = sliding_window.rangeToSegment(t, T - t)
            # t is the last frame of the previous run, so t + 1 indexes the
            # first frame (and thus the label) of the current run.
            annotation[segment] = converted_y[t + 1]
        # Map integer class indices back to the original labels.
        translation = self.label_converter_.inverse_mapping()
        return annotation.translate(translation)
| 32.71386
| 79
| 0.637511
| 2,642
| 21,951
| 5.15405
| 0.125284
| 0.024234
| 0.017478
| 0.019534
| 0.79107
| 0.782625
| 0.782625
| 0.776897
| 0.776897
| 0.758684
| 0
| 0.003802
| 0.281035
| 21,951
| 670
| 80
| 32.762687
| 0.859017
| 0.409093
| 0
| 0.746212
| 0
| 0
| 0.003418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07197
| false
| 0
| 0.030303
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf63395bc0d0dcf37fb50e36bab65a356851cf2a
| 239
|
py
|
Python
|
Services/document_parser.py
|
dev-11/eigen-technical-task
|
c0b041fc2bd27d2706ccdab94f6eb618f17098bd
|
[
"MIT"
] | null | null | null |
Services/document_parser.py
|
dev-11/eigen-technical-task
|
c0b041fc2bd27d2706ccdab94f6eb618f17098bd
|
[
"MIT"
] | null | null | null |
Services/document_parser.py
|
dev-11/eigen-technical-task
|
c0b041fc2bd27d2706ccdab94f6eb618f17098bd
|
[
"MIT"
] | null | null | null |
import re
class DocumentParser:
    """Static helpers that split raw document text into sentences or words."""

    @staticmethod
    def split_to_sentences(text):
        """Split `text` on sentence boundaries.

        A boundary is a whitespace character preceded by '.' or '?',
        with negative lookbehinds skipping common abbreviation shapes
        (e.g. "e.g." or "Mr.").
        """
        boundary = r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s'
        return re.split(boundary, text)

    @staticmethod
    def split_to_words(text):
        """Split `text` into whitespace-delimited words."""
        words = text.split()
        return words
| 19.916667
| 75
| 0.573222
| 31
| 239
| 4.290323
| 0.548387
| 0.225564
| 0.300752
| 0.330827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209205
| 239
| 11
| 76
| 21.727273
| 0.703704
| 0
| 0
| 0.25
| 0
| 0
| 0.171548
| 0.171548
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
bf65f5a442f14c48b06bc08ecd60d0af82e28db7
| 186
|
py
|
Python
|
tests/test_trades.py
|
a-re/ledgerx-python
|
413bf44758b52ab8b650b1aa1155fde67858e72e
|
[
"BSD-3-Clause"
] | 4
|
2021-02-10T18:25:05.000Z
|
2022-02-01T14:12:10.000Z
|
tests/test_trades.py
|
a-re/ledgerx-python
|
413bf44758b52ab8b650b1aa1155fde67858e72e
|
[
"BSD-3-Clause"
] | 3
|
2021-03-09T03:16:31.000Z
|
2021-05-10T15:59:11.000Z
|
tests/test_trades.py
|
a-re/ledgerx-python
|
413bf44758b52ab8b650b1aa1155fde67858e72e
|
[
"BSD-3-Clause"
] | 3
|
2021-04-01T07:04:46.000Z
|
2022-01-19T05:03:55.000Z
|
import ledgerx
def test_methods():
    """The Trades API class must expose the expected pagination helpers."""
    exposed = dir(ledgerx.Trades)
    for required in ("next", "list", "list_all"):
        assert required in exposed
| 20.666667
| 39
| 0.731183
| 26
| 186
| 5
| 0.5
| 0.369231
| 0.323077
| 0.307692
| 0.369231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198925
| 186
| 8
| 40
| 23.25
| 0.872483
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf919658c32ed25fe8daed5fdcc1918d5dcc7f10
| 44
|
py
|
Python
|
Space-Combat-Sim/SpaceSim/Simulation/__init__.py
|
tannervoas742/Simulations
|
2156c052c70b7ccae3fb37d560a286a4d9b7f31e
|
[
"MIT"
] | null | null | null |
Space-Combat-Sim/SpaceSim/Simulation/__init__.py
|
tannervoas742/Simulations
|
2156c052c70b7ccae3fb37d560a286a4d9b7f31e
|
[
"MIT"
] | null | null | null |
Space-Combat-Sim/SpaceSim/Simulation/__init__.py
|
tannervoas742/Simulations
|
2156c052c70b7ccae3fb37d560a286a4d9b7f31e
|
[
"MIT"
] | null | null | null |
from SpaceSim.Simulation.Group import Group
| 22
| 43
| 0.863636
| 6
| 44
| 6.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bf9d33ad396fdf7ed1f4a74ae1f0b9f6190590c7
| 169
|
py
|
Python
|
app/django_social_auth/models.py
|
elmadah/django_social
|
5420b851f7904de05f24d4b671f58b467cb26d4c
|
[
"MIT"
] | null | null | null |
app/django_social_auth/models.py
|
elmadah/django_social
|
5420b851f7904de05f24d4b671f58b467cb26d4c
|
[
"MIT"
] | 6
|
2020-06-05T20:35:01.000Z
|
2021-09-22T18:26:02.000Z
|
app/django_social_auth/models.py
|
elmadah/django_social
|
5420b851f7904de05f24d4b671f58b467cb26d4c
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
# Create your models here.
# Custom user model based on Django's AbstractBaseUser.
# NOTE(review): no fields, USERNAME_FIELD, or manager are defined yet --
# presumably a stub; those are required before this model is usable. Confirm.
class User(AbstractBaseUser):
    """Project user model; currently a bare AbstractBaseUser subclass."""
    pass
| 24.142857
| 75
| 0.828402
| 22
| 169
| 6.318182
| 0.727273
| 0.143885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112426
| 169
| 7
| 76
| 24.142857
| 0.926667
| 0.142012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
bf9fb886a304cfbc2007778b945b965ea8e80a74
| 53
|
py
|
Python
|
level0/questioin90.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
level0/questioin90.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
level0/questioin90.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
# Print the sample list with every occurrence of the value 24 removed.
numbers = [12, 24, 35, 24, 88, 120, 155]
print([number for number in numbers if number != 24])
| 53
| 53
| 0.622642
| 15
| 53
| 2.2
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.382979
| 0.113208
| 53
| 1
| 53
| 53
| 0.319149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
bfb1511cf45e34987b4b0b3d712f3f5dce89f363
| 165
|
py
|
Python
|
stentor/viewer/views.py
|
ADWright18/Project-Stentor
|
29d2ac47d310313545509bfabdcb598db3ab12cf
|
[
"BSD-2-Clause"
] | null | null | null |
stentor/viewer/views.py
|
ADWright18/Project-Stentor
|
29d2ac47d310313545509bfabdcb598db3ab12cf
|
[
"BSD-2-Clause"
] | null | null | null |
stentor/viewer/views.py
|
ADWright18/Project-Stentor
|
29d2ac47d310313545509bfabdcb598db3ab12cf
|
[
"BSD-2-Clause"
] | null | null | null |
from django.shortcuts import render
from .models import Channel
# Create your views here.
def stream(request):
    """Render the viewer's stream page with an empty template context."""
    context = {}
    return render(request, 'viewer/stream.html', context)
| 23.571429
| 52
| 0.751515
| 22
| 165
| 5.636364
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145455
| 165
| 6
| 53
| 27.5
| 0.879433
| 0.139394
| 0
| 0
| 0
| 0
| 0.128571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
bfcd5667afb7cb509eb85aff78dc7346d0d8a30b
| 7,739
|
py
|
Python
|
understandability/test.py
|
zhangshyue/regex-library
|
69a26b580bcc94f95dda3536cd790fb59c81a31b
|
[
"MIT"
] | null | null | null |
understandability/test.py
|
zhangshyue/regex-library
|
69a26b580bcc94f95dda3536cd790fb59c81a31b
|
[
"MIT"
] | null | null | null |
understandability/test.py
|
zhangshyue/regex-library
|
69a26b580bcc94f95dda3536cd790fb59c81a31b
|
[
"MIT"
] | null | null | null |
import root_pb2
import base64
import json
from understandability import analyze
from google.protobuf.json_format import MessageToJson, MessageToDict
# --- Fixtures: protobuf Expression inputs, each paired with the analyzer
# --- output (as a plain dict) that test() expects for it.

# S1/S3/L1: repeated-character and quantifier simplifications
# (S{3} -> SSS, S{4,4} -> SSSS, S{7,7} -> S{7}, A{2,} -> AA+).
test_S1S3L1 = root_pb2.Expression(
    raw='S{3}S{4,4}S{7,7}A{2,}',
    tokens=[
        root_pb2.Token(
            token="S",
            type=root_pb2.TokenType.Character,
            character="S"
        ),
        root_pb2.Token(
            token="{3}",
            type=root_pb2.TokenType.QuantifierModifier,
            quantifiermodifier=root_pb2.QuantifierModifierType.SpecifiedQuantifier
        ),
        root_pb2.Token(
            token="S",
            type=root_pb2.TokenType.Character,
            character="S"
        ),
        root_pb2.Token(
            token="{4,4}",
            type=root_pb2.TokenType.QuantifierModifier,
            quantifiermodifier=root_pb2.QuantifierModifierType.SpecifiedQuantifier
        ),
        root_pb2.Token(
            token="S",
            type=root_pb2.TokenType.Character,
            character="S"
        ),
        root_pb2.Token(
            token="{7,7}",
            type=root_pb2.TokenType.QuantifierModifier,
            quantifiermodifier=root_pb2.QuantifierModifierType.SpecifiedQuantifier
        ),
        root_pb2.Token(
            token="A",
            type=root_pb2.TokenType.Character,
            # NOTE(review): token is "A" but character is "S" -- looks like a
            # copy-paste slip (presumably should be character="A"). Confirm
            # whether analyze() reads .token or .character.
            character="S"
        ),
        root_pb2.Token(
            token="{2,}",
            type=root_pb2.TokenType.QuantifierModifier,
            quantifiermodifier=root_pb2.QuantifierModifierType.SpecifiedQuantifier
        ),
    ],
)
ans_S1S3L1 = {"status": "4 understandability errors found","annotations": [{"note": "SSS","entity": "S{3}"},{"note": "SSSS","entity": "S{4,4}"},{"note": "S{7}","entity": "S{7,7}"},{"note": "AA+","entity": "A{2,}"}]}

# L2: a literal run followed by '*' collapses to AA+.
test_L2 = root_pb2.Expression(
    raw='AAA*',
    tokens=[
        root_pb2.Token(
            token="AAA",
            type=root_pb2.TokenType.Character,
            character="AAA"
        ),
        root_pb2.Token(
            token="*",
            type=root_pb2.TokenType.QuantifierModifier,
            quantifiermodifier=root_pb2.QuantifierModifierType.Star
        ),
    ],)
ans_L2 = {'status': '1 understandability error found', 'annotations': [{'note': 'AA+', 'entity': 'AAA*'}]}

# T2/T4: hex and octal escapes that both denote the literal character 'a'.
test_T2T4 = root_pb2.Expression(
    raw='\\x61\\141',
    tokens=[
        root_pb2.Token(
            token="\\x61",
            type=root_pb2.TokenType.Escape,
            escape=root_pb2.EscapeType.Hexadecimal
        ),
        root_pb2.Token(
            token="\\141",
            type=root_pb2.TokenType.Escape,
            escape=root_pb2.EscapeType.Octal
        ),
    ],)
ans_T2T4 = {'status': '2 understandability errors found', 'annotations': [{'note': 'a', 'entity': '\\x61'}, {'note': 'a', 'entity': '\\141'}]}

# Character-set simplifications: [0-9] -> \d, [^\s] -> \S, [$] -> \$,
# plus repeated characters inside [aaa].
test_Set = root_pb2.Expression(
    raw='[0-9][^\\s][$][aaa]',
    tokens=[
        root_pb2.Token(
            token="[",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.OpenSet
        ),
        root_pb2.Token(
            token="0-9",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.RangeSet
        ),
        root_pb2.Token(
            token="]",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.CloseSet
        ),
        root_pb2.Token(
            token="[",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.OpenSet
        ),
        root_pb2.Token(
            token="^",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.SetNegation
        ),
        root_pb2.Token(
            token="\\s",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.Whitespace
        ),
        root_pb2.Token(
            token="]",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.CloseSet
        ),
        root_pb2.Token(
            token="[",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.OpenSet
        ),
        root_pb2.Token(
            token="$",
            type=root_pb2.TokenType.Character,
            character="$"
        ),
        root_pb2.Token(
            token="]",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.CloseSet
        ),
        root_pb2.Token(
            token="[",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.OpenSet
        ),
        root_pb2.Token(
            token="a",
            type=root_pb2.TokenType.Character,
            character="a"
        ),
        root_pb2.Token(
            token="a",
            type=root_pb2.TokenType.Character,
            character="a"
        ),
        root_pb2.Token(
            token="a",
            type=root_pb2.TokenType.Character,
            character="a"
        ),
        root_pb2.Token(
            token="]",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.CloseSet
        ),
    ],)
ans_Set = {'status': '4 understandability errors found', 'annotations': [{'note': '\\d', 'entity': '[0-9]'}, {'note': '\\S', 'entity': '[^\\s]'}, {'note': '\\$', 'entity': '[$]'}, {'note': 'repeated characters in []', 'entity': '[aaa]'}]}

# Escapes inside a character set; same two T2/T4 findings as above.
# NOTE(review): raw lacks the surrounding brackets that the token stream
# describes ('[\\x61\\141]') -- presumably analyze() only reads tokens here.
test_SetT2T4 = root_pb2.Expression(
    raw='\\x61\\141',
    tokens=[
        root_pb2.Token(
            token="[",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.OpenSet
        ),
        root_pb2.Token(
            token="\\x61",
            type=root_pb2.TokenType.Escape,
            escape=root_pb2.EscapeType.Hexadecimal
        ),
        root_pb2.Token(
            token="\\141",
            type=root_pb2.TokenType.Escape,
            escape=root_pb2.EscapeType.Octal
        ),
        root_pb2.Token(
            token="]",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.CloseSet
        ),
    ],)
ans_SetT2T4 = {'status': '2 understandability errors found', 'annotations': [{'note': 'a', 'entity': '\\x61'}, {'note': 'a', 'entity': '\\141'}]}

# Length limit: expression longer than 128 characters; no tokens needed.
# NOTE(review): raw is a non-raw string containing '\w' (an invalid escape
# Python keeps as backslash-w); a raw string literal would be safer.
test_TooLong = root_pb2.Expression(
    raw='([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+([A-Z])\w+',
    tokens=[],)
ans_TooLong = {'status': '1 understandability error found', 'annotations': [{'note': 'rejex is too long. Limit rejex to 128 characters', 'entity': '([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+([A-Z])\\w+'}]}

# Combined fixture (quantifier + repeated characters in a set).
# NOTE(review): test_1 has no matching ans_ dict and is never exercised by
# main() -- presumably work in progress.
test_1 = root_pb2.Expression(
    raw='S{3}[aaa]',
    tokens=[
        root_pb2.Token(
            token="S",
            type=root_pb2.TokenType.Character,
            character="S"
        ),
        root_pb2.Token(
            token="{3}",
            type=root_pb2.TokenType.QuantifierModifier,
            quantifiermodifier=root_pb2.QuantifierModifierType.SpecifiedQuantifier
        ),
        root_pb2.Token(
            token="[",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.OpenSet
        ),
        root_pb2.Token(
            token="a",
            type=root_pb2.TokenType.Character,
            character="a"
        ),
        root_pb2.Token(
            token="a",
            type=root_pb2.TokenType.Character,
            character="a"
        ),
        root_pb2.Token(
            token="a",
            type=root_pb2.TokenType.Character,
            character="a"
        ),
        root_pb2.Token(
            token="]",
            type=root_pb2.TokenType.CharacterClass,
            characterclass=root_pb2.CharacterClassType.CloseSet
        ),
    ],)
def test(t, ans):
    """Run `analyze` on expression `t` and compare its Output against `ans`.

    Parameters
    ----------
    t : root_pb2.Expression
        Tokenized regex fixture to analyze.
    ans : dict
        Expected analyzer output (status string plus annotations).

    Returns
    -------
    bool
        True when the analyzer output matches `ans` exactly.
    """
    # Round-trip through base64 to mimic the wire format the service sees;
    # encode/decode cancel out, leaving the raw serialized bytes.
    expr_raw = base64.b64decode(base64.b64encode(t.SerializeToString()).decode('utf-8'))
    expr = root_pb2.Expression()
    expr.ParseFromString(expr_raw)
    output = root_pb2.Output()
    # analyze() fills output.annotations in place from the token stream.
    analyze(expr.tokens, output, expr.raw)
    # Singular vs. plural status message (0 or 1 -> "error", else "errors").
    if len(output.annotations) <= 1:
        output.status = str(len(output.annotations)) + " understandability error found"
    else:
        output.status = str(len(output.annotations)) + " understandability errors found"
    # Compare key-sorted JSON dumps so dict key ordering does not matter.
    json1 = json.dumps(ans, sort_keys=True)
    json2 = json.dumps(MessageToDict(output), sort_keys=True)
    return json1 == json2
def main():
    """Run every fixture through test() and print one True/False per case."""
    cases = [
        (test_S1S3L1, ans_S1S3L1),
        (test_L2, ans_L2),
        (test_T2T4, ans_T2T4),
        (test_Set, ans_Set),
        (test_SetT2T4, ans_SetT2T4),
        (test_TooLong, ans_TooLong),
    ]
    for fixture, expected in cases:
        print(test(fixture, expected))
if __name__ == "__main__":
main()
| 29.425856
| 295
| 0.650213
| 922
| 7,739
| 5.296095
| 0.111714
| 0.159123
| 0.093385
| 0.132296
| 0.794184
| 0.794184
| 0.767151
| 0.705304
| 0.701823
| 0.650829
| 0
| 0.035301
| 0.172761
| 7,739
| 263
| 296
| 29.425856
| 0.727429
| 0
| 0
| 0.74502
| 0
| 0.007968
| 0.143282
| 0.037985
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007968
| false
| 0
| 0.01992
| 0
| 0.031873
| 0.023904
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
44c3d6199b28f8f339f16d1ebeefd400bc60e00e
| 35
|
py
|
Python
|
src/zhinst/qcodes/control/drivers/base/__init__.py
|
jenshnielsen/zhinst-qcodes
|
fdcf8d1d2af99af81913bc00213f4a815b4d8478
|
[
"MIT"
] | 4
|
2020-09-21T07:09:57.000Z
|
2022-02-23T08:56:35.000Z
|
src/zhinst/qcodes/control/drivers/base/__init__.py
|
jenshnielsen/zhinst-qcodes
|
fdcf8d1d2af99af81913bc00213f4a815b4d8478
|
[
"MIT"
] | 23
|
2020-09-30T12:40:05.000Z
|
2022-03-11T06:34:37.000Z
|
src/zhinst/qcodes/control/drivers/base/__init__.py
|
jenshnielsen/zhinst-qcodes
|
fdcf8d1d2af99af81913bc00213f4a815b4d8478
|
[
"MIT"
] | 9
|
2020-09-02T07:42:31.000Z
|
2022-02-22T07:48:04.000Z
|
from .base import ZIBaseInstrument
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
44d5b3f19873d9a5a96a218c4f328f8037021d41
| 66
|
py
|
Python
|
pyCTT/__init__.py
|
dpjrodrigues/pyCTT
|
a0ae4fb311eed31f4e6d60a2e241cc622ae4c13a
|
[
"MIT"
] | 1
|
2021-01-04T22:26:45.000Z
|
2021-01-04T22:26:45.000Z
|
pyCTT/__init__.py
|
dpjrodrigues/pyCTT
|
a0ae4fb311eed31f4e6d60a2e241cc622ae4c13a
|
[
"MIT"
] | null | null | null |
pyCTT/__init__.py
|
dpjrodrigues/pyCTT
|
a0ae4fb311eed31f4e6d60a2e241cc622ae4c13a
|
[
"MIT"
] | null | null | null |
from .items import *
from .scrapper import *
from .consts import *
| 22
| 23
| 0.742424
| 9
| 66
| 5.444444
| 0.555556
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 66
| 3
| 24
| 22
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
787d58d111c37ad3a519a4c283b0177ae67fdbad
| 34
|
py
|
Python
|
waterwheel/__init__.py
|
Govind9/waterwheel
|
056d748cd52213459f77c20fc42656bd273c8069
|
[
"Apache-2.0"
] | 1
|
2020-03-27T13:39:22.000Z
|
2020-03-27T13:39:22.000Z
|
waterwheel/__init__.py
|
Govind9/waterwheel
|
056d748cd52213459f77c20fc42656bd273c8069
|
[
"Apache-2.0"
] | 35
|
2020-02-29T10:02:18.000Z
|
2020-09-23T17:48:24.000Z
|
waterwheel/__init__.py
|
Govind9/waterwheel
|
056d748cd52213459f77c20fc42656bd273c8069
|
[
"Apache-2.0"
] | 5
|
2020-02-29T02:30:38.000Z
|
2020-09-15T19:18:42.000Z
|
from .waterwheel import WaterWheel
| 34
| 34
| 0.882353
| 4
| 34
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
78c94061a40b6a1aa31b4abcb90ca57221c12eb0
| 40
|
py
|
Python
|
core/math/__init__.py
|
Marxlp/RLFrame
|
1fcfa4fb26c1f0e407c8ea77c86d9d51af8b579a
|
[
"MIT"
] | null | null | null |
core/math/__init__.py
|
Marxlp/RLFrame
|
1fcfa4fb26c1f0e407c8ea77c86d9d51af8b579a
|
[
"MIT"
] | null | null | null |
core/math/__init__.py
|
Marxlp/RLFrame
|
1fcfa4fb26c1f0e407c8ea77c86d9d51af8b579a
|
[
"MIT"
] | null | null | null |
from .mmd import mmd
from .cg import cg
| 13.333333
| 20
| 0.75
| 8
| 40
| 3.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 40
| 2
| 21
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
78cfe95943c05b42d98e37fb840fca8dfb7ffe91
| 261
|
py
|
Python
|
system/Pumba12.py
|
dambdmitry/Geek-Playgraund
|
4730600ae40b85d7bc0040910414341b2fa2f060
|
[
"Apache-2.0"
] | 1
|
2022-03-31T16:20:52.000Z
|
2022-03-31T16:20:52.000Z
|
system/Vadim3.py
|
dambdmitry/Geek-Playgraund
|
4730600ae40b85d7bc0040910414341b2fa2f060
|
[
"Apache-2.0"
] | null | null | null |
system/Vadim3.py
|
dambdmitry/Geek-Playgraund
|
4730600ae40b85d7bc0040910414341b2fa2f060
|
[
"Apache-2.0"
] | null | null | null |
# Echo an incrementing counter: start at 51, print it, then after every line
# read from stdin increment the counter and print the new value.
# Fix: the original snippet was polluted with literal HTML "<br>" tags
# (a copy-paste/export artifact) that made it a SyntaxError; they are
# removed and normal Python indentation restored.
# NOTE(review): the parsed integer `answer` is never used -- presumably only
# the act of entering a line matters; non-numeric input or EOF will raise.
response = 51
print(response)

while True:
    answer = int(input())
    response += 1
    print(response)
| 261
| 261
| 0.689655
| 50
| 261
| 3.6
| 0.26
| 0.533333
| 0.2
| 0.166667
| 0.366667
| 0.366667
| 0.366667
| 0
| 0
| 0
| 0
| 0.011765
| 0.022989
| 261
| 1
| 261
| 261
| 0.694118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.285714
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
153eb90f2e7380448354b38a4fd420c996a24ca7
| 357
|
py
|
Python
|
lightbus/__init__.py
|
C0DK/lightbus
|
be5cc2771b1058f7c927cca870ed75d4cbbe61a3
|
[
"Apache-2.0"
] | 178
|
2017-07-22T12:35:00.000Z
|
2022-03-28T07:53:13.000Z
|
lightbus/__init__.py
|
adamcharnock/warren
|
5e7069da06cd37a8131e8c592ee957ccb73603d5
|
[
"Apache-2.0"
] | 26
|
2017-08-03T12:09:29.000Z
|
2021-10-19T16:47:18.000Z
|
lightbus/__init__.py
|
adamcharnock/warren
|
5e7069da06cd37a8131e8c592ee957ccb73603d5
|
[
"Apache-2.0"
] | 19
|
2017-09-15T17:51:24.000Z
|
2022-02-28T13:00:16.000Z
|
from lightbus.utilities.logging import configure_logging
from lightbus.transports import *
from lightbus.client import BusClient
from lightbus.path import *
from lightbus.message import *
from lightbus.api import *
from lightbus.schema import *
from lightbus.creation import *
from lightbus.client.utilities import OnError
from lightbus.exceptions import *
| 32.454545
| 56
| 0.834734
| 46
| 357
| 6.456522
| 0.347826
| 0.40404
| 0.363636
| 0.161616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112045
| 357
| 10
| 57
| 35.7
| 0.936909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1581042f84efbc911f547bceae7719fd9a90e1f8
| 28
|
py
|
Python
|
tests/agent/test_measurement.py
|
dioptra-io/iris
|
1a7dfb8210fdc0f0554b61b81cbfdba7872f9d39
|
[
"MIT"
] | 6
|
2022-01-13T16:09:57.000Z
|
2022-03-26T08:39:47.000Z
|
tests/agent/test_measurement.py
|
dioptra-io/iris
|
1a7dfb8210fdc0f0554b61b81cbfdba7872f9d39
|
[
"MIT"
] | 16
|
2022-02-01T06:09:13.000Z
|
2022-03-01T06:12:30.000Z
|
tests/agent/test_measurement.py
|
dioptra-io/iris
|
1a7dfb8210fdc0f0554b61b81cbfdba7872f9d39
|
[
"MIT"
] | null | null | null |
# TODO: test_do_measurement
| 14
| 27
| 0.821429
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.84
| 0.892857
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
159141fb1bd5de12edfe309af3ea5614ba4effaa
| 331
|
py
|
Python
|
lib/output.py
|
cjmlgrto/heroku-buildpack-typekit
|
e1fce9c4b8a39eeb1464e1609df2e19703e77b9f
|
[
"MIT"
] | 5
|
2017-09-21T09:21:08.000Z
|
2022-01-03T16:11:34.000Z
|
lib/output.py
|
cjmlgrto/heroku-buildpack-typekit
|
e1fce9c4b8a39eeb1464e1609df2e19703e77b9f
|
[
"MIT"
] | 3
|
2017-08-16T20:31:21.000Z
|
2018-05-31T08:18:10.000Z
|
lib/output.py
|
cjmlgrto/heroku-buildpack-typekit
|
e1fce9c4b8a39eeb1464e1609df2e19703e77b9f
|
[
"MIT"
] | 6
|
2017-12-18T17:36:57.000Z
|
2018-08-23T18:26:57.000Z
|
#!/usr/bin/env python
##
# Print a heading.
#
# @var string text
# @return string
##
def heading(text):
    """Return *text* formatted as a buildpack heading line."""
    prefix = '-----> '
    return prefix + text
##
# Print a single line.
#
# @var string text
# @return string
##
def line(text):
    """Return *text* indented to align under a heading line."""
    indent = '       '
    return indent + text
##
# Print a single new line.
#
# @return string
##
def nl():
    """Return a blank (indent-only) output line."""
    blank = line('')
    return blank
| 11.821429
| 28
| 0.558912
| 42
| 331
| 4.404762
| 0.380952
| 0.216216
| 0.243243
| 0.205405
| 0.583784
| 0.583784
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247734
| 331
| 27
| 29
| 12.259259
| 0.742972
| 0.489426
| 0
| 0
| 0
| 0
| 0.096552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ecaedd3499bd634c1aa413a6bbbfce32a4b87619
| 35
|
py
|
Python
|
CodeWars/8 Kyu/Enumerable Magic #25 - Take the First N Elements.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/8 Kyu/Enumerable Magic #25 - Take the First N Elements.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/8 Kyu/Enumerable Magic #25 - Take the First N Elements.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def take(arr, n):
    """Return the first `n` elements of `arr` (all of them if n >= len)."""
    head = arr[:n]
    return head
| 17.5
| 18
| 0.6
| 7
| 35
| 3
| 0.714286
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 2
| 18
| 17.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
01bf06656835ebbbd3ad9111e57dab6c2eed5cd8
| 49
|
py
|
Python
|
pydomosed/__init__.py
|
evtn/pydomosed
|
bdab19ea9f5e31dd0ce26a97e65accf78e88213c
|
[
"WTFPL"
] | 8
|
2020-10-26T06:46:14.000Z
|
2021-09-21T21:32:11.000Z
|
pydomosed/__init__.py
|
evtn/pydomosed
|
bdab19ea9f5e31dd0ce26a97e65accf78e88213c
|
[
"WTFPL"
] | null | null | null |
pydomosed/__init__.py
|
evtn/pydomosed
|
bdab19ea9f5e31dd0ce26a97e65accf78e88213c
|
[
"WTFPL"
] | null | null | null |
from .base import Session
from .hooks import Hook
| 24.5
| 25
| 0.816327
| 8
| 49
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 2
| 26
| 24.5
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
01c18a1749dbab06a9fe6b3f6ceeca0631f66094
| 213
|
py
|
Python
|
app/accounts/models.py
|
gladunvv/url-shorteners-api
|
ecb01fc0b825f8972140bc99ac331735432ab966
|
[
"MIT"
] | null | null | null |
app/accounts/models.py
|
gladunvv/url-shorteners-api
|
ecb01fc0b825f8972140bc99ac331735432ab966
|
[
"MIT"
] | 1
|
2020-06-05T20:26:01.000Z
|
2020-06-05T20:26:01.000Z
|
app/accounts/models.py
|
gladunvv/app-quiz-django
|
ecb01fc0b825f8972140bc99ac331735432ab966
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with role flags."""
    # Role flags; both default to False, so a freshly created account holds
    # neither role until one is explicitly granted.
    is_student = models.BooleanField(default=False)
    is_teacher = models.BooleanField(default=False)
| 26.625
| 51
| 0.798122
| 27
| 213
| 6.222222
| 0.592593
| 0.119048
| 0.297619
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122066
| 213
| 7
| 52
| 30.428571
| 0.898396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bf190196fa8fb2c01c73a70ccd669832f6cbd8f5
| 14,587
|
py
|
Python
|
tests/pnr/test_hpwl.py
|
pretl/ALIGN-public
|
4b03042d9e96fa669740427842b0bf268b0c9a86
|
[
"BSD-3-Clause"
] | 119
|
2019-05-14T18:44:34.000Z
|
2022-03-17T01:01:02.000Z
|
tests/pnr/test_hpwl.py
|
pretl/ALIGN-public
|
4b03042d9e96fa669740427842b0bf268b0c9a86
|
[
"BSD-3-Clause"
] | 717
|
2019-04-03T15:36:35.000Z
|
2022-03-31T21:56:47.000Z
|
tests/pnr/test_hpwl.py
|
pretl/ALIGN-public
|
4b03042d9e96fa669740427842b0bf268b0c9a86
|
[
"BSD-3-Clause"
] | 34
|
2019-04-01T21:21:27.000Z
|
2022-03-21T09:46:57.000Z
|
import json
import pathlib
from align.pnr.hpwl import gen_netlist, calculate_HPWL_from_placement_verilog_d, Interval, SemiPerimeter
from align.pnr.render_placement import standalone_overlap_checker
def test_interval():
    """Interval.dist() is 0 for a single point, then max - min."""
    interval = Interval()
    interval.add(7)
    assert interval.dist() == 0
    interval.add(3)
    assert interval.dist() == 4
def test_semiperimeter():
    """SemiPerimeter.dist() grows from 0 (point) to the bbox half-perimeter."""
    perimeter = SemiPerimeter()
    perimeter.addPoint((3, 7))
    assert perimeter.dist() == 0
    perimeter.addRect((10, 10, 12, 12))
    assert perimeter.dist() == 14
def test_gen_netlist():
    """HPWL of two stacked instances of leaf 'a' sharing net 'y'."""
    # NOTE(review): a second function with this exact name is defined later
    # in the module, so pytest only collects the later one and this test
    # never runs -- consider renaming one of them.
    # Layout: instances u0/u1 of 10x10 leaf 'a'; u1 is offset +20 in Y.
    # Each exposes terminal 'x' at rect [4,4,6,6], tied to actual net 'y'.
    placement_verilog_d = {
        "global_signals": [],
        "modules": [
            { "abstract_name": "top",
              "concrete_name": "top",
              "bbox": [0,0,100,100],
              "parameters": [],
              "instances": [
                  {
                      "abstract_template_name": "a",
                      "concrete_template_name": "a",
                      "instance_name": "u0",
                      "transformation": { "oX": 0, "oY": 0, "sX": 1, "sY": 1},
                      "fa_map": [{"formal": "x", "actual": "y"}]
                  },
                  {
                      "abstract_template_name": "a",
                      "concrete_template_name": "a",
                      "instance_name": "u1",
                      "transformation": { "oX": 0, "oY": 20, "sX": 1, "sY": 1},
                      "fa_map": [{"formal": "x", "actual": "y"}]
                  }
              ]
            }
        ],
        "leaves": [
            { "abstract_name": "a",
              "concrete_name": "a",
              "bbox": [0,0,10,10],
              "terminals": [
                  { "name": "x",
                    "rect": [4,4,6,6]
                  }
              ]
          }
        ]
    }
    nets_d = gen_netlist( placement_verilog_d, 'top')
    # Terminal boxes land at (4,4,6,6) for u0 and (4,24,6,26) for u1:
    # HPWL = (6-4) + (26-4) = 24.
    assert 24 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
def test_gen_netlist_flip():
    """HPWL recomputation after mirroring instance u0 via sX/sY = -1.

    The same netlist (nets_d) is reused across placements; each mutation of
    u0's transformation changes where its off-center terminal lands and
    therefore the expected half-perimeter wirelength.
    """
    placement_verilog_d = {
        "global_signals": [],
        "modules": [
            { "abstract_name": "top",
              "concrete_name": "top",
              "bbox": [0,0,100,100],
              "parameters": [],
              "instances": [
                  {
                      "abstract_template_name": "a",
                      "concrete_template_name": "a",
                      "instance_name": "u0",
                      "transformation": { "oX": 0, "oY": 0, "sX": 1, "sY": 1},
                      "fa_map": [{"formal": "x", "actual": "y"}]
                  },
                  {
                      "abstract_template_name": "a",
                      "concrete_template_name": "a",
                      "instance_name": "u1",
                      "transformation": { "oX": 15, "oY": 20, "sX": 1, "sY": 1},
                      "fa_map": [{"formal": "x", "actual": "y"}]
                  }
              ]
            }
        ],
        "leaves": [
            { "abstract_name": "a",
              "concrete_name": "a",
              "bbox": [0,0,10,10],
              "terminals": [
                  { "name": "x",
                    # terminal is off-center, so mirroring moves it
                    "rect": [1,2,3,4]
                  }
              ]
            }
        ]
    }
    nets_d = gen_netlist( placement_verilog_d, 'top')
    assert 39 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
    # mirror u0 in x (sX = -1, origin shifted to keep the bbox in place)
    placement_verilog_d['modules'][0]['instances'][0]['transformation'] = { "oX": 10, "oY": 0, "sX": -1, "sY": 1}
    assert 33 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
    # mirror u0 in both x and y
    placement_verilog_d['modules'][0]['instances'][0]['transformation'] = { "oX": 10, "oY": 10, "sX": -1, "sY": -1}
    assert 29 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
    # mirror u0 in y only
    placement_verilog_d['modules'][0]['instances'][0]['transformation'] = { "oX": 0, "oY": 10, "sX": 1, "sY": -1}
    assert 35 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d)
def test_gen_netlist_globals():
    """HPWL with net 'y' declared as a global signal.

    With ``skip_globals=False`` the global net still contributes (HPWL 24);
    with ``skip_globals=True`` it is excluded (HPWL 0). Re-pointing the
    global entry at a non-existent net ("a") restores the full HPWL.

    Renamed from ``test_gen_netlist``: the original file defined two
    functions with that name, so this one silently shadowed the earlier
    test and pytest only ever ran one of them.
    """
    placement_verilog_d = {
        "modules": [
            { "abstract_name": "top",
              "concrete_name": "top",
              "bbox": [0,0,100,100],
              "parameters": [],
              "instances": [
                  {
                      "abstract_template_name": "a",
                      "concrete_template_name": "a",
                      "instance_name": "u0",
                      "transformation": { "oX": 0, "oY": 0, "sX": 1, "sY": 1},
                      "fa_map": [{"formal": "x", "actual": "y"}]
                  },
                  {
                      "abstract_template_name": "a",
                      "concrete_template_name": "a",
                      "instance_name": "u1",
                      "transformation": { "oX": 0, "oY": 20, "sX": 1, "sY": 1},
                      "fa_map": [{"formal": "x", "actual": "y"}]
                  }
              ]
            }
        ],
        "leaves": [
            { "abstract_name": "a",
              "concrete_name": "a",
              "bbox": [0,0,10,10],
              "terminals": [
                  { "name": "x",
                    "rect": [4,4,6,6]
                  }
              ]
            }
        ],
        # The original dict literal listed "global_signals" twice; the first
        # (empty) occurrence was silently discarded by Python, so only this
        # populated entry ever took effect. The duplicate key is removed.
        "global_signals": [
            {
                "actual": "y"
            }
        ]
    }
    nets_d = gen_netlist( placement_verilog_d, 'top')
    assert 24 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d, skip_globals=False)
    assert 0 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d, skip_globals=True)
    # point the global entry at a name that matches no net: nothing is skipped
    placement_verilog_d['global_signals'][0]['actual'] = "a"
    assert 24 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, 'top', nets_d, skip_globals=True)
def test_gen_netlist_matrix():
    """HPWL of a 4x3 'matrix' of 'slice' leaves, improved step by step.

    Starting from the initial placement (HPWL 27584), each mutation flips
    or shifts instances, asserts the placement is still overlap-free, and
    asserts the HPWL strictly decreases.
    """
    # Hierarchy: matrix_0 contains four row_0 instances stacked vertically;
    # row_0 contains three slice_a instances left to right; nets inp -> x1
    # -> x2 -> x3 -> out chain the stages together.
    txt = """{
    "global_signals": [],
    "leaves": [
        {
            "abstract_name": "slice",
            "bbox": [
                0,
                0,
                800,
                840
            ],
            "concrete_name": "slice_a",
            "terminal_centers": [
                {
                    "center": [
                        400,
                        168
                    ],
                    "name": "inp"
                },
                {
                    "center": [
                        400,
                        672
                    ],
                    "name": "out"
                }
            ],
            "terminals": [
                {
                    "name": "inp",
                    "rect": [
                        124,
                        152,
                        676,
                        184
                    ]
                },
                {
                    "name": "out",
                    "rect": [
                        124,
                        656,
                        676,
                        688
                    ]
                }
            ]
        }
    ],
    "modules": [
        {
            "abstract_name": "matrix",
            "bbox": [
                0,
                0,
                2480,
                3528
            ],
            "concrete_name": "matrix_0",
            "constraints": [
                {
                    "abut": false,
                    "constraint": "order",
                    "direction": "top_to_bottom",
                    "instances": [
                        "u0",
                        "u1",
                        "u2",
                        "u3"
                    ]
                },
                {
                    "constraint": "same_template",
                    "instances": [
                        "u0",
                        "u1",
                        "u2",
                        "u3"
                    ]
                }
            ],
            "instances": [
                {
                    "abstract_template_name": "row",
                    "concrete_template_name": "row_0",
                    "fa_map": [
                        {
                            "actual": "inp",
                            "formal": "inp"
                        },
                        {
                            "actual": "x1",
                            "formal": "out"
                        }
                    ],
                    "instance_name": "u0",
                    "transformation": {
                        "oX": 0,
                        "oY": 2688,
                        "sX": 1,
                        "sY": 1
                    }
                },
                {
                    "abstract_template_name": "row",
                    "concrete_template_name": "row_0",
                    "fa_map": [
                        {
                            "actual": "x1",
                            "formal": "inp"
                        },
                        {
                            "actual": "x2",
                            "formal": "out"
                        }
                    ],
                    "instance_name": "u1",
                    "transformation": {
                        "oX": 0,
                        "oY": 1764,
                        "sX": 1,
                        "sY": 1
                    }
                },
                {
                    "abstract_template_name": "row",
                    "concrete_template_name": "row_0",
                    "fa_map": [
                        {
                            "actual": "x2",
                            "formal": "inp"
                        },
                        {
                            "actual": "x3",
                            "formal": "out"
                        }
                    ],
                    "instance_name": "u2",
                    "transformation": {
                        "oX": 0,
                        "oY": 924,
                        "sX": 1,
                        "sY": 1
                    }
                },
                {
                    "abstract_template_name": "row",
                    "concrete_template_name": "row_0",
                    "fa_map": [
                        {
                            "actual": "x3",
                            "formal": "inp"
                        },
                        {
                            "actual": "out",
                            "formal": "out"
                        }
                    ],
                    "instance_name": "u3",
                    "transformation": {
                        "oX": 0,
                        "oY": 0,
                        "sX": 1,
                        "sY": 1
                    }
                }
            ],
            "parameters": [
                "inp",
                "out"
            ]
        },
        {
            "abstract_name": "row",
            "bbox": [
                0,
                0,
                2480,
                840
            ],
            "concrete_name": "row_0",
            "constraints": [
                {
                    "abut": false,
                    "constraint": "order",
                    "direction": "left_to_right",
                    "instances": [
                        "u0",
                        "u1",
                        "u2"
                    ]
                },
                {
                    "constraint": "same_template",
                    "instances": [
                        "u0",
                        "u1",
                        "u2"
                    ]
                }
            ],
            "instances": [
                {
                    "abstract_template_name": "slice",
                    "concrete_template_name": "slice_a",
                    "fa_map": [
                        {
                            "actual": "inp",
                            "formal": "inp"
                        },
                        {
                            "actual": "x1",
                            "formal": "out"
                        }
                    ],
                    "instance_name": "u0",
                    "transformation": {
                        "oX": 0,
                        "oY": 0,
                        "sX": 1,
                        "sY": 1
                    }
                },
                {
                    "abstract_template_name": "slice",
                    "concrete_template_name": "slice_a",
                    "fa_map": [
                        {
                            "actual": "x1",
                            "formal": "inp"
                        },
                        {
                            "actual": "x2",
                            "formal": "out"
                        }
                    ],
                    "instance_name": "u1",
                    "transformation": {
                        "oX": 880,
                        "oY": 0,
                        "sX": 1,
                        "sY": 1
                    }
                },
                {
                    "abstract_template_name": "slice",
                    "concrete_template_name": "slice_a",
                    "fa_map": [
                        {
                            "actual": "x2",
                            "formal": "inp"
                        },
                        {
                            "actual": "out",
                            "formal": "out"
                        }
                    ],
                    "instance_name": "u2",
                    "transformation": {
                        "oX": 1680,
                        "oY": 0,
                        "sX": 1,
                        "sY": 1
                    }
                }
            ],
            "parameters": [
                "inp",
                "out"
            ]
        }
    ]
}
"""
    placement_verilog_d = json.loads(txt)
    cn = 'matrix_0'
    nets_d = gen_netlist( placement_verilog_d, cn)
    assert 27584 == calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
    # Flip slice u1 inside the row vertically (shift by its height, sY = -1)
    placement_verilog_d['modules'][1]['instances'][1]['transformation']["oY"] += 840
    placement_verilog_d['modules'][1]['instances'][1]['transformation']["sY"] = -1
    assert standalone_overlap_checker( placement_verilog_d, cn)
    hpwl = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
    print(hpwl)
    assert 27584 > hpwl
    # Flip row u1 horizontally within the matrix
    placement_verilog_d['modules'][0]['instances'][1]['transformation']["oX"] += 2480
    placement_verilog_d['modules'][0]['instances'][1]['transformation']["sX"] = -1
    assert standalone_overlap_checker( placement_verilog_d, cn)
    hpwl2 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
    print(hpwl2)
    assert hpwl > hpwl2
    # Flip row u3 horizontally as well
    placement_verilog_d['modules'][0]['instances'][3]['transformation']["oX"] += 2480
    placement_verilog_d['modules'][0]['instances'][3]['transformation']["sX"] = -1
    assert standalone_overlap_checker( placement_verilog_d, cn)
    hpwl3 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
    print(hpwl3)
    assert hpwl2 > hpwl3
    # Flip every row vertically (shift up by one row height, sY = -1)
    placement_verilog_d['modules'][0]['instances'][0]['transformation']["oY"] += 840
    placement_verilog_d['modules'][0]['instances'][0]['transformation']["sY"] = -1
    placement_verilog_d['modules'][0]['instances'][1]['transformation']["oY"] += 840
    placement_verilog_d['modules'][0]['instances'][1]['transformation']["sY"] = -1
    placement_verilog_d['modules'][0]['instances'][2]['transformation']["oY"] += 840
    placement_verilog_d['modules'][0]['instances'][2]['transformation']["sY"] = -1
    placement_verilog_d['modules'][0]['instances'][3]['transformation']["oY"] += 840
    placement_verilog_d['modules'][0]['instances'][3]['transformation']["sY"] = -1
    assert standalone_overlap_checker( placement_verilog_d, cn)
    hpwl4 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
    print(hpwl4)
    assert hpwl3 > hpwl4
    # Squeeze slices u1/u2 of the row leftward
    placement_verilog_d['modules'][1]['instances'][1]['transformation']["oX"] -= 80
    placement_verilog_d['modules'][1]['instances'][2]['transformation']["oX"] -= 80
    assert standalone_overlap_checker( placement_verilog_d, cn)
    hpwl5 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
    print(hpwl5)
    assert hpwl4 > hpwl5
    # Final compaction: nudge rows down and left
    placement_verilog_d['modules'][0]['instances'][0]['transformation']["oY"] -= 2*84
    placement_verilog_d['modules'][0]['instances'][1]['transformation']["oY"] -= 84
    placement_verilog_d['modules'][0]['instances'][2]['transformation']["oY"] -= 84
    placement_verilog_d['modules'][0]['instances'][1]['transformation']["oX"] -= 80
    placement_verilog_d['modules'][0]['instances'][3]['transformation']["oX"] -= 80
    assert standalone_overlap_checker( placement_verilog_d, cn)
    hpwl6 = calculate_HPWL_from_placement_verilog_d( placement_verilog_d, cn, nets_d)
    print(hpwl6)
    assert hpwl5 > hpwl6
    # Report the total relative improvement over the starting placement
    print( hpwl6 / 27584 - 1)
| 27.574669
| 115
| 0.434496
| 1,303
| 14,587
| 4.586339
| 0.10284
| 0.187416
| 0.19913
| 0.096386
| 0.842035
| 0.832831
| 0.824297
| 0.77326
| 0.748829
| 0.574799
| 0
| 0.047053
| 0.405567
| 14,587
| 528
| 116
| 27.626894
| 0.64214
| 0
| 0
| 0.529412
| 0
| 0
| 0.50449
| 0.042092
| 0
| 0
| 0
| 0
| 0.052521
| 1
| 0.012605
| false
| 0
| 0.008403
| 0
| 0.021008
| 0.014706
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1713c05bc92b91cd1c6a06f43c0194b7764d5409
| 4,422
|
py
|
Python
|
level_three.py
|
KlaudijaMedeksaite/GBUI-voice-project
|
bd2cd979483e3ac43de5009d148e2e0403f50eda
|
[
"MIT"
] | null | null | null |
level_three.py
|
KlaudijaMedeksaite/GBUI-voice-project
|
bd2cd979483e3ac43de5009d148e2e0403f50eda
|
[
"MIT"
] | null | null | null |
level_three.py
|
KlaudijaMedeksaite/GBUI-voice-project
|
bd2cd979483e3ac43de5009d148e2e0403f50eda
|
[
"MIT"
] | null | null | null |
import extras
import mike
from deep_translator import (GoogleTranslator)
import main
# The three parts of the third level
def food(lan):
    """Run the food-vocabulary drill for language code *lan*.

    Speaks each English word with its translation, asks the user to say the
    translated word, and awards one point per correct answer (up to three
    retries per word). Returns the number of points earned.
    """
    language = extras.get_language_long(lan)
    print("\n\nFood in " + language)
    print("---------------------------------------------------------")
    words_en = ["bread", "chicken", "beef", "meat", "fruit",
                "vegetable", "apple", "banana", "tomato", "carrot", "pizza"]
    # The translator configuration is loop-invariant: build it once instead
    # of constructing a new GoogleTranslator per word.
    translator = GoogleTranslator(source='en', target=lan)
    words_local = [translator.translate(text=w).lower() for w in words_en]
    points = 0
    # NOTE(review): this drill starts at index 8 (only the last three words),
    # while the sibling clothes() and buildings() drills start at 0 —
    # preserved as-is, but confirm it is intentional and not a copy/paste slip.
    for i in range(8, 11):
        correct = False
        mike.mike(words_en[i] + " in " + language + " is ")
        mike.mike(words_local[i], lan)
        mike.mike("Say " + words_en[i] + " in " + language)
        user_input = mike.record_audio(lang=lan)
        attempt = 0
        while not correct:
            print("You said: " + user_input)
            if words_local[i] in user_input:
                print("Correct!")
                points += 1
                correct = True
            elif "quit" in user_input:
                # Persist progress before terminating the whole program.
                main.save_progress()
                exit()
            else:
                if attempt < 3:
                    mike.mike("Let's try again, say ")
                    user_input = mike.record_audio(words_local[i], lan)
                else:
                    print("Let's move on")
                    correct = True
                attempt += 1
    return points
def clothes(lan):
    """Run the clothes-vocabulary drill for language code *lan*.

    Speaks each English word with its translation, asks the user to say the
    translated word, and awards one point per correct answer (up to three
    retries per word). Returns the number of points earned.
    """
    language = extras.get_language_long(lan)
    print("\n\nClothes in " + language)
    print("---------------------------------------------------------")
    words_en = ["blouse", "t-shirt", "hoodie", "pants", "jeans",
                "socks", "shoes", "belt", "hat", "scarf", "jacket"]
    # The translator configuration is loop-invariant: build it once instead
    # of constructing a new GoogleTranslator per word.
    translator = GoogleTranslator(source='en', target=lan)
    words_local = [translator.translate(text=w).lower() for w in words_en]
    points = 0
    for i in range(11):
        correct = False
        mike.mike(words_en[i] + " in " + language + " is ")
        mike.mike(words_local[i], lan)
        mike.mike("Say " + words_en[i] + " in " + language)
        user_input = mike.record_audio(lang=lan)
        attempt = 0
        while not correct:
            print("You said: " + user_input)
            if words_local[i] in user_input:
                print("Correct!")
                points += 1
                correct = True
            elif "quit" in user_input:
                # Persist progress before terminating the whole program.
                main.save_progress()
                exit()
            else:
                if attempt < 3:
                    mike.mike("Let's try again, say ")
                    user_input = mike.record_audio(words_local[i], lan)
                else:
                    print("Let's move on")
                    correct = True
                attempt += 1
    return points
def buildings(lan):
    """Run the buildings-vocabulary drill for language code *lan*.

    Speaks each English word with its translation, asks the user to say the
    translated word, and awards one point per correct answer (up to three
    retries per word). Returns the number of points earned.
    """
    language = extras.get_language_long(lan)
    print("\n\nBuildings in " + language)
    print("---------------------------------------------------------")
    words_en = ["shop", "church", "gym", "library", "townhall",
                "house", "apartment", "school", "university", "factory", "police station"]
    # The translator configuration is loop-invariant: build it once instead
    # of constructing a new GoogleTranslator per word.
    translator = GoogleTranslator(source='en', target=lan)
    words_local = [translator.translate(text=w).lower() for w in words_en]
    points = 0
    for i in range(11):
        correct = False
        mike.mike(words_en[i] + " in " + language + " is ")
        mike.mike(words_local[i], lan)
        mike.mike("Say " + words_en[i] + " in " + language)
        user_input = mike.record_audio(lang=lan)
        attempt = 0
        while not correct:
            print("You said: " + user_input)
            if words_local[i] in user_input:
                print("Correct!")
                points += 1
                correct = True
            elif "quit" in user_input:
                # Persist progress before terminating the whole program.
                main.save_progress()
                exit()
            else:
                if attempt < 3:
                    mike.mike("Let's try again, say ")
                    user_input = mike.record_audio(words_local[i], lan)
                else:
                    print("Let's move on")
                    correct = True
                attempt += 1
    return points
| 30.081633
| 87
| 0.459521
| 467
| 4,422
| 4.284797
| 0.233405
| 0.067466
| 0.023988
| 0.047976
| 0.798601
| 0.798601
| 0.798601
| 0.798601
| 0.798601
| 0.737131
| 0
| 0.009905
| 0.383537
| 4,422
| 146
| 88
| 30.287671
| 0.724138
| 0.007689
| 0
| 0.862903
| 0
| 0
| 0.144779
| 0.038988
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024194
| false
| 0
| 0.032258
| 0
| 0.080645
| 0.120968
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1723c4928af3a354632243f8b13774335cb42cfa
| 206
|
py
|
Python
|
xv_leak_tools/test_components/cleanup/cleanup.py
|
UAEKondaya1/expressvpn_leak_testing
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
[
"MIT"
] | 219
|
2017-12-12T09:42:46.000Z
|
2022-03-13T08:25:13.000Z
|
xv_leak_tools/test_components/cleanup/cleanup.py
|
UAEKondaya1/expressvpn_leak_testing
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
[
"MIT"
] | 11
|
2017-12-14T08:14:51.000Z
|
2021-08-09T18:37:45.000Z
|
xv_leak_tools/test_components/cleanup/cleanup.py
|
UAEKondaya1/expressvpn_leak_testing
|
9e4cee899ac04f7820ac351fa55efdc0c01370ba
|
[
"MIT"
] | 45
|
2017-12-14T07:26:36.000Z
|
2022-03-11T09:36:56.000Z
|
from abc import ABCMeta, abstractmethod
from xv_leak_tools.test_components.component import Component
class Cleanup(Component, metaclass=ABCMeta):
    """Abstract test component defining the cleanup interface.

    Concrete subclasses must implement :meth:`cleanup`.
    """
    @abstractmethod
    def cleanup(self):
        """Perform this component's cleanup actions (must be overridden)."""
        pass
| 20.6
| 61
| 0.771845
| 24
| 206
| 6.5
| 0.708333
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169903
| 206
| 9
| 62
| 22.888889
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1758cc112b3b494c9fcb1a4f011424e5a917504e
| 14,446
|
py
|
Python
|
tests/action/test_create_field.py
|
Mohsen-Khodabakhshi/mongoengine-migrate
|
1a7a26a47a474f70743c04700ce2a42f1872f166
|
[
"Apache-2.0"
] | 15
|
2020-08-05T22:25:54.000Z
|
2022-02-08T20:50:35.000Z
|
tests/action/test_create_field.py
|
Mohsen-Khodabakhshi/mongoengine-migrate
|
1a7a26a47a474f70743c04700ce2a42f1872f166
|
[
"Apache-2.0"
] | 36
|
2020-10-22T09:05:01.000Z
|
2022-02-21T14:50:17.000Z
|
tests/action/test_create_field.py
|
Mohsen-Khodabakhshi/mongoengine-migrate
|
1a7a26a47a474f70743c04700ce2a42f1872f166
|
[
"Apache-2.0"
] | 5
|
2020-10-23T04:06:32.000Z
|
2022-02-21T14:35:33.000Z
|
import itertools
from copy import deepcopy
from unittest.mock import patch
import jsonpath_rw
import pytest
from mongoengine_migrate.actions import CreateField
from mongoengine_migrate.exceptions import SchemaError
from mongoengine_migrate.graph import MigrationPolicy
from mongoengine_migrate.schema import Schema
@pytest.fixture
def left_schema():
    """Baseline schema fixture: 'Document1' with two fields plus an
    embedded document '~EmbeddedDocument2'."""
    return Schema({
        'Document1': Schema.Document({
            'field1': {'param11': 'schemavalue11', 'param12': 'schemavalue12'},
            'field2': {'param21': 'schemavalue21', 'param22': 'schemavalue22'},
        }, parameters={'collection': 'document1'}),
        '~EmbeddedDocument2': Schema.Document({
            'field1': {'param3': 'schemavalue3'},
            'field2': {'param4': 'schemavalue4'},
        })
    })
class TestCreateFieldInDocument:
    """Tests for the CreateField action applied to a top-level document.

    Relies on the project fixtures ``load_fixture``, ``test_db`` and
    ``dump_db`` (defined elsewhere in the test suite).
    """
    def test_forward__if_default_is_not_set__should_do_nothing(self,
                                                               load_fixture,
                                                               test_db,
                                                               dump_db):
        """Forward run without a default must leave the database unchanged."""
        schema = load_fixture('schema1').get_schema()
        dump = dump_db()
        action = CreateField('Schema1Doc1', 'test_field',
                             choices=None, db_field='test_field', default=None, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=False, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        action.prepare(test_db, schema, MigrationPolicy.strict)
        action.run_forward()
        assert dump == dump_db()
    def test_forward__if_required_and_default_is_set__should_create_field_and_set_a_value(
            self, load_fixture, test_db, dump_db
    ):
        """Forward run with required+default must add the field with the default
        to every existing record."""
        schema = load_fixture('schema1').get_schema()
        dump = dump_db()
        default = 'test!'
        # Build the expected dump: every schema1_doc1 record gains the field.
        expect = deepcopy(dump)
        parser = jsonpath_rw.parse('schema1_doc1[*]')
        for rec in parser.find(expect):
            rec.value['test_field'] = default
        action = CreateField('Schema1Doc1', 'test_field',
                             choices=None, db_field='test_field', default=default, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=True, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        action.prepare(test_db, schema, MigrationPolicy.strict)
        action.run_forward()
        assert expect == dump_db()
    def test_forward__if_required_and_default_is_set_and_field_in_db__should_not_touch_field(
            self, load_fixture, test_db, dump_db
    ):
        """Records that already carry the field must keep their old value."""
        schema = load_fixture('schema1').get_schema()
        default = 'test!'
        # Pre-populate the field on two records and remember their ids.
        ids = set()
        for doc in test_db['schema1_doc1'].find({}, limit=2):
            test_db['schema1_doc1'].update_one({'_id': doc['_id']},
                                               {'$set': {'test_field': 'old_value'}})
            ids.add(doc['_id'])
        action = CreateField('Schema1Doc1', 'test_field',
                             choices=None, db_field='test_field', default=default, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=True, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        action.prepare(test_db, schema, MigrationPolicy.strict)
        action.run_forward()
        assert all(d['test_field'] == 'old_value'
                   for d in test_db['schema1_doc1'].find()
                   if d['_id'] in ids)
    def test_backward__should_drop_field(self, load_fixture, test_db, dump_db):
        """Backward run of CreateField must remove the field from all records."""
        schema = load_fixture('schema1').get_schema()
        del schema['Schema1Doc1']['doc1_str']
        # Expected dump: same data with 'doc1_str' stripped from each record.
        expect = dump_db()
        parser = jsonpath_rw.parse('schema1_doc1[*]')
        for rec in parser.find(expect):
            if 'doc1_str' in rec.value:
                del rec.value['doc1_str']
        action = CreateField('Schema1Doc1', 'doc1_str',
                             choices=None, db_field='doc1_str', default=None, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=True, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        action.prepare(test_db, schema, MigrationPolicy.strict)
        action.run_backward()
        assert expect == dump_db()
    def test_prepare__if_such_document_is_not_in_schema__should_raise_error(self,
                                                                            load_fixture,
                                                                            test_db):
        """prepare() must fail when the target document is absent from the schema."""
        schema = load_fixture('schema1').get_schema()
        del schema['Schema1Doc1']
        action = CreateField('Schema1Doc1', 'doc1_str',
                             choices=None, db_field='doc1_str', default=None, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=True, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        with pytest.raises(SchemaError):
            action.prepare(test_db, schema, MigrationPolicy.strict)
    def test_prepare__if_such_field_in_document_is_in_schema__should_raise_error(self,
                                                                                 load_fixture,
                                                                                 test_db):
        """prepare() must fail when the field to create already exists in the schema."""
        schema = load_fixture('schema1').get_schema()
        action = CreateField('Schema1Doc1', 'doc1_str',
                             choices=None, db_field='doc1_str', default=None, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=True, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        with pytest.raises(SchemaError):
            action.prepare(test_db, schema, MigrationPolicy.strict)
    def test_build_object__if_field_creates__should_return_object(self, left_schema):
        """build_object() returns a CreateField when a new field appears on the right."""
        right_schema = Schema({
            'Document1': Schema.Document({
                'field1': {'param11': 'schemavalue11', 'param12': 'schemavalue12'},
                'field2': {'param21': 'schemavalue21', 'param22': 'schemavalue22'},
                'field3': {'param31': 'schemavalue31', 'param32': 'schemavalue32'},
            }, parameters={'collection': 'document1'}),
            '~EmbeddedDocument2': Schema.Document({
                'field1': {'param3': 'schemavalue3'},
                'field2': {'param4': 'schemavalue4'},
            })
        })
        res = CreateField.build_object('Document1', 'field3', left_schema, right_schema)
        assert isinstance(res, CreateField)
        assert res.document_type == 'Document1'
        assert res.field_name == 'field3'
        assert res.parameters == {'param31': 'schemavalue31', 'param32': 'schemavalue32'}
    @pytest.mark.parametrize('document_type', ('Document1', 'Document_new', 'Document_unknown'))
    def test_build_object__if_document_not_in_both_schemas__should_return_none(
            self, left_schema, document_type
    ):
        """build_object() returns None unless the document exists on both sides."""
        right_schema = Schema({
            'Document_new': Schema.Document({
                'field1': {'param11': 'schemavalue11', 'param12': 'schemavalue12'},
                'field2': {'param21': 'schemavalue21', 'param22': 'schemavalue22'},
                'field3': {'param31': 'schemavalue31', 'param32': 'schemavalue32'},
            }, parameters={'collection': 'document1'}),
            '~EmbeddedDocument2': Schema.Document({
                'field1': {'param3': 'schemavalue3'},
                'field2': {'param4': 'schemavalue4'},
            })
        })
        res = CreateField.build_object(document_type, 'field3', left_schema, right_schema)
        assert res is None
    @pytest.mark.parametrize('field_name', ('field1', 'field2', 'field_unknown'))
    def test_build_object__if_field_does_not_create_in_schema__should_return_none(
            self, left_schema, field_name
    ):
        """build_object() returns None when the field is not newly created."""
        right_schema = Schema({
            'Document1': Schema.Document({
                'field1': {'param11': 'schemavalue11', 'param12': 'schemavalue12'},
                'field3': {'param31': 'schemavalue31', 'param32': 'schemavalue32'},
            }, parameters={'collection': 'document1'}),
            '~EmbeddedDocument2': Schema.Document({
                'field1': {'param3': 'schemavalue3'},
                'field2': {'param4': 'schemavalue4'},
            })
        })
        res = CreateField.build_object('Document1', field_name, left_schema, right_schema)
        assert res is None
    def test_to_schema_patch__should_return_dictdiff_object(self):
        """to_schema_patch() yields an 'add' diff entry with the filled field skeleton."""
        left_schema = Schema({
            'Document1': Schema.Document({
                'field1': {'param11': 'schemavalue11', 'param12': 'schemavalue12'},
                'field2': {'param21': 'schemavalue21', 'param22': 'schemavalue22'},
                'field3': {'param31': 'schemavalue31', 'param32': 'schemavalue32'},
            }, parameters={'collection': 'document1'})
        })
        action = CreateField('Document1', 'field3',
                             db_field='field3', type_key='StringField', param1='value1')
        test_schema_skel = {'type_key': None, 'db_field': None, 'param1': None, 'param2': None}
        # Skeleton keys not supplied to the action stay None in the patch.
        field_params = {
            'type_key': 'StringField',
            'db_field': 'field3',
            'param1': 'value1',
            'param2': None
        }
        expect = [(
            'add',
            'Document1',
            [('field3', field_params)]
        )]
        patcher = patch.object(action, 'get_field_handler_cls')
        with patcher as get_field_handler_cls_mock:
            get_field_handler_cls_mock().schema_skel.return_value = test_schema_skel
            res = action.to_schema_patch(left_schema)
        assert res == expect
    @pytest.mark.parametrize('parameters', (
        {'db_field': 'field3', 'param1': 'value1'},  # Missed 'type_key"
        {'type_key': 'StringField', 'param1': 'value1'},  # Missed 'db_field"
        # 'unknown_param' not in schema skel
        {'type_key': 'StringField', 'param1': 'value1', 'unknown_param': 'value'},
    ))
    def test_to_schema_patch__if_wrong_parameters_passed__should_raise_error(self, parameters):
        """to_schema_patch() must reject missing mandatory or unknown parameters."""
        left_schema = Schema({
            'Document1': Schema.Document({
                'field1': {'param11': 'schemavalue11', 'param12': 'schemavalue12'},
                'field2': {'param21': 'schemavalue21', 'param22': 'schemavalue22'},
                'field3': {'param31': 'schemavalue31', 'param32': 'schemavalue32'},
            }, parameters={'collection': 'document1'})
        })
        action = CreateField('Document1', 'field3', **parameters)
        test_schema_skel = {'type_key': None, 'db_field': None, 'param1': None, 'param2': None}
        patcher = patch.object(action, 'get_field_handler_cls')
        with patcher as get_field_handler_cls_mock:
            # NOTE(review): here the skel is configured on the mock attribute
            # (no call), whereas the success-path test above configures it on
            # the mock's return value (`get_field_handler_cls_mock()`). The
            # error is expected to be raised regardless, but confirm which
            # form is intended for consistency.
            get_field_handler_cls_mock.schema_skel.return_value = test_schema_skel
            with pytest.raises(SchemaError):
                action.to_schema_patch(left_schema)
class TestCreateFieldEmbedded:
    """Tests for the CreateField action applied to an embedded document
    ('~Schema1EmbDoc1'), where records are reached via jsonpath parsers
    supplied by the fixture."""
    def test_forward__if_default_is_not_set__should_do_nothing(
            self, load_fixture, test_db, dump_db
    ):
        """Forward run without a default must leave the database unchanged."""
        schema = load_fixture('schema1').get_schema()
        dump = dump_db()
        action = CreateField('~Schema1EmbDoc1', 'test_field',
                             choices=None, db_field='test_field', default=None, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=False, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        action.prepare(test_db, schema, MigrationPolicy.strict)
        action.run_forward()
        assert dump == dump_db()
    def test_forward__if_required_and_default_is_set__should_create_field_and_set_a_value(
            self, load_fixture, test_db, dump_db
    ):
        """Forward run with required+default must add the field to every
        embedded-document occurrence."""
        schema = load_fixture('schema1').get_schema()
        dump = dump_db()
        default = 'test!'
        # Build the expected dump: set the field on every embedded occurrence
        # found by the fixture's jsonpath parsers.
        expect = deepcopy(dump)
        parsers = load_fixture('schema1').get_embedded_jsonpath_parsers('~Schema1EmbDoc1')
        for rec in itertools.chain.from_iterable(p.find(expect) for p in parsers):
            rec.value['test_field'] = default
        action = CreateField('~Schema1EmbDoc1', 'test_field',
                             choices=None, db_field='test_field', default=default, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=True, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        action.prepare(test_db, schema, MigrationPolicy.strict)
        action.run_forward()
        assert expect == dump_db()
    def test_backward__should_drop_field(self, load_fixture, test_db, dump_db):
        """Backward run must remove the field from every embedded occurrence."""
        schema = load_fixture('schema1').get_schema()
        del schema['~Schema1EmbDoc1']['embdoc1_str']
        dump = dump_db()
        # Expected dump: 'embdoc1_str' stripped wherever it is present.
        expect = deepcopy(dump)
        parsers = load_fixture('schema1').get_embedded_jsonpath_parsers('~Schema1EmbDoc1')
        for rec in itertools.chain.from_iterable(p.find(expect) for p in parsers):
            if 'embdoc1_str' in rec.value:
                del rec.value['embdoc1_str']
        action = CreateField('~Schema1EmbDoc1', 'embdoc1_str',
                             choices=None, db_field='embdoc1_str', default=None, max_length=None,
                             min_length=None, null=False, primary_key=False, regex=None,
                             required=False, sparse=False, type_key='StringField', unique=False,
                             unique_with=None)
        action.prepare(test_db, schema, MigrationPolicy.strict)
        action.run_backward()
        assert expect == dump_db()
| 45.285266
| 99
| 0.57933
| 1,423
| 14,446
| 5.574139
| 0.111736
| 0.015885
| 0.029501
| 0.029123
| 0.826399
| 0.794377
| 0.758195
| 0.746092
| 0.737015
| 0.737015
| 0
| 0.028677
| 0.30479
| 14,446
| 318
| 100
| 45.427673
| 0.761127
| 0.004846
| 0
| 0.69112
| 0
| 0
| 0.167896
| 0.002922
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.057915
| false
| 0.003861
| 0.034749
| 0.003861
| 0.104247
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
178ed138638309250e1c33442f34849d1aab1a6d
| 85
|
py
|
Python
|
solr/__init__.py
|
rkosenko/solrpy
|
6ab5b30b927c4047b8cccb30f1f200864fc28b74
|
[
"Apache-2.0"
] | 37
|
2015-04-01T19:33:50.000Z
|
2018-06-01T09:17:23.000Z
|
solr/__init__.py
|
rkosenko/solrpy
|
6ab5b30b927c4047b8cccb30f1f200864fc28b74
|
[
"Apache-2.0"
] | 115
|
2020-09-02T20:01:26.000Z
|
2022-03-30T11:47:23.000Z
|
solr/__init__.py
|
rkosenko/solrpy
|
6ab5b30b927c4047b8cccb30f1f200864fc28b74
|
[
"Apache-2.0"
] | 25
|
2015-04-07T04:44:18.000Z
|
2018-09-17T02:55:56.000Z
|
from __future__ import absolute_import
from .core import *
from .paginator import *
| 17
| 38
| 0.8
| 11
| 85
| 5.727273
| 0.545455
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152941
| 85
| 4
| 39
| 21.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da06dcaa9db0fb78df84767956a4ba4547e0a538
| 202
|
py
|
Python
|
tests/utils.py
|
PSSF23/graspologic
|
d5ae48d0481b6a60fa580158c2e9bae9cc506a9d
|
[
"MIT"
] | 148
|
2020-09-15T21:45:51.000Z
|
2022-03-24T17:33:01.000Z
|
tests/utils.py
|
PSSF23/graspologic
|
d5ae48d0481b6a60fa580158c2e9bae9cc506a9d
|
[
"MIT"
] | 533
|
2020-09-15T18:49:00.000Z
|
2022-03-25T12:16:58.000Z
|
tests/utils.py
|
PSSF23/graspologic
|
d5ae48d0481b6a60fa580158c2e9bae9cc506a9d
|
[
"MIT"
] | 74
|
2020-09-16T02:24:23.000Z
|
2022-03-20T20:09:38.000Z
|
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import os
def data_file(filename):
    """Return the path of *filename* inside the test suite's test_data directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "test_data", filename)
| 22.444444
| 73
| 0.757426
| 28
| 202
| 5.25
| 0.785714
| 0.081633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138614
| 202
| 8
| 74
| 25.25
| 0.844828
| 0.420792
| 0
| 0
| 0
| 0
| 0.078947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
da102ce168e5dd3598cf7f024df950cda3f463c9
| 78
|
py
|
Python
|
test/test_pyls.py
|
andfoy/python-language-server
|
91b5e4efe9db0d9d2cb1704c6132cf39cec780cd
|
[
"MIT"
] | null | null | null |
test/test_pyls.py
|
andfoy/python-language-server
|
91b5e4efe9db0d9d2cb1704c6132cf39cec780cd
|
[
"MIT"
] | 1
|
2017-11-02T22:27:03.000Z
|
2017-11-02T22:27:03.000Z
|
test/test_pyls.py
|
andfoy/python-language-server
|
91b5e4efe9db0d9d2cb1704c6132cf39cec780cd
|
[
"MIT"
] | null | null | null |
# Copyright 2017 Palantir Technologies, Inc.
def test_pyls():
return True
| 19.5
| 44
| 0.74359
| 10
| 78
| 5.7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.179487
| 78
| 3
| 45
| 26
| 0.828125
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
da3cfc2d4a78f56d92c66f6a43070a0a77e72a32
| 35,811
|
py
|
Python
|
tests/response_data/responses.py
|
koordinates/python-client
|
7e60a183b5d1dbb8d45423040e1bf8c42a5c2d1e
|
[
"BSD-3-Clause"
] | 3
|
2015-10-26T06:38:02.000Z
|
2018-08-17T04:41:10.000Z
|
tests/response_data/responses.py
|
koordinates/python-client
|
7e60a183b5d1dbb8d45423040e1bf8c42a5c2d1e
|
[
"BSD-3-Clause"
] | 21
|
2015-07-10T19:24:24.000Z
|
2020-09-22T01:44:20.000Z
|
tests/response_data/responses.py
|
koordinates/python-client
|
7e60a183b5d1dbb8d45423040e1bf8c42a5c2d1e
|
[
"BSD-3-Clause"
] | 2
|
2015-08-24T17:47:20.000Z
|
2019-01-16T20:55:40.000Z
|
layers_single_good_simulated_response = """{"id": 1474, "url": "https://koordinates.com/services/api/v1/layers/1474/", "type": "layer", "name": "Wellington City Building Footprints", "first_published_at": "2010-06-21T05:05:05.953", "published_at": "2012-05-09T02:11:27.020Z", "description": "Polygons representing building rooftop outlines for urban Wellington including Makara Beach and Makara Village. Each building has an associated elevation above MSL (Wellington 1953). The rooftop elevation does not include above roof structures such as aerials or chimneys. Captured in 1996 and updated in 1998, 1999, 2002, 2006, 2009, 2011 and 2012 in conjunction with aerial photography refly projects.", "description_html": "<p>Polygons representing building rooftop outlines for urban Wellington including Makara Beach and Makara Village. Each building has an associated elevation above MSL (Wellington 1953). The rooftop elevation does not include above roof structures such as aerials or chimneys. Captured in 1996 and updated in 1998, 1999, 2002, 2006, 2009, 2011 and 2012 in conjunction with aerial photography refly projects.</p>", "group": {"id": 119, "url": "https://koordinates.com/services/api/v1/groups/119/", "name": "Wellington City Council", "country": "NZ"}, "data": {"encoding": null, "crs": "EPSG:2193", "primary_key_fields": [], "datasources": [{"id": 65935}], "geometry_field": "GEOMETRY", "fields": [{"name": "GEOMETRY", "type": "geometry"}, {"name": "OBJECTID", "type": "integer"}, {"name": "Shape_Leng", "type": "double"}, {"name": "Shape_Area", "type": "double"}, {"name": "elevation", "type": "double"}, {"name": "feat_code", "type": "string"}, {"name": "source", "type": "string"}]}, "url_html": "https://koordinates.com/layer/1474-wellington-city-building-footprints/", "published_version": "https://koordinates.com/services/api/v1/layers/1474/versions/4067/", "latest_version": "https://koordinates.com/services/api/v1/layers/1474/versions/4067/", "this_version": 
"https://koordinates.com/services/api/v1/layers/1474/versions/4067/", "kind": "vector", "categories": [{"name": "Cadastral & Property", "slug": "cadastral"}], "tags": ["building", "footprint", "outline", "structure"], "collected_at": ["1996-12-31", "2012-05-01"], "created_at": "2010-06-21T05:05:05.953", "license": {"id": 9, "title": "Creative Commons Attribution 3.0 New Zealand", "type": "cc-by", "jurisdiction": "nz", "version": "3.0", "url": "https://koordinates.com/services/api/v1/licenses/9/", "url_html": "https://koordinates.com/license/attribution-3-0-new-zealand/"}, "metadata": {"iso": "https://koordinates.com/services/api/v1/layers/1474/versions/4067/metadata/iso/", "dc": "https://koordinates.com/services/api/v1/layers/1474/versions/4067/metadata/dc/", "native": "https://koordinates.com/services/api/v1/layers/1474/versions/4067/metadata/"}, "elevation_field": "elevation"}"""
layers_multiple_good_simulated_response = """[{"id": 1474, "url": "https://koordinates.com/services/api/v1/layers/1474/", "type": "layer", "name": "Wellington City Building Footprints", "first_published_at": "2010-06-21T05:05:05.953", "published_at": "2012-05-09T02:11:27.020Z"}, {"id": 1479, "url": "https://koordinates.com/services/api/v1/layers/1479/", "type": "layer", "name": "Wellington City 1m Contours (2009)", "first_published_at": "2010-06-23T06:35:44.803", "published_at": "2010-06-23T06:35:44.803Z"}, {"id": 3185, "url": "https://koordinates.com/services/api/v1/layers/3185/", "type": "layer", "name": "Christchurch Post-Earthquake Aerial Photos (24 Feb 2011)", "first_published_at": "2011-03-24T21:24:30.105", "published_at": "2014-12-08T03:10:27.305Z"}, {"id": 1236, "url": "https://koordinates.com/services/api/v1/layers/1236/", "type": "layer", "name": "NZ Cadastral Parcel Polygons", "first_published_at": "2009-10-19T14:46:22.876", "published_at": "2009-10-19T14:46:22.876Z"}, {"id": 183, "url": "https://koordinates.com/services/api/v1/layers/183/", "type": "layer", "name": "Improved NZ Road Centrelines (August 2011)", "first_published_at": "2008-06-11T00:00:00", "published_at": "2008-06-11T00:00:00Z"}, {"id": 1478, "url": "https://koordinates.com/services/api/v1/layers/1478/", "type": "layer", "name": "Wellington City Council Kerbs", "first_published_at": "2010-06-23T06:10:06.098", "published_at": "2012-05-09T01:55:29.311Z"}, {"id": 754, "url": "https://koordinates.com/services/api/v1/layers/754/", "type": "layer", "name": "DOC Public Conservation Areas", "first_published_at": "2009-05-05T23:23:17.637", "published_at": "2014-11-06T22:13:57.570Z"}, {"id": 1475, "url": "https://koordinates.com/services/api/v1/layers/1475/", "type": "layer", "name": "Wellington City 5m Contours (2004)", "first_published_at": "2010-06-21T21:13:05.280", "published_at": "2010-06-21T21:13:05.280Z"}, {"id": 513, "url": "https://koordinates.com/services/api/v1/layers/513/", "type": 
"layer", "name": "NZ Landcover (100m)", "first_published_at": "2009-02-09T01:17:42.293", "published_at": "2009-02-09T01:17:42.293Z"}, {"id": 743, "url": "https://koordinates.com/services/api/v1/layers/743/", "type": "layer", "name": "NZ School Zones (Sept 2010)", "first_published_at": "2009-04-25T04:57:35.376", "published_at": "2009-04-25T04:57:35.376Z"}, {"id": 281, "url": "https://koordinates.com/services/api/v1/layers/281/", "type": "layer", "name": "NZ Mainland Contours (Topo, 1 50k)", "first_published_at": "2008-09-02T06:15:20.402", "published_at": "2015-01-13T23:19:57.187Z"}, {"id": 1231, "url": "https://koordinates.com/services/api/v1/layers/1231/", "type": "layer", "name": "NZ Raster Image (Topo50)", "first_published_at": "2009-09-25T16:10:03.092", "published_at": "2015-01-09T19:32:26.673Z"}, {"id": 1066, "url": "https://koordinates.com/services/api/v1/layers/1066/", "type": "layer", "name": "NZ Deprivation Index 2006", "first_published_at": "2009-08-10T05:39:33.726", "published_at": "2009-08-10T05:39:33.726Z"}, {"id": 1431, "url": "https://koordinates.com/services/api/v1/layers/1431/", "type": "layer", "name": "Wellington City Suburbs", "first_published_at": "2010-04-27T01:17:08.579", "published_at": "2010-04-27T01:17:08.579Z"}, {"id": 3774, "url": "https://koordinates.com/services/api/v1/layers/3774/", "type": "layer", "name": "Wellington City 1m Contours (2011)", "first_published_at": "2011-07-25T01:31:00.173", "published_at": "2011-07-25T01:31:00.173Z"}, {"id": 1541, "url": "https://koordinates.com/services/api/v1/layers/1541/", "type": "layer", "name": "New Zealand Region Bathymetry", "first_published_at": "2010-11-02T23:59:30.552", "published_at": "2010-11-02T23:59:30.552Z"}, {"id": 1443, "url": "https://koordinates.com/services/api/v1/layers/1443/", "type": "layer", "name": "Wellington City Wind Zones", "first_published_at": "2010-05-18T04:29:58.694", "published_at": "2012-05-15T00:36:48.756Z"}, {"id": 1331, "url": 
"https://koordinates.com/services/api/v1/layers/1331/", "type": "layer", "name": "NZ State Highway Centrelines", "first_published_at": "2010-02-21T22:36:20.590", "published_at": "2012-06-20T20:26:32.559Z"}, {"id": 1418, "url": "https://koordinates.com/services/api/v1/layers/1418/", "type": "layer", "name": "NZ 80m Digital Elevation Model", "first_published_at": "2010-03-27T02:46:28.010", "published_at": "2010-03-27T02:46:28.010Z"}, {"id": 1245, "url": "https://koordinates.com/services/api/v1/layers/1245/", "type": "layer", "name": "NZ Area Units (2006 Census)", "first_published_at": "2009-11-13T02:16:23.196", "published_at": "2009-11-13T02:16:23.196Z"}, {"id": 2138, "url": "https://koordinates.com/services/api/v1/layers/2138/", "type": "layer", "name": "Wellington City 1m Digital Elevation Model", "first_published_at": "2011-01-12T04:23:09.875", "published_at": "2011-01-12T04:23:09.875Z"}, {"id": 243, "url": "https://koordinates.com/services/api/v1/layers/243/", "type": "layer", "name": "NZ Schools", "first_published_at": "2008-06-29T00:00:00", "published_at": "2008-06-29T00:00:00Z"}, {"id": 305, "url": "https://koordinates.com/services/api/v1/layers/305/", "type": "layer", "name": "NZ Rainfall", "first_published_at": "2008-09-26T11:55:05.581", "published_at": "2008-09-26T11:55:05.581Z"}, {"id": 413, "url": "https://koordinates.com/services/api/v1/layers/413/", "type": "layer", "name": "NZMS 260 Map Series Index", "first_published_at": "2008-12-02T03:06:00.754", "published_at": "2008-12-02T03:06:00.754Z"}, {"id": 306, "url": "https://koordinates.com/services/api/v1/layers/306/", "type": "layer", "name": "NZ Major Rivers", "first_published_at": "2008-09-26T11:55:10.002", "published_at": "2008-09-26T11:55:10.002Z"}, {"id": 1103, "url": "https://koordinates.com/services/api/v1/layers/1103/", "type": "layer", "name": "World Country Boundaries", "first_published_at": "2009-07-01T06:04:57.642", "published_at": "2009-07-01T06:04:57.642Z"}, {"id": 3903, "url": 
"https://koordinates.com/services/api/v1/layers/3903/", "type": "layer", "name": "NZ Post Postcode Boundaries (June 2011; licensed only for Mix & Mash 2011)", "first_published_at": "2011-08-26T02:51:11.827", "published_at": "2011-08-26T02:51:11.827Z"}, {"id": 753, "url": "https://koordinates.com/services/api/v1/layers/753/", "type": "layer", "name": "DOC Tracks", "first_published_at": "2009-05-04T01:51:35.509", "published_at": "2014-11-06T22:17:19.869Z"}, {"id": 4320, "url": "https://koordinates.com/services/api/v1/layers/4320/", "type": "layer", "name": "NZTA State Highway 2011-2012 Aerial Imagery, 0.15m", "first_published_at": "2012-07-31T05:58:45.937", "published_at": "2012-07-31T05:58:45.937Z"}, {"id": 518, "url": "https://koordinates.com/services/api/v1/layers/518/", "type": "layer", "name": "NZ Cadastral Titles", "first_published_at": "2009-03-24T08:24:05.908", "published_at": "2009-03-24T08:24:05.908Z"}, {"id": 4068, "url": "https://koordinates.com/services/api/v1/layers/4068/", "type": "layer", "name": "Wellington Region Liquefaction Potential", "first_published_at": "2012-02-23T01:29:39.321", "published_at": "2012-02-23T01:29:39.321Z"}, {"id": 3793, "url": "https://koordinates.com/services/api/v1/layers/3793/", "type": "layer", "name": "Porirua 1m Contours (2005)", "first_published_at": "2011-07-28T22:24:15.023", "published_at": "2011-07-28T22:24:15.023Z"}, {"id": 3658, "url": "https://koordinates.com/services/api/v1/layers/3658/", "type": "layer", "name": "NZ Populated Places - Polygons", "first_published_at": "2011-06-16T10:22:13.529", "published_at": "2011-06-16T10:22:13.529Z"}, {"id": 515, "url": "https://koordinates.com/services/api/v1/layers/515/", "type": "layer", "name": "NZ Greyscale Hillshade (100m)", "first_published_at": "2009-02-09T01:15:19.323", "published_at": "2009-02-09T01:15:19.323Z"}, {"id": 748, "url": "https://koordinates.com/services/api/v1/layers/748/", "type": "layer", "name": "US Airports", "first_published_at": 
"2009-04-27T00:48:16.085", "published_at": "2009-04-27T00:48:16.085Z"}, {"id": 1285, "url": "https://koordinates.com/services/api/v1/layers/1285/", "type": "layer", "name": "World Urban Areas (1:10 million)", "first_published_at": "2009-12-03T21:15:14.794", "published_at": "2009-12-03T21:15:14.794Z"}, {"id": 3732, "url": "https://koordinates.com/services/api/v1/layers/3732/", "type": "layer", "name": "05 Auckland 15m DEM (NZSoSDEM v1.0)", "first_published_at": "2011-08-11T21:26:03.746", "published_at": "2011-08-11T21:26:03.746Z"}, {"id": 874, "url": "https://koordinates.com/services/api/v1/layers/874/", "type": "layer", "name": "Oregon Highway Mileposts (2007)", "first_published_at": "2009-05-30T05:43:41.223", "published_at": "2009-05-30T05:43:41.223Z"}, {"id": 1359, "url": "https://koordinates.com/services/api/v1/layers/1359/", "type": "layer", "name": "NZ Residential Areas", "first_published_at": "2010-03-19T07:13:28.231", "published_at": "2010-03-19T07:13:28.231Z"}, {"id": 1156, "url": "https://koordinates.com/services/api/v1/layers/1156/", "type": "layer", "name": "Northland Aerial Photography Acquisition", "first_published_at": "2009-09-09T04:42:27.569", "published_at": "2009-09-09T04:42:27.569Z"}, {"id": 4071, "url": "https://koordinates.com/services/api/v1/layers/4071/", "type": "layer", "name": "Wellington Region Combined Earthquake Hazard", "first_published_at": "2012-03-01T23:43:56.546", "published_at": "2012-03-01T23:43:56.546Z"}, {"id": 152, "url": "https://koordinates.com/services/api/v1/layers/152/", "type": "layer", "name": "NZ Petrol Stations", "first_published_at": "2008-05-21T00:00:00", "published_at": "2012-10-09T21:41:48.247Z"}, {"id": 414, "url": "https://koordinates.com/services/api/v1/layers/414/", "type": "layer", "name": "Parcel Boundaries (Nov 2008)", "first_published_at": "2008-12-14T23:30:19.257", "published_at": "2008-12-14T23:30:19.257Z"}, {"id": 2147, "url": "https://koordinates.com/services/api/v1/layers/2147/", "type": "layer", 
"name": "NZ Meshblocks (2006 Census)", "first_published_at": "2011-01-18T02:43:43.587", "published_at": "2011-01-18T02:43:43.587Z"}, {"id": 284, "url": "https://koordinates.com/services/api/v1/layers/284/", "type": "layer", "name": "NZ Placenames March 2008", "first_published_at": "2008-09-05T06:37:49.514", "published_at": "2008-09-05T06:37:49.514Z"}, {"id": 197, "url": "https://koordinates.com/services/api/v1/layers/197/", "type": "layer", "name": "NZ Regional Councils (2008 Yearly Pattern)", "first_published_at": "2008-06-20T00:00:00", "published_at": "2008-06-20T00:00:00Z"}, {"id": 4328, "url": "https://koordinates.com/services/api/v1/layers/4328/", "type": "layer", "name": "NZTA State Highway 2009-2010 Aerial Imagery, 0.15m", "first_published_at": "2012-08-13T06:27:04.304", "published_at": "2012-08-13T06:27:04.304Z"}, {"id": 3751, "url": "https://koordinates.com/services/api/v1/layers/3751/", "type": "layer", "name": "23 Christchurch 15m DEM (NZSoSDEM v1.0)", "first_published_at": "2011-08-11T21:34:48.565", "published_at": "2011-08-11T21:34:48.565Z"}, {"id": 2216, "url": "https://koordinates.com/services/api/v1/layers/2216/", "type": "layer", "name": "Wellington City Park, Reserve or Cemetery", "first_published_at": "2011-01-31T00:01:10.229", "published_at": "2012-05-31T02:53:16.092Z"}, {"id": 1757, "url": "https://koordinates.com/services/api/v1/layers/1757/", "type": "layer", "name": "Wellington City Aerial Imagery (2009)", "first_published_at": "2010-12-15T21:20:27.081", "published_at": "2010-12-15T21:20:27.081Z"}, {"id": 3657, "url": "https://koordinates.com/services/api/v1/layers/3657/", "type": "layer", "name": "NZ Populated Places - Points", "first_published_at": "2011-06-16T10:12:23.259", "published_at": "2011-06-16T10:12:23.259Z"}, {"id": 6676, "url": "https://koordinates.com/services/api/v1/layers/6676/", "type": "layer", "name": "Christchurch City Building Footprints", "first_published_at": "2014-03-26T18:52:28.289", "published_at": 
"2014-12-29T20:50:27.710Z"}, {"id": 4241, "url": "https://koordinates.com/services/api/v1/layers/4241/", "type": "layer", "name": "NZ Territorial Authorities (2012 Yearly Pattern)", "first_published_at": "2012-05-18T13:13:06.060", "published_at": "2012-05-18T13:13:06.060Z"}, {"id": 4238, "url": "https://koordinates.com/services/api/v1/layers/4238/", "type": "layer", "name": "NZ Meshblocks (2012 Annual Pattern)", "first_published_at": "2012-05-18T02:50:04.930", "published_at": "2012-05-18T02:50:04.930Z"}, {"id": 198, "url": "https://koordinates.com/services/api/v1/layers/198/", "type": "layer", "name": "NZ Territorial Authorities (2008 Yearly Pattern)", "first_published_at": "2008-06-20T00:00:00", "published_at": "2008-06-20T00:00:00Z"}, {"id": 6612, "url": "https://koordinates.com/services/api/v1/layers/6612/", "type": "layer", "name": "Porirua Building Footprints", "first_published_at": "2013-12-16T19:42:35.118", "published_at": "2013-12-16T19:42:35.118Z"}, {"id": 1701, "url": "https://koordinates.com/services/api/v1/layers/1701/", "type": "layer", "name": "Wellington City Tsunami Evacuation Zones", "first_published_at": "2010-11-09T09:05:25.063", "published_at": "2010-11-09T09:05:25.063Z"}, {"id": 3789, "url": "https://koordinates.com/services/api/v1/layers/3789/", "type": "layer", "name": "Porirua 5m Contours (2005)", "first_published_at": "2011-07-28T04:09:53.691", "published_at": "2011-07-28T04:09:53.691Z"}, {"id": 3162, "url": "https://koordinates.com/services/api/v1/layers/3162/", "type": "layer", "name": "Christchurch / Canterbury Address Points (Feb 2011)", "first_published_at": "2011-02-28T21:49:07.721", "published_at": "2011-02-28T21:49:07.721Z"}, {"id": 189, "url": "https://koordinates.com/services/api/v1/layers/189/", "type": "layer", "name": "NZ Supermarkets", "first_published_at": "2008-06-18T00:00:00", "published_at": "2008-06-18T00:00:00Z"}, {"id": 3743, "url": "https://koordinates.com/services/api/v1/layers/3743/", "type": "layer", "name": "16 
Wellington 15m DEM (NZSoSDEM v1.0)", "first_published_at": "2011-08-11T21:33:21.761", "published_at": "2011-08-11T21:33:21.761Z"}, {"id": 297, "url": "https://koordinates.com/services/api/v1/layers/297/", "type": "layer", "name": "NZ SeaCoast (poly)", "first_published_at": "2008-09-26T10:26:19.639", "published_at": "2008-09-26T10:26:19.639Z"}, {"id": 775, "url": "https://koordinates.com/services/api/v1/layers/775/", "type": "layer", "name": "Texas City Limits (2006)", "first_published_at": "2009-05-20T05:58:21.418", "published_at": "2009-05-20T05:58:21.418Z"}, {"id": 791, "url": "https://koordinates.com/services/api/v1/layers/791/", "type": "layer", "name": "Maryland Watersheds", "first_published_at": "2009-05-21T03:00:44.941", "published_at": "2009-05-21T03:00:44.941Z"}, {"id": 242, "url": "https://koordinates.com/services/api/v1/layers/242/", "type": "layer", "name": "NZ Speed Cameras", "first_published_at": "2008-06-25T00:00:00", "published_at": "2008-06-25T00:00:00Z"}, {"id": 514, "url": "https://koordinates.com/services/api/v1/layers/514/", "type": "layer", "name": "NZ Hypsometric Raster (100m)", "first_published_at": "2009-02-09T01:16:46.305", "published_at": "2009-02-09T01:16:46.305Z"}, {"id": 765, "url": "https://koordinates.com/services/api/v1/layers/765/", "type": "layer", "name": "NZ River Polygons (Topo 1:50k)", "first_published_at": "2009-05-18T02:52:38.644", "published_at": "2015-01-08T07:28:14.007Z"}, {"id": 390, "url": "https://koordinates.com/services/api/v1/layers/390/", "type": "layer", "name": "NZ Meshblocks (2008 Yearly Pattern)", "first_published_at": "2008-11-25T00:41:48.949", "published_at": "2008-11-25T00:41:48.949Z"}, {"id": 507, "url": "https://koordinates.com/services/api/v1/layers/507/", "type": "layer", "name": "NZ Payphone Locations", "first_published_at": "2009-01-22T00:58:44.184", "published_at": "2009-01-22T00:58:44.184Z"}, {"id": 412, "url": "https://koordinates.com/services/api/v1/layers/412/", "type": "layer", "name": "NZ Topo 
1:50K (Raster Tiles, NZTM)", "first_published_at": "2008-12-08T05:15:38.787", "published_at": "2008-12-08T05:15:38.787Z"}, {"id": 4240, "url": "https://koordinates.com/services/api/v1/layers/4240/", "type": "layer", "name": "NZ Regional Councils (2012 Yearly Pattern)", "first_published_at": "2012-05-18T04:52:35.957", "published_at": "2012-05-18T04:52:35.957Z"}, {"id": 308, "url": "https://koordinates.com/services/api/v1/layers/308/", "type": "layer", "name": "NZ Urban (North)", "first_published_at": "2008-09-26T11:59:44.089", "published_at": "2008-09-26T11:59:44.089Z"}, {"id": 3736, "url": "https://koordinates.com/services/api/v1/layers/3736/", "type": "layer", "name": "09 Taumarunui 15m DEM (NZSoSDEM v1.0)", "first_published_at": "2011-08-11T21:31:03.606", "published_at": "2011-08-11T21:31:03.606Z"}, {"id": 1720, "url": "https://koordinates.com/services/api/v1/layers/1720/", "type": "layer", "name": "New Zealand 250m Bathymetry Rainbow (2008)", "first_published_at": "2010-12-01T03:26:17.579", "published_at": "2010-12-01T03:26:17.579Z"}, {"id": 749, "url": "https://koordinates.com/services/api/v1/layers/749/", "type": "layer", "name": "US Military Bases", "first_published_at": "2009-04-27T01:39:12.709", "published_at": "2009-04-27T01:39:12.709Z"}, {"id": 1721, "url": "https://koordinates.com/services/api/v1/layers/1721/", "type": "layer", "name": "New Zealand 250m Bathymetry Grid (2008)", "first_published_at": "2010-12-01T03:10:08.093", "published_at": "2010-12-01T03:10:08.093Z"}, {"id": 415, "url": "https://koordinates.com/services/api/v1/layers/415/", "type": "layer", "name": "NZ Topo50 Sheet Index", "first_published_at": "2008-12-03T22:36:16.422", "published_at": "2008-12-03T22:36:16.422Z"}, {"id": 2137, "url": "https://koordinates.com/services/api/v1/layers/2137/", "type": "layer", "name": "Wellington City 5m Digital Elevation Model (2004)", "first_published_at": "2011-01-12T02:32:30.290", "published_at": "2011-01-12T02:32:30.290Z"}, {"id": 18, "url": 
"https://koordinates.com/services/api/v1/layers/18/", "type": "layer", "name": "NZ Coastlines (Topo 1:50k)", "first_published_at": "2007-12-28T00:00:00", "published_at": "2015-01-12T02:37:42.889Z"}, {"id": 4322, "url": "https://koordinates.com/services/api/v1/layers/4322/", "type": "layer", "name": "NZTA State Highway 2010-2011 Aerial Imagery, 0.15m", "first_published_at": "2012-08-10T02:08:09.980", "published_at": "2012-08-10T02:08:09.980Z"}, {"id": 1164, "url": "https://koordinates.com/services/api/v1/layers/1164/", "type": "layer", "name": "NZ Traffic Lights", "first_published_at": "2009-08-05T11:00:33.141", "published_at": "2009-08-05T11:00:33.141Z"}, {"id": 2136, "url": "https://koordinates.com/services/api/v1/layers/2136/", "type": "layer", "name": "Potential Flood Hazards", "first_published_at": "2011-01-12T00:52:15.291", "published_at": "2012-08-29T01:54:53.822Z"}, {"id": 200, "url": "https://koordinates.com/services/api/v1/layers/200/", "type": "layer", "name": "NZ General Electoral Districts (2007)", "first_published_at": "2008-06-20T00:00:00", "published_at": "2008-06-20T00:00:00Z"}, {"id": 4025, "url": "https://koordinates.com/services/api/v1/layers/4025/", "type": "layer", "name": "Wellington Region Tsunami Evacuation Zones", "first_published_at": "2012-02-10T02:58:36.196", "published_at": "2012-02-10T02:58:36.196Z"}, {"id": 692, "url": "https://koordinates.com/services/api/v1/layers/692/", "type": "layer", "name": "California Hillshade (30m)", "first_published_at": "2009-04-04T04:19:55.254", "published_at": "2009-04-04T04:19:55.254Z"}, {"id": 3875, "url": "https://koordinates.com/services/api/v1/layers/3875/", "type": "layer", "name": "Wellington City Aerial Imagery (2011)", "first_published_at": "2011-08-12T01:28:14.691", "published_at": "2011-08-12T01:28:14.691Z"}, {"id": 801, "url": "https://koordinates.com/services/api/v1/layers/801/", "type": "layer", "name": "Texas Oyster Reefs (Galveston Bay System) (", "first_published_at": 
"2009-05-21T06:55:45.575", "published_at": "2009-05-21T06:55:45.575Z"}, {"id": 1304, "url": "https://koordinates.com/services/api/v1/layers/1304/", "type": "layer", "name": "NZ Historic Places", "first_published_at": "2009-12-18T00:01:34.745", "published_at": "2009-12-18T00:01:34.745Z"}, {"id": 1336, "url": "https://koordinates.com/services/api/v1/layers/1336/", "type": "layer", "name": "NZTM Sheet Layout 1:10,000", "first_published_at": "2010-03-03T02:58:21.543", "published_at": "2010-03-03T02:58:21.543Z"}, {"id": 4284, "url": "https://koordinates.com/services/api/v1/layers/4284/", "type": "layer", "name": "NZ Area Units (2012 Yearly Pattern, Clipped)", "first_published_at": "2012-06-14T05:13:58.490", "published_at": "2012-06-14T05:13:58.490Z"}, {"id": 1503, "url": "https://koordinates.com/services/api/v1/layers/1503/", "type": "layer", "name": "Spark (formerly Telecom) Cell Sites", "first_published_at": "2010-07-06T03:16:29.131", "published_at": "2014-12-28T21:12:51.212Z"}, {"id": 300, "url": "https://koordinates.com/services/api/v1/layers/300/", "type": "layer", "name": "NZ FSMS6 (North Island)", "first_published_at": "2008-09-26T11:42:50.822", "published_at": "2008-09-26T11:42:50.822Z"}, {"id": 346, "url": "https://koordinates.com/services/api/v1/layers/346/", "type": "layer", "name": "NZ Urban Areas (2008 Yearly Pattern)", "first_published_at": "2008-11-14T23:19:59.396", "published_at": "2008-11-14T23:19:59.396Z"}, {"id": 40, "url": "https://koordinates.com/services/api/v1/layers/40/", "type": "layer", "name": "NZ Road Centrelines (Topo 1:50k)", "first_published_at": "2007-12-29T00:00:00", "published_at": "2015-01-08T07:26:21.316Z"}, {"id": 1483, "url": "https://koordinates.com/services/api/v1/layers/1483/", "type": "layer", "name": "NZ St John Ambulance Stations", "first_published_at": "2010-06-24T21:58:57.355", "published_at": "2010-06-24T21:58:57.355Z"}, {"id": 1184, "url": "https://koordinates.com/services/api/v1/layers/1184/", "type": "layer", "name": 
"Northland Flood Susceptible Land", "first_published_at": "2009-09-09T04:43:52.244", "published_at": "2009-09-09T04:43:52.244Z"}, {"id": 1247, "url": "https://koordinates.com/services/api/v1/layers/1247/", "type": "layer", "name": "NZ Territorial Authorities (2006 Census)", "first_published_at": "2009-11-13T02:27:01.021", "published_at": "2009-11-13T02:27:01.021Z"}, {"id": 2213, "url": "https://koordinates.com/services/api/v1/layers/2213/", "type": "layer", "name": "NZ Territorial Local Authority Boundaries 2011", "first_published_at": "2011-01-28T03:29:44.942", "published_at": "2011-01-28T03:29:44.942Z"}, {"id": 1291, "url": "https://koordinates.com/services/api/v1/layers/1291/", "type": "layer", "name": "World Bathymetry (1:10 million)", "first_published_at": "2010-01-27T00:47:33.135", "published_at": "2014-01-23T20:02:43.150Z"}, {"id": 4069, "url": "https://koordinates.com/services/api/v1/layers/4069/", "type": "layer", "name": "Wellington Region Earthquake Induced Slope Failure", "first_published_at": "2012-03-01T23:42:03.835", "published_at": "2012-03-01T23:42:03.835Z"}]
"""
sets_multiple_good_simulated_response = """
[{"id": 933, "title": "Ultra Fast Broadband Initiative Coverage", "description": "", "description_html": "", "categories": [], "tags": [], "group": {"id": 141, "url": "https://koordinates.com/services/api/v1/groups/141/", "name": "New Zealand Broadband Map", "country": "NZ"}, "items": ["https://koordinates.com/services/api/v1/layers/4226/", "https://koordinates.com/services/api/v1/layers/4228/", "https://koordinates.com/services/api/v1/layers/4227/", "https://koordinates.com/services/api/v1/layers/4061/", "https://koordinates.com/services/api/v1/layers/4147/", "https://koordinates.com/services/api/v1/layers/4148/"], "url": "https://koordinates.com/services/api/v1/sets/933/", "url_html": "https://koordinates.com/set/933-ultra-fast-broadband-initiative-coverage/", "metadata": null, "created_at": "2012-03-21T21:49:51.420Z"}, {"id": 928, "title": "Fibre Optic Networks and Fibre Optic Coverage", "description": "", "description_html": "", "categories": [], "tags": [], "group": {"id": 141, "url": "https://koordinates.com/services/api/v1/groups/141/", "name": "New Zealand Broadband Map", "country": "NZ"}, "items": ["https://koordinates.com/services/api/v1/layers/4085/", "https://koordinates.com/services/api/v1/layers/4103/", "https://koordinates.com/services/api/v1/layers/4032/", "https://koordinates.com/services/api/v1/layers/4061/", "https://koordinates.com/services/api/v1/layers/4118/", "https://koordinates.com/services/api/v1/layers/4126/", "https://koordinates.com/services/api/v1/layers/4130/", "https://koordinates.com/services/api/v1/layers/4131/", "https://koordinates.com/services/api/v1/layers/4148/", "https://koordinates.com/services/api/v1/layers/4147/", "https://koordinates.com/services/api/v1/layers/4149/", "https://koordinates.com/services/api/v1/layers/4116/", "https://koordinates.com/services/api/v1/layers/4117/", "https://koordinates.com/services/api/v1/layers/4121/", "https://koordinates.com/services/api/v1/layers/4123/", 
"https://koordinates.com/services/api/v1/layers/4124/", "https://koordinates.com/services/api/v1/layers/4125/", "https://koordinates.com/services/api/v1/layers/4128/", "https://koordinates.com/services/api/v1/layers/4129/", "https://koordinates.com/services/api/v1/layers/4132/"], "url": "https://koordinates.com/services/api/v1/sets/928/", "url_html": "https://koordinates.com/set/928-fibre-optic-networks-and-fibre-optic-coverage/", "metadata": null, "created_at": "2012-03-21T00:27:25.448Z"}, {"id": 936, "title": "Rural Broadband Initiative Coverage 5 Mbps+", "description": "", "description_html": "", "categories": [], "tags": [], "group": {"id": 141, "url": "https://koordinates.com/services/api/v1/groups/141/", "name": "New Zealand Broadband Map", "country": "NZ"}, "items": ["https://koordinates.com/services/api/v1/layers/4188/", "https://koordinates.com/services/api/v1/layers/4086/", "https://koordinates.com/services/api/v1/layers/4084/", "https://koordinates.com/services/api/v1/layers/4187/", "https://koordinates.com/services/api/v1/layers/4186/", "https://koordinates.com/services/api/v1/layers/4083/", "https://koordinates.com/services/api/v1/layers/4196/"], "url": "https://koordinates.com/services/api/v1/sets/936/", "url_html": "https://koordinates.com/set/936-rural-broadband-initiative-coverage-5-mbps/", "metadata": null, "created_at": "2012-03-22T01:26:54.563Z"}, {"id": 927, "title": "Wireless Broadband Providers", "description": "", "description_html": "", "categories": [], "tags": [], "group": {"id": 141, "url": "https://koordinates.com/services/api/v1/groups/141/", "name": "New Zealand Broadband Map", "country": "NZ"}, "items": ["https://koordinates.com/services/api/v1/layers/4043/", "https://koordinates.com/services/api/v1/layers/4042/", "https://koordinates.com/services/api/v1/layers/4067/", "https://koordinates.com/services/api/v1/layers/4066/", "https://koordinates.com/services/api/v1/layers/4084/", "https://koordinates.com/services/api/v1/layers/4047/", 
"https://koordinates.com/services/api/v1/layers/4049/", "https://koordinates.com/services/api/v1/layers/4086/", "https://koordinates.com/services/api/v1/layers/4083/", "https://koordinates.com/services/api/v1/layers/4041/", "https://koordinates.com/services/api/v1/layers/4040/", "https://koordinates.com/services/api/v1/layers/4022/"], "url": "https://koordinates.com/services/api/v1/sets/927/", "url_html": "https://koordinates.com/set/927-wireless-broadband-providers/", "metadata": null, "created_at": "2012-03-21T00:22:35.415Z"}, {"id": 1563, "title": "Quattroshapes - All Data", "description": "", "description_html": "", "categories": [], "tags": [], "group": {"id": 137, "url": "https://koordinates.com/services/api/v1/groups/137/", "name": "Quattroshapes", "country": "US"}, "items": ["https://koordinates.com/services/api/v1/layers/6237/", "https://koordinates.com/services/api/v1/layers/6236/", "https://koordinates.com/services/api/v1/layers/6233/", "https://koordinates.com/services/api/v1/layers/6232/", "https://koordinates.com/services/api/v1/layers/6231/", "https://koordinates.com/services/api/v1/layers/6227/", "https://koordinates.com/services/api/v1/layers/6230/", "https://koordinates.com/services/api/v1/layers/6226/", "https://koordinates.com/services/api/v1/layers/6225/", "https://koordinates.com/services/api/v1/layers/6235/"], "url": "https://koordinates.com/services/api/v1/sets/1563/", "url_html": "https://koordinates.com/set/1563-quattroshapes-all-data/", "metadata": null, "created_at": "2013-06-14T01:38:56.438Z"}, {"id": 2, "title": "Christchurch Earthquake Layers (LATEST)", "description": "Christchurch City Council Bridge and Road Closures, and Rapid Building Assessments.\n", "description_html": "<p>Christchurch City Council Bridge and Road Closures, and Rapid Building Assessments.<br />\n</p>", "categories": [], "tags": [], "group": {"id": 148, "url": "https://koordinates.com/services/api/v1/groups/148/", "name": "Christchurch Earthquake 2011", 
"country": "NZ"}, "items": [], "url": "https://koordinates.com/services/api/v1/sets/2/", "url_html": "https://koordinates.com/set/2-christchurch-earthquake-layers-latest/", "metadata": null, "created_at": "2011-02-25T09:28:49.697Z"}, {"id": 1063, "title": "Finland Waterways", "description": "A collection of waterway datasets from the 1:1,000,000 scale topographic database.", "description_html": "<p>A collection of waterway datasets from the 1:1,000,000 scale topographic database.</p>", "categories": [], "tags": [], "group": {"id": 140, "url": "https://koordinates.com/services/api/v1/groups/140/", "name": "National Land Survey of Finland", "country": "FI"}, "items": ["https://koordinates.com/services/api/v1/layers/4262/", "https://koordinates.com/services/api/v1/layers/4277/", "https://koordinates.com/services/api/v1/layers/4259/"], "url": "https://koordinates.com/services/api/v1/sets/1063/", "url_html": "https://koordinates.com/set/1063-finland-waterways/", "metadata": null, "created_at": "2012-05-28T21:51:49.803Z"}, {"id": 230, "title": "TZ Timezones Layers", "description": "Timezone Layers collected from http://efele.net/maps/tz/world/", "description_html": "<p>Timezone Layers collected from <a href=\"http://efele.net/maps/tz/world/\">efele.net/maps/tz/world/</a></p>", "categories": [], "tags": [], "group": {"id": 163, "url": "https://koordinates.com/services/api/v1/groups/163/", "name": "TZ Timezones Maps", "country": "US"}, "items": ["https://koordinates.com/services/api/v1/layers/751/", "https://koordinates.com/services/api/v1/layers/3727/", "https://koordinates.com/services/api/v1/layers/3726/"], "url": "https://koordinates.com/services/api/v1/sets/230/", "url_html": "https://koordinates.com/set/230-tz-timezones-layers/", "metadata": null, "created_at": "2011-07-19T01:30:01.381Z"}, {"id": 1551, "title": "A sample of Triple J modelled radio coverage", "description": "", "description_html": "", "categories": [], "tags": [], "user": {"id": 16042, "url": 
"https://koordinates.com/services/api/v1/users/16042/", "first_name": "Gov ", "last_name": "Hack", "country": "AU"}, "items": ["https://koordinates.com/services/api/v1/layers/6162/", "https://koordinates.com/services/api/v1/layers/6214/", "https://koordinates.com/services/api/v1/layers/6211/", "https://koordinates.com/services/api/v1/layers/6209/", "https://koordinates.com/services/api/v1/layers/6208/", "https://koordinates.com/services/api/v1/layers/6207/", "https://koordinates.com/services/api/v1/layers/6205/", "https://koordinates.com/services/api/v1/layers/6203/", "https://koordinates.com/services/api/v1/layers/6202/", "https://koordinates.com/services/api/v1/layers/6200/", "https://koordinates.com/services/api/v1/layers/6194/", "https://koordinates.com/services/api/v1/layers/6188/", "https://koordinates.com/services/api/v1/layers/6189/", "https://koordinates.com/services/api/v1/layers/6192/", "https://koordinates.com/services/api/v1/layers/6193/", "https://koordinates.com/services/api/v1/layers/6186/", "https://koordinates.com/services/api/v1/layers/6185/", "https://koordinates.com/services/api/v1/layers/6172/", "https://koordinates.com/services/api/v1/layers/6181/", "https://koordinates.com/services/api/v1/layers/6177/", "https://koordinates.com/services/api/v1/layers/6166/", "https://koordinates.com/services/api/v1/layers/6165/", "https://koordinates.com/services/api/v1/layers/6164/", "https://koordinates.com/services/api/v1/layers/6163/", "https://koordinates.com/services/api/v1/layers/6175/", "https://koordinates.com/services/api/v1/layers/6210/", "https://koordinates.com/services/api/v1/layers/6215/", "https://koordinates.com/services/api/v1/layers/6199/", "https://koordinates.com/services/api/v1/layers/6180/"], "url": "https://koordinates.com/services/api/v1/sets/1551/", "url_html": "https://koordinates.com/set/1551-a-sample-of-triple-j-modelled-radio-coverage/", "metadata": null, "created_at": "2013-06-03T03:51:48.082Z"}]"""
| 4,476.375
| 22,955
| 0.693781
| 5,327
| 35,811
| 4.596583
| 0.167824
| 0.148983
| 0.176917
| 0.23928
| 0.748999
| 0.697786
| 0.632729
| 0.358858
| 0.148616
| 0.14543
| 0
| 0.162232
| 0.063975
| 35,811
| 7
| 22,956
| 5,115.857143
| 0.568258
| 0
| 0
| 0
| 0
| 0.6
| 0.995951
| 0.21795
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e534a23fae22cc4018e6539490da96e041499d36
| 227
|
py
|
Python
|
plugins/executor_plugin/minerva/const.py
|
anismiles/querybook
|
66142ee1fe1198fb0a3de97dab7677daaefd4118
|
[
"Apache-2.0"
] | null | null | null |
plugins/executor_plugin/minerva/const.py
|
anismiles/querybook
|
66142ee1fe1198fb0a3de97dab7677daaefd4118
|
[
"Apache-2.0"
] | null | null | null |
plugins/executor_plugin/minerva/const.py
|
anismiles/querybook
|
66142ee1fe1198fb0a3de97dab7677daaefd4118
|
[
"Apache-2.0"
] | null | null | null |
# Example connection string:
#   https://easily-champion-frog.dataos.io:7432/depot/collection
# Matches: scheme, a comma-separated list of host[:port], up to two
# path segments, and an optional key=value query string.
connection_regex = (
    r"^(http|https):\/\/"
    r"([\w.-]+(?:\:\d+)?(?:,[\w.-]+(?:\:\d+)?)*)"
    r"(\/\w+)?(\/\w+)?"
    r"(\?[\w.-]+=[\w.-]+(?:&[\w.-]+=[\w.-]+)*)?$"
)
# API keys are one or more word characters.
apikey_regex = r"\w+"
| 45.4
| 140
| 0.436123
| 27
| 227
| 3.592593
| 0.592593
| 0.103093
| 0.123711
| 0.123711
| 0.061856
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018349
| 0.039648
| 227
| 4
| 141
| 56.75
| 0.426606
| 0.264317
| 0
| 0
| 0
| 0.5
| 0.739394
| 0.715152
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e581948b51ad831cd2a0986f6336b51585a0ee81
| 8,235
|
py
|
Python
|
advanced/part10-12_course_records/test/test_course_records.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
advanced/part10-12_course_records/test/test_course_records.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
advanced/part10-12_course_records/test/test_course_records.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
from tmc import points, reflect
from tmc.utils import load, load_module, reload_module, get_stdout, check_source
from functools import reduce
import os
import os.path
import textwrap
from random import choice, randint
from datetime import date, datetime, timedelta
exercise = 'src.course_records'
def f(attr: list):
    """Return the items of *attr* joined into one comma-separated string."""
    separator = ","
    return separator.join(attr)
def s(l: list):
    """Return the items of *l* joined with newlines, one item per line."""
    newline = "\n"
    return newline.join(l)
class CourseRecordsTest(unittest.TestCase):
    """Tests for the course_records exercise: adding course completions,
    querying a single course, and printing overall statistics.

    Fixes vs. the previous version: the failure messages contained the
    literal text ``inputn`` instead of ``input\\n`` (a dropped escape), and
    "insde" was a typo for "inside". The repeated run/assert boilerplate is
    factored into private helpers; the public test method names (which the
    checker matches on) are unchanged.
    """

    @classmethod
    def setUpClass(cls):
        # Load the student's module once, feeding "0" (quit) so the
        # module-level menu loop terminates immediately.
        with patch('builtins.input', side_effect=["0"]):
            cls.module = load_module(exercise, 'fi')

    def _run(self, input_values):
        """Reload the exercise module feeding *input_values* to input().

        Returns the program's captured stdout. Fails the test (rather than
        erroring) with a readable message if the program raises.
        """
        with patch('builtins.input', side_effect=input_values):
            try:
                reload_module(self.module)
            # Bare except on purpose: the student program may raise
            # anything (including SystemExit) and that must fail the test.
            except:
                self.fail(f"Check that the program works with input\n{s(input_values)}")
        return get_stdout()

    def _assert_in_output(self, expected, output, input_values):
        """Assert that *expected* occurs somewhere in *output*."""
        self.assertTrue(
            expected in output,
            f"Program output should be\n{expected}\nwith input\n{s(input_values)}\nNow the output was\n{output}"
        )

    def _assert_not_in_output(self, expected, output, input_values):
        """Assert that *expected* does NOT occur in *output*."""
        self.assertFalse(
            expected in output,
            f"The output should NOT contain\n{expected}\nwith input\n{s(input_values)}\nNow the output was\n{output}"
        )

    def _assert_lines_in_output(self, exp, output, input_values):
        """Assert that every line of the block *exp* occurs in *output*."""
        for line in exp.split("\n"):
            if not line in output:
                self.fail(f"Program should output line\n{line}\nwith input\n{s(input_values)}\nOutput was\n{output}")

    @points('10.course_records_part1')
    def test_0_stops(self):
        # The program must terminate cleanly when "0" (quit) is the only input.
        self._run(["0"])

    @points('10.course_records_part1')
    def test_1_add_works_1(self):
        input_values = ["1", "Programming", "3", "5", "0"]
        output = self._run(input_values)
        self.assertFalse(
            len(output) == 0,
            'Your program does not output anything.\n Check that it is not inside if __name__ == "__main__" block!'
        )

    @points('10.course_records_part1')
    def test_2_add_found(self):
        input_values = ["1", "Programming", "3", "5", "2", "Programming", "0"]
        output = self._run(input_values)
        self._assert_in_output("Programming (5 cr) grade 3", output, input_values)

    @points('10.course_records_part1')
    def test_3_increase_works(self):
        input_values = ["1", "Programming", "3", "5", "1", "Programming", "5", "5", "2", "Programming", "0"]
        output = self._run(input_values)
        # Re-adding the same course with a better grade must replace the entry.
        self._assert_in_output("Programming (5 cr) grade 5", output, input_values)
        self._assert_not_in_output("Programming (5 cr) grade 3", output, input_values)

    @points('10.course_records_part1')
    def test_4_grade_does_not_decrease(self):
        input_values = ["1", "Programming", "3", "5", "1", "Programming", "1", "5", "2", "Programming", "0"]
        output = self._run(input_values)
        # A worse grade must not overwrite an existing better grade.
        self._assert_in_output("Programming (5 cr) grade 3", output, input_values)
        self._assert_not_in_output("Programming (5 cr) grade 1", output, input_values)

    @points('10.course_records_part1')
    def test_5_unkonow_completion(self):
        # NOTE: "unkonow" is a typo for "unknown"; the method name is kept
        # unchanged because the checker may match tests by name.
        input_values = ["1", "Programming", "3", "5", "2", "Java-ohjelmointi", "0"]
        output = self._run(input_values)
        self._assert_in_output("no entry for this course", output, input_values)
        self._assert_not_in_output("Programming (5 cr) grade", output, input_values)

    @points('10.course_records_part2')
    def test_6_stats_1(self):
        input_values = ["1", "Programming", "3", "5", "3", "0"]
        output = self._run(input_values)
        exp = """
1 completed courses, a total of 5 credits
mean 3
grade distribution
5:
4:
3: x
2:
1:
"""
        self._assert_lines_in_output(exp, output, input_values)

    @points('10.course_records_part2')
    def test_7_stats_2(self):
        input_values = ["1", "Programming", "3", "5", "1", "Ohja", "5", "5", "3", "0"]
        output = self._run(input_values)
        exp = """
2 completed courses, a total of 10 credits
mean 4
grade distribution
5: x
4:
3: x
2:
1:
"""
        self._assert_lines_in_output(exp, output, input_values)

    @points('10.course_records_part2')
    def test_7_stats_3(self):
        input_values = ["1", "Programming", "3", "5", "1", "Programming", "5", "5", "3", "1", "Algorithms", "5", "10", "3", "1", "Statistics", "1", "5", "3", "0"]
        output = self._run(input_values)
        exp = """
3 completed courses, a total of 20 credits
mean 3.7
grade distribution
5: xx
4:
3:
2:
1: x
"""
        self._assert_lines_in_output(exp, output, input_values)

    @points('10.course_records_part2')
    def test_8_stats_4(self):
        input_values = ["1", "Programming", "3", "5",
                        "1", "Programming", "5", "5",
                        "1", "Algorithms", "5", "10",
                        "1", "Statistics", "1", "5",
                        "1", "Databases", "4", "5",
                        "1", "Operating Systems", "2", "5",
                        "1", "Distributed Systems", "4", "5",
                        "1", "Unix", "2", "1",
                        "3", "0"]
        output = self._run(input_values)
        exp = """
7 completed courses, a total of 36 credits
mean 3.3
grade distribution
5: xx
4: xx
3:
2: xx
1: x
"""
        self._assert_lines_in_output(exp, output, input_values)
if __name__ == '__main__':
unittest.main()
| 37.775229
| 162
| 0.581785
| 1,077
| 8,235
| 4.312906
| 0.129062
| 0.097094
| 0.054252
| 0.031001
| 0.80409
| 0.773089
| 0.760388
| 0.738859
| 0.707212
| 0.698385
| 0
| 0.031017
| 0.283546
| 8,235
| 217
| 163
| 37.949309
| 0.756271
| 0
| 0
| 0.636364
| 0
| 0.064171
| 0.370735
| 0.105282
| 0
| 0
| 0
| 0
| 0.042781
| 1
| 0.069519
| false
| 0
| 0.053476
| 0.010695
| 0.139037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e5f06b599502890a8c339c5afd35bb7a7a48ba43
| 37
|
py
|
Python
|
utils/__init__.py
|
nullarmo/Infiniti
|
67304b78fdcf3e549bb59fb63fa55b2ae44d2ab4
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
nullarmo/Infiniti
|
67304b78fdcf3e549bb59fb63fa55b2ae44d2ab4
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
nullarmo/Infiniti
|
67304b78fdcf3e549bb59fb63fa55b2ae44d2ab4
|
[
"MIT"
] | null | null | null |
from hd_key import HDKey, HD_HARDEN
| 12.333333
| 35
| 0.810811
| 7
| 37
| 4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 2
| 36
| 18.5
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e5fa322285b7a2870c65507e9cc7a567fea39998
| 35
|
py
|
Python
|
mmm/__init__.py
|
rick446/mmm
|
34d46f6cbf91a13d3168f160b57b268f0ec56fd9
|
[
"Apache-2.0"
] | 35
|
2015-01-04T20:10:05.000Z
|
2021-11-09T10:07:02.000Z
|
mmm/__init__.py
|
rick446/mmm
|
34d46f6cbf91a13d3168f160b57b268f0ec56fd9
|
[
"Apache-2.0"
] | null | null | null |
mmm/__init__.py
|
rick446/mmm
|
34d46f6cbf91a13d3168f160b57b268f0ec56fd9
|
[
"Apache-2.0"
] | 14
|
2015-03-13T15:39:52.000Z
|
2019-07-28T18:53:12.000Z
|
from slave import ReplicationSlave
| 17.5
| 34
| 0.885714
| 4
| 35
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f90cd43482ef00e9b5ab481bc86a562fd78c8476
| 123
|
py
|
Python
|
tests/testunits/testlexicographers/testnaxxxlexicographer.py
|
rsnakamura/oldape
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
[
"Apache-2.0"
] | null | null | null |
tests/testunits/testlexicographers/testnaxxxlexicographer.py
|
rsnakamura/oldape
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
[
"Apache-2.0"
] | null | null | null |
tests/testunits/testlexicographers/testnaxxxlexicographer.py
|
rsnakamura/oldape
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from apetools.lexicographers.sublexicographers.naxxxlexicographer import NaxxxLexicographer
| 30.75
| 91
| 0.902439
| 11
| 123
| 10.090909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 123
| 3
| 92
| 41
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f921cfc05c392f7b086ee04b216048c14a736a53
| 44
|
py
|
Python
|
lang/Python/find-limit-of-recursion-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/find-limit-of-recursion-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/find-limit-of-recursion-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
# Print the interpreter's current recursion depth limit
# (CPython's default is typically 1000).
import sys

print(sys.getrecursionlimit())
| 14.666667
| 32
| 0.772727
| 5
| 44
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 2
| 33
| 22
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
0093517aaf5902ac39126eea0b7bb24a9e615d6d
| 6,670
|
py
|
Python
|
tests-proxy/server/example_com_http_client_tunnel_or_bump_test.py
|
plater-inc/proxy
|
f277fd8b3b5bf19b29c8f07055b65ed34c9a8dda
|
[
"MIT"
] | null | null | null |
tests-proxy/server/example_com_http_client_tunnel_or_bump_test.py
|
plater-inc/proxy
|
f277fd8b3b5bf19b29c8f07055b65ed34c9a8dda
|
[
"MIT"
] | null | null | null |
tests-proxy/server/example_com_http_client_tunnel_or_bump_test.py
|
plater-inc/proxy
|
f277fd8b3b5bf19b29c8f07055b65ed34c9a8dda
|
[
"MIT"
] | null | null | null |
import test_util.proxy
import test_util.runner
import ssl
import http.client
if __name__ == "__main__":
    # Integration test: runs the proxy twice, once in "tunnel" mode and once
    # in "bump" (TLS-interception) mode, and checks that the callback events
    # reported over `queue` arrive in the expected order. In bump mode the
    # proxy sees decrypted HTTPS traffic, so per-request callbacks fire; in
    # tunnel mode only the CONNECT-level events are visible.
    for bump in [False, True]:
        queue, proxy_process = test_util.runner.run(
            "./tests-proxy/server/tunnel_or_bump_callbacks_proxy",
            ["bump" if bump else "tunnel"],
        )
        # The proxy prints its listening port as the first queue message.
        proxy_port = int(queue.get().strip())

        # --- Plain HTTP through the proxy -------------------------------
        http_connection = http.client.HTTPConnection("127.0.0.1", proxy_port)
        http_connection.connect()
        test_util.runner.get_line_from_queue_and_assert(queue, "connection\n")
        # Both with and without a trailing slash the proxy should normalize
        # the request path to "/".
        for url in ["http://example.com", "http://example.com/"]:
            request = http_connection.request("GET", url)
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_pre_body /\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_body_some_last /\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_pre_body / 200\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_body_some_last /\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_finished\n"
            )
            response = http_connection.getresponse()
            body_piece = b"<h1>Example Domain</h1>"
            assert body_piece in response.read(), "%s has no '%s' in body!" % (
                url,
                body_piece,
            )
        http_connection.close()
        test_util.runner.get_line_from_queue_and_assert(queue, "connection_finished\n")

        # --- HTTPS through the proxy ------------------------------------
        if bump:
            # In bump mode the proxy re-signs certificates, so disable
            # verification on the client side.
            context = ssl.create_default_context()
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
            kwargs = {"context": context}
        else:
            kwargs = {}
        https_connection = http.client.HTTPSConnection(
            "127.0.0.1", proxy_port, **kwargs
        )
        # We cannot call https_connection.connect() here as it would try
        # talking SSL instead of plain HTTP - we have to tell it about tunnel
        # first.
        https_connection.set_tunnel("example.com", 443)
        https_connection.request("GET", "/")
        test_util.runner.get_line_from_queue_and_assert(queue, "connection\n")
        test_util.runner.get_line_from_queue_and_assert(
            queue, "connect example.com 443\n"
        )
        # Per-request callbacks are only visible when the TLS is bumped.
        if bump:
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_pre_body /\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_body_some_last /\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_pre_body / 200\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_body_some_last /\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_finished\n"
            )
        response = https_connection.getresponse()
        body_piece = b"<h1>Example Domain</h1>"
        # NOTE(review): `url` here is stale — it still holds the last value
        # from the HTTP loop above, so the assertion message may name the
        # wrong URL. The assertion itself is unaffected.
        assert body_piece in response.read(), "%s has no '%s' in body!" % (
            url,
            body_piece,
        )
        https_connection.close()
        test_util.runner.get_line_from_queue_and_assert(queue, "connection_finished\n")

        # Let's test some redirects
        https_connection = http.client.HTTPSConnection(
            "127.0.0.1", proxy_port, **kwargs
        )
        # We cannot call https_connection.connect() here as it would try
        # talking SSL instead of plain HTTP - we have to tell it about tunnel
        # first.
        https_connection.set_tunnel("www.iana.org", 443)
        url = "/domains/example"
        https_connection.request(
            "GET",
            url,
            headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4298.0 Safari/537.36",
            },
        )
        test_util.runner.get_line_from_queue_and_assert(queue, "connection\n")
        test_util.runner.get_line_from_queue_and_assert(
            queue, "connect www.iana.org 443\n"
        )
        if bump:
            # /domains/example redirects (301) to /domains/reserved.
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_pre_body /domains/example\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_body_some_last /domains/example\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_pre_body /domains/example 301\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_body_some_last /domains/example\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_finished\n"
            )
        response = https_connection.getresponse()
        body_piece = b"<h1>Moved Permanently</h1>"
        assert body_piece in response.read(), "%s has no '%s' in body!" % (
            url,
            body_piece,
        )
        # Second request on the same tunneled connection (keep-alive).
        url = "/domains/reserved"
        https_connection.request(
            "GET",
            url,
            headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4298.0 Safari/537.36",
            },
        )
        if bump:
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_pre_body /domains/reserved\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "request_body_some_last /domains/reserved\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_pre_body /domains/reserved 200\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_body_some_last /domains/reserved\n"
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_finished\n"
            )
        response = https_connection.getresponse()
        body_piece = b"<h1>IANA-managed Reserved Domains</h1>"
        assert body_piece in response.read(), "%s has no '%s' in body!" % (
            url,
            body_piece,
        )
        https_connection.close()
        test_util.runner.get_line_from_queue_and_assert(queue, "connection_finished\n")
        proxy_process.kill()
| 37.47191
| 151
| 0.588756
| 802
| 6,670
| 4.549875
| 0.15586
| 0.067964
| 0.1151
| 0.130447
| 0.81584
| 0.81584
| 0.811729
| 0.811729
| 0.811729
| 0.811729
| 0
| 0.02152
| 0.317241
| 6,670
| 177
| 152
| 37.683616
| 0.779754
| 0.045127
| 0
| 0.513333
| 0
| 0.013333
| 0.221506
| 0.046219
| 0
| 0
| 0
| 0
| 0.213333
| 1
| 0
| false
| 0
| 0.026667
| 0
| 0.026667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
00d18a0e6bfca167f2fb6800337a84966739c67c
| 24,981
|
py
|
Python
|
app/api/v2/position.py
|
BoostryJP/ibet-Wallet-API
|
da7323a298bdb746e93da8a9b57a5da1dd6f14ac
|
[
"Apache-2.0"
] | 6
|
2021-06-16T02:06:21.000Z
|
2021-09-20T09:50:56.000Z
|
app/api/v2/position.py
|
BoostryJP/ibet-Wallet-API
|
da7323a298bdb746e93da8a9b57a5da1dd6f14ac
|
[
"Apache-2.0"
] | 68
|
2021-04-06T03:44:54.000Z
|
2022-03-29T02:00:02.000Z
|
app/api/v2/position.py
|
BoostryJP/ibet-Wallet-API
|
da7323a298bdb746e93da8a9b57a5da1dd6f14ac
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from cerberus import Validator
from web3 import Web3
from eth_utils import to_checksum_address
from app import log
from app.api.common import BaseResource
from app.errors import (
InvalidParameterError,
NotSupportedError
)
from app import config
from app.contracts import Contract
from app.model.db import (
Listing,
IDXConsumeCoupon,
IDXTransfer
)
from app.model.blockchain import (
BondToken,
ShareToken,
MembershipToken,
CouponToken
)
LOG = log.get_logger()
# /Position/Share
class ShareMyTokens(BaseResource):
    """List of held positions (Share tokens).

    For each account address in the request, scans every listed token and
    returns balance details for the IbetShare tokens the account holds.
    """

    def on_post(self, req, res, **kwargs):
        """Return the requested accounts' share-token positions.

        Expects a JSON body ``{"account_address_list": [<address>, ...]}``.
        Raises NotSupportedError when share tokens are disabled, and
        InvalidParameterError on a malformed body.
        """
        LOG.info("v2.position.ShareMyTokens")
        session = req.context["session"]

        # Feature flag: share tokens may be disabled per deployment.
        if config.SHARE_TOKEN_ENABLED is False:
            raise NotSupportedError(method="POST", url=req.path)

        # Validation
        request_json = ShareMyTokens.validate(req)

        # TokenList Contract
        list_contract = Contract.get_contract(
            contract_name="TokenList",
            address=config.TOKEN_LIST_CONTRACT_ADDRESS
        )

        # Exchange Contract (optional; only when an address is configured)
        _exchange_contract = None
        if config.IBET_SHARE_EXCHANGE_CONTRACT_ADDRESS is not None:
            _exchange_contract = Contract.get_contract(
                contract_name="IbetExchangeInterface",
                address=config.IBET_SHARE_EXCHANGE_CONTRACT_ADDRESS
            )

        listed_tokens = session.query(Listing).all()

        position_list = []
        # NOTE: O(accounts x listed tokens) on-chain calls — presumably
        # acceptable for small lists; verify for large listings.
        for _account_address in request_json["account_address_list"]:
            # Get token details
            for token in listed_tokens:
                token_info = Contract.call_function(
                    contract=list_contract,
                    function_name="getTokenByAddress",
                    args=(token.token_address,),
                    default_returns=(config.ZERO_ADDRESS, "", config.ZERO_ADDRESS)
                )
                token_address = token_info[0]
                token_template = token_info[1]
                if token_template == "IbetShare":
                    _account_address = to_checksum_address(_account_address)
                    _token_contract = Contract.get_contract(
                        contract_name="IbetShare",
                        address=token_address
                    )
                    try:
                        balance = Contract.call_function(
                            contract=_token_contract,
                            function_name="balanceOf",
                            args=(_account_address,),
                            default_returns=0
                        )
                        pending_transfer = Contract.call_function(
                            contract=_token_contract,
                            function_name="pendingTransfer",
                            args=(_account_address,),
                            default_returns=0
                        )
                        if _exchange_contract is not None:
                            _exchange_balance = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="balanceOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                            _exchange_commitment = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="commitmentOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                        else:
                            # If EXCHANGE_CONTRACT_ADDRESS is not set, set commitment to zero.
                            _exchange_balance = 0
                            _exchange_commitment = 0

                        # If balance, pending_transfer, and commitment are non-zero,
                        # get the token information from TokenContract.
                        if balance == 0 and \
                                pending_transfer == 0 and \
                                _exchange_balance == 0 and \
                                _exchange_commitment == 0:
                            # Nothing held for this token — skip it.
                            continue
                        else:
                            sharetoken = ShareToken.get(
                                session=session,
                                token_address=token_address
                            )
                            position_list.append({
                                "token": sharetoken.__dict__,
                                "balance": balance,
                                "exchange_balance": _exchange_balance,
                                "exchange_commitment": _exchange_commitment,
                                "pending_transfer": pending_transfer
                            })
                    except Exception as e:
                        # Best-effort: log and skip tokens whose contract
                        # calls fail rather than failing the whole request.
                        LOG.exception(e)
                        continue

        self.on_success(res, position_list)

    @staticmethod
    def validate(req):
        """Validate the request body; raise InvalidParameterError if malformed.

        Returns the parsed JSON body on success.
        """
        request_json = req.context["data"]
        if request_json is None:
            raise InvalidParameterError

        validator = Validator({
            "account_address_list": {
                "type": "list",
                "schema": {"type": "string"},
                "empty": False,
                "required": True
            }
        })
        if not validator.validate(request_json):
            raise InvalidParameterError(validator.errors)

        # Every entry must be a syntactically valid Ethereum address.
        for account_address in request_json["account_address_list"]:
            if not Web3.isAddress(account_address):
                raise InvalidParameterError("invalid account address")

        return request_json
# /Position/StraightBond
class StraightBondMyTokens(BaseResource):
    """List of held positions (StraightBond tokens).

    For each account address in the request, scans every listed token and
    returns balance details for the IbetStraightBond tokens the account
    holds.

    Consistency fixes vs. the previous version (aligned with the sibling
    ShareMyTokens class): failed contract calls are logged with
    ``LOG.exception`` (preserving the traceback) instead of ``LOG.error``,
    and an invalid account address raises ``InvalidParameterError`` with
    the explanatory "invalid account address" message.
    """

    def on_post(self, req, res, **kwargs):
        """Return the requested accounts' bond-token positions.

        Expects a JSON body ``{"account_address_list": [<address>, ...]}``.
        Raises NotSupportedError when bond tokens are disabled, and
        InvalidParameterError on a malformed body.
        """
        LOG.info("v2.position.StraightBondMyTokens")
        session = req.context["session"]

        # Feature flag: bond tokens may be disabled per deployment.
        if config.BOND_TOKEN_ENABLED is False:
            raise NotSupportedError(method="POST", url=req.path)

        # Validation
        request_json = StraightBondMyTokens.validate(req)

        # TokenList Contract
        list_contract = Contract.get_contract(
            contract_name="TokenList",
            address=config.TOKEN_LIST_CONTRACT_ADDRESS
        )

        # Bond Exchange Contract (optional; only when an address is configured)
        _exchange_contract = None
        if config.IBET_SB_EXCHANGE_CONTRACT_ADDRESS is not None:
            _exchange_contract = Contract.get_contract(
                contract_name="IbetExchangeInterface",
                address=config.IBET_SB_EXCHANGE_CONTRACT_ADDRESS
            )

        listed_tokens = session.query(Listing).all()

        position_list = []
        for _account_address in request_json["account_address_list"]:
            # Get token details
            for token in listed_tokens:
                token_info = Contract.call_function(
                    contract=list_contract,
                    function_name="getTokenByAddress",
                    args=(token.token_address,),
                    default_returns=(config.ZERO_ADDRESS, "", config.ZERO_ADDRESS)
                )
                token_address = token_info[0]
                token_template = token_info[1]
                if token_template == "IbetStraightBond":
                    _account_address = to_checksum_address(_account_address)
                    _token_contract = Contract.get_contract(
                        contract_name="IbetStraightBond",
                        address=token_address
                    )
                    try:
                        balance = Contract.call_function(
                            contract=_token_contract,
                            function_name="balanceOf",
                            args=(_account_address,),
                            default_returns=0
                        )
                        pending_transfer = Contract.call_function(
                            contract=_token_contract,
                            function_name="pendingTransfer",
                            args=(_account_address,),
                            default_returns=0
                        )
                        if _exchange_contract is not None:
                            _exchange_balance = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="balanceOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                            _exchange_commitment = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="commitmentOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                        else:
                            # If EXCHANGE_CONTRACT_ADDRESS is not set, set commitment to zero.
                            _exchange_balance = 0
                            _exchange_commitment = 0

                        # Include the token only when the account holds
                        # something: a balance, a pending transfer, or an
                        # exchange balance/commitment.
                        if balance == 0 and \
                                pending_transfer == 0 and \
                                _exchange_balance == 0 and \
                                _exchange_commitment == 0:
                            continue
                        else:
                            bondtoken = BondToken.get(
                                session=session,
                                token_address=token_address
                            )
                            position_list.append({
                                "token": bondtoken.__dict__,
                                "balance": balance,
                                "exchange_balance": _exchange_balance,
                                "exchange_commitment": _exchange_commitment,
                                "pending_transfer": pending_transfer
                            })
                    except Exception as e:
                        # Best-effort: log (with traceback) and skip tokens
                        # whose contract calls fail, instead of failing the
                        # whole request.
                        LOG.exception(e)
                        continue

        self.on_success(res, position_list)

    @staticmethod
    def validate(req):
        """Validate the request body; raise InvalidParameterError if malformed.

        Returns the parsed JSON body on success.
        """
        request_json = req.context["data"]
        if request_json is None:
            raise InvalidParameterError

        validator = Validator({
            "account_address_list": {
                "type": "list",
                "schema": {"type": "string"},
                "empty": False,
                "required": True
            }
        })
        if not validator.validate(request_json):
            raise InvalidParameterError(validator.errors)

        # Every entry must be a syntactically valid Ethereum address.
        for account_address in request_json["account_address_list"]:
            if not Web3.isAddress(account_address):
                raise InvalidParameterError("invalid account address")

        return request_json
# /Position/Membership
class MembershipMyTokens(BaseResource):
    """Position list (Membership) — endpoint: /Position/Membership.

    Returns, for each requested account address, a position record for
    every listed IbetMembership token the account holds directly or on
    the exchange contract.
    """

    def on_post(self, req, res, **kwargs):
        # Entry point: validate the request, then scan all listed tokens
        # per account and collect non-zero IbetMembership positions.
        LOG.info("v2.position.MembershipMyTokens")

        session = req.context["session"]

        # Feature flag: membership tokens may be disabled for this deployment.
        if config.MEMBERSHIP_TOKEN_ENABLED is False:
            raise NotSupportedError(method="POST", url=req.path)

        # Validation
        request_json = MembershipMyTokens.validate(req)

        # TokenList Contract (registry used to resolve each listed token's
        # address and template name).
        list_contract = Contract.get_contract(
            contract_name="TokenList",
            address=config.TOKEN_LIST_CONTRACT_ADDRESS
        )

        # Exchange Contract — optional; left as None when no exchange
        # address is configured.
        _exchange_contract = None
        if config.IBET_MEMBERSHIP_EXCHANGE_CONTRACT_ADDRESS is not None:
            _exchange_contract = Contract.get_contract(
                contract_name="IbetExchangeInterface",
                address=config.IBET_MEMBERSHIP_EXCHANGE_CONTRACT_ADDRESS
            )

        listed_tokens = session.query(Listing).all()

        position_list = []
        for _account_address in request_json["account_address_list"]:
            # Get token details
            for token in listed_tokens:
                token_info = Contract.call_function(
                    contract=list_contract,
                    function_name="getTokenByAddress",
                    args=(token.token_address,),
                    default_returns=(config.ZERO_ADDRESS, "", config.ZERO_ADDRESS)
                )
                token_address = token_info[0]
                token_template = token_info[1]
                # Only IbetMembership tokens are relevant to this endpoint.
                if token_template == "IbetMembership":
                    _account_address = to_checksum_address(_account_address)
                    _token_contract = Contract.get_contract(
                        contract_name="IbetMembership",
                        address=token_address
                    )
                    try:
                        # Direct on-chain balance of the account.
                        balance = Contract.call_function(
                            contract=_token_contract,
                            function_name="balanceOf",
                            args=(_account_address,),
                            default_returns=0
                        )
                        if _exchange_contract is not None:
                            # Balance deposited to the exchange and the amount
                            # committed to open orders, respectively.
                            _exchange_balance = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="balanceOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                            _exchange_commitment = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="commitmentOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                        else:
                            # If EXCHANGE_CONTRACT_ADDRESS is not set, set commitment to zero.
                            _exchange_balance = 0
                            _exchange_commitment = 0

                        # Skip tokens the account has no position in at all;
                        # otherwise fetch token details and record the position.
                        if balance == 0 and _exchange_balance == 0 and _exchange_commitment == 0:
                            continue
                        else:
                            membershiptoken = MembershipToken.get(
                                session=session,
                                token_address=token_address
                            )
                            position_list.append({
                                "token": membershiptoken.__dict__,
                                "balance": balance,
                                "exchange_balance": _exchange_balance,
                                "exchange_commitment": _exchange_commitment,
                            })
                    except Exception as e:
                        # Best-effort: one failing token must not break the
                        # whole position list — log and continue with the next.
                        LOG.error(e)
                        continue

        self.on_success(res, position_list)

    @staticmethod
    def validate(req):
        # Validate the body: a non-empty list of valid address strings
        # under "account_address_list".
        request_json = req.context["data"]
        if request_json is None:
            raise InvalidParameterError

        validator = Validator({
            "account_address_list": {
                "type": "list",
                "schema": {"type": "string"},
                "empty": False,
                "required": True
            }
        })
        if not validator.validate(request_json):
            raise InvalidParameterError(validator.errors)

        for account_address in request_json["account_address_list"]:
            if not Web3.isAddress(account_address):
                raise InvalidParameterError

        return request_json
# /Position/Coupon
class CouponMyTokens(BaseResource):
    """Position list (Coupon) — endpoint: /Position/Coupon.

    Returns, for each requested account address, a position record for
    every listed IbetCoupon token the account holds, has committed on the
    exchange, has used, or has ever received (per the transfer index).
    """

    def on_post(self, req, res, **kwargs):
        # Entry point: validate the request, then scan all listed tokens
        # per account and collect relevant IbetCoupon positions.
        LOG.info("v2.position.CouponMyTokens")

        session = req.context["session"]

        # Feature flag: coupon tokens may be disabled for this deployment.
        if config.COUPON_TOKEN_ENABLED is False:
            raise NotSupportedError(method="POST", url=req.path)

        # Validation
        request_json = CouponMyTokens.validate(req)

        # TokenList Contract (registry used to resolve each listed token's
        # address and template name).
        list_contract = Contract.get_contract(
            contract_name="TokenList",
            address=config.TOKEN_LIST_CONTRACT_ADDRESS
        )

        # Coupon Exchange Contract — optional; left as None when no
        # exchange address is configured.
        _exchange_contract = None
        if config.IBET_CP_EXCHANGE_CONTRACT_ADDRESS is not None:
            _exchange_contract = Contract.get_contract(
                contract_name="IbetExchangeInterface",
                address=config.IBET_CP_EXCHANGE_CONTRACT_ADDRESS
            )

        listed_tokens = session.query(Listing).all()

        position_list = []
        for _account_address in request_json["account_address_list"]:
            # Get token details
            for token in listed_tokens:
                token_info = Contract.call_function(
                    contract=list_contract,
                    function_name="getTokenByAddress",
                    args=(token.token_address,),
                    default_returns=(config.ZERO_ADDRESS, "", config.ZERO_ADDRESS)
                )
                token_address = token_info[0]
                token_template = token_info[1]
                # Only IbetCoupon tokens are relevant to this endpoint.
                if token_template == "IbetCoupon":
                    _account_address = to_checksum_address(_account_address)
                    _token_contract = Contract.get_contract(
                        contract_name="IbetCoupon",
                        address=token_address
                    )
                    try:
                        # Direct on-chain balance of the account.
                        balance = Contract.call_function(
                            contract=_token_contract,
                            function_name="balanceOf",
                            args=(_account_address,),
                            default_returns=0
                        )
                        if _exchange_contract is not None:
                            # Balance deposited to the exchange and the amount
                            # committed to open orders, respectively.
                            _exchange_balance = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="balanceOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                            _exchange_commitment = Contract.call_function(
                                contract=_exchange_contract,
                                function_name="commitmentOf",
                                args=(_account_address, token_address,),
                                default_returns=0
                            )
                        else:
                            # If EXCHANGE_CONTRACT_ADDRESS is not set, set commitment to zero.
                            _exchange_balance = 0
                            _exchange_commitment = 0

                        # Amount of the coupon the account has already consumed.
                        used = Contract.call_function(
                            contract=_token_contract,
                            function_name="usedOf",
                            args=(_account_address,),
                            default_returns=0
                        )

                        # Retrieving token receipt history from IDXTransfer
                        # NOTE: Index data has a lag from the most recent transfer state.
                        received_history = session.query(IDXTransfer). \
                            filter(IDXTransfer.token_address == token.token_address). \
                            filter(IDXTransfer.to_address == _account_address). \
                            first()

                        # Skip tokens with no balance, no commitment, no usage
                        # and no receipt history; otherwise record the position.
                        if balance == 0 and \
                                _exchange_balance == 0 and \
                                _exchange_commitment == 0 and \
                                used == 0 and \
                                received_history is None:
                            continue
                        else:
                            coupontoken = CouponToken.get(
                                session=session,
                                token_address=token_address
                            )
                            position_list.append({
                                "token": coupontoken.__dict__,
                                "balance": balance,
                                "exchange_balance": _exchange_balance,
                                "exchange_commitment": _exchange_commitment,
                                "used": used
                            })
                    except Exception as e:
                        # Best-effort: one failing token must not break the
                        # whole position list — log and continue with the next.
                        LOG.error(e)
                        continue

        self.on_success(res, position_list)

    @staticmethod
    def validate(req):
        # Validate the body: a non-empty list of valid address strings
        # under "account_address_list".
        request_json = req.context["data"]
        if request_json is None:
            raise InvalidParameterError

        validator = Validator({
            "account_address_list": {
                "type": "list",
                "schema": {"type": "string"},
                "empty": False,
                "required": True
            }
        })
        if not validator.validate(request_json):
            raise InvalidParameterError(validator.errors)

        for account_address in request_json["account_address_list"]:
            if not Web3.isAddress(account_address):
                raise InvalidParameterError

        return request_json
# /Position/Coupon/Consumptions
class CouponConsumptions(BaseResource):
    """Coupon consumption history — endpoint: /Position/Coupon/Consumptions.

    Returns every indexed consumption event for the given coupon token
    across the requested account addresses, sorted chronologically.
    """

    def on_post(self, req, res, **kwargs):
        # Look up indexed consumption events per account and emit them in
        # block-timestamp order.
        LOG.info("v2.position.CouponConsumptions")

        session = req.context["session"]

        # Feature flag: coupon tokens may be disabled for this deployment.
        if config.COUPON_TOKEN_ENABLED is False:
            raise NotSupportedError(method="POST", url=req.path)

        # Validation
        request_json = CouponConsumptions.validate(req)

        # Create a list of coupon consumption history
        _coupon_address = to_checksum_address(request_json["token_address"])
        coupon_consumptions = []
        for _account_address in request_json["account_address_list"]:
            rows = (
                session.query(IDXConsumeCoupon)
                .filter(IDXConsumeCoupon.token_address == _coupon_address)
                .filter(IDXConsumeCoupon.account_address == _account_address)
                .all()
            )
            coupon_consumptions.extend(
                {
                    "account_address": _account_address,
                    "block_timestamp": row.block_timestamp.strftime("%Y/%m/%d %H:%M:%S"),
                    "value": row.amount
                }
                for row in rows
            )

        # Sort by block_timestamp in ascending order (the zero-padded
        # "%Y/%m/%d %H:%M:%S" format sorts correctly as a string).
        coupon_consumptions.sort(key=lambda item: item["block_timestamp"])

        self.on_success(res, coupon_consumptions)

    @staticmethod
    def validate(req):
        # Validate the body: a token address plus a non-empty list of
        # account addresses, all of which must be valid addresses.
        request_json = req.context["data"]
        if request_json is None:
            raise InvalidParameterError

        schema = {
            "token_address": {
                "type": "string",
                "empty": False,
                "required": True
            },
            "account_address_list": {
                "type": "list",
                "schema": {"type": "string"},
                "empty": False,
                "required": True
            }
        }
        validator = Validator(schema)
        if not validator.validate(request_json):
            raise InvalidParameterError(validator.errors)

        if not Web3.isAddress(request_json["token_address"]):
            raise InvalidParameterError
        for account_address in request_json["account_address_list"]:
            if not Web3.isAddress(account_address):
                raise InvalidParameterError

        return request_json
| 40.097913
| 100
| 0.506145
| 1,989
| 24,981
| 6.068879
| 0.11463
| 0.068428
| 0.03148
| 0.044073
| 0.784608
| 0.784608
| 0.768122
| 0.768122
| 0.755778
| 0.751802
| 0
| 0.004553
| 0.428566
| 24,981
| 622
| 101
| 40.162379
| 0.841051
| 0.076938
| 0
| 0.715464
| 0
| 0
| 0.066162
| 0.009874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020619
| false
| 0
| 0.020619
| 0
| 0.061856
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
00e12fd9b39128ae52b9693e481fff1c8da419c8
| 45
|
py
|
Python
|
instaframe/__init__.py
|
oselin/instaframe
|
6a1ec6e9e6b5935b082c207182f3f2bc071eb4ec
|
[
"MIT"
] | null | null | null |
instaframe/__init__.py
|
oselin/instaframe
|
6a1ec6e9e6b5935b082c207182f3f2bc071eb4ec
|
[
"MIT"
] | null | null | null |
instaframe/__init__.py
|
oselin/instaframe
|
6a1ec6e9e6b5935b082c207182f3f2bc071eb4ec
|
[
"MIT"
] | null | null | null |
from instaframe.instaframe2 import Instaframe
| 45
| 45
| 0.911111
| 5
| 45
| 8.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.066667
| 45
| 1
| 45
| 45
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
daa32e288164da610bf0df0a384617965413e8d0
| 15,087
|
py
|
Python
|
features/steps/data-table.py
|
eaton-lab/toyplot
|
472f2f2f1bc048e485ade44d75c3ace310be4b41
|
[
"BSD-3-Clause"
] | 438
|
2015-01-06T20:54:02.000Z
|
2022-03-15T00:39:33.000Z
|
features/steps/data-table.py
|
eaton-lab/toyplot
|
472f2f2f1bc048e485ade44d75c3ace310be4b41
|
[
"BSD-3-Clause"
] | 184
|
2015-01-26T17:04:47.000Z
|
2022-02-19T16:29:00.000Z
|
features/steps/data-table.py
|
eaton-lab/toyplot
|
472f2f2f1bc048e485ade44d75c3ace310be4b41
|
[
"BSD-3-Clause"
] | 45
|
2015-07-06T18:00:27.000Z
|
2022-02-14T12:46:17.000Z
|
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from behave import *
import nose.tools
import numpy.testing
import collections
import io
import numpy
import os
import sys
import tempfile
import toyplot.data
import testing
# pandas is an optional dependency; scenarios that need it are skipped at
# runtime via pandas_available(). Catch only ImportError — the original
# bare `except:` would also swallow unrelated failures (e.g. SystemExit,
# KeyboardInterrupt) raised during import.
try:
    import pandas
except ImportError:
    pass
def pandas_available(context):
    """Return True when pandas has been imported; otherwise skip the scenario.

    Side effect: when pandas is missing, marks the current behave scenario
    as skipped before returning False.
    """
    available = "pandas" in sys.modules
    if not available:
        context.scenario.skip(reason="The pandas library is not available.")
    return available
root_dir = os.path.dirname(os.path.dirname(__file__))
@given(u'a new toyplot.data.table')
def step_impl(context):
context.data = toyplot.data.Table()
@then(u'the table should be empty')
def step_impl(context):
nose.tools.assert_equal(len(context.data), 0)
nose.tools.assert_equal(context.data.shape, (0, 0))
nose.tools.assert_equal(list(context.data.items()), [])
nose.tools.assert_equal(list(context.data.keys()), [])
nose.tools.assert_equal(list(context.data.values()), [])
@then(u'adding columns should change the table')
def step_impl(context):
context.data["a"] = numpy.arange(10)
nose.tools.assert_equal(list(context.data.keys()), ["a"])
nose.tools.assert_equal(context.data.shape, (10, 1))
context.data["b"] = context.data["a"] ** 2
nose.tools.assert_equal(list(context.data.keys()), ["a", "b"])
nose.tools.assert_equal(context.data.shape, (10, 2))
context.data["c"] = numpy.zeros(10)
nose.tools.assert_equal(list(context.data.keys()), ["a", "b", "c"])
nose.tools.assert_equal(context.data.shape, (10, 3))
@then(u'columns can be retrieved by name')
def step_impl(context):
numpy.testing.assert_array_equal(context.data["a"], numpy.arange(10))
@then(u'partial columns can be retrieved by name and index')
def step_impl(context):
nose.tools.assert_equal(context.data["a", 5], 5)
@then(u'partial columns can be retrieved by name and slice')
def step_impl(context):
numpy.testing.assert_array_equal(context.data["a", 5:7], [5, 6])
@then(u'partial tables can be retrieved by row index')
def step_impl(context):
table = context.data[5]
nose.tools.assert_equal(list(table.keys()), ["a", "b", "c"])
nose.tools.assert_equal(table.shape, (1, 3))
numpy.testing.assert_array_equal(table["a"], [5])
@then(u'partial tables can be retrieved by row slice')
def step_impl(context):
table = context.data[5:7]
nose.tools.assert_equal(list(table.keys()), ["a", "b", "c"])
nose.tools.assert_equal(table.shape, (2, 3))
numpy.testing.assert_array_equal(table["a"], [5,6])
@then(u'partial tables can be retrieved by row index and column name')
def step_impl(context):
table = context.data[5, "b"]
nose.tools.assert_equal(list(table.keys()), ["b"])
nose.tools.assert_equal(table.shape, (1, 1))
numpy.testing.assert_array_equal(table["b"], [25])
@then(u'partial tables can be retrieved by row slice and column name')
def step_impl(context):
table = context.data[5:7, "b"]
nose.tools.assert_equal(list(table.keys()), ["b"])
nose.tools.assert_equal(table.shape, (2, 1))
numpy.testing.assert_array_equal(table["b"], [25,36])
@then(u'partial tables can be retrieved by row index and column names')
def step_impl(context):
table = context.data[5, ["b", "a"]]
nose.tools.assert_equal(list(table.keys()), ["b", "a"])
nose.tools.assert_equal(table.shape, (1, 2))
numpy.testing.assert_array_equal(table["a"], [5])
@then(u'partial tables can be retrieved by row slice and column names')
def step_impl(context):
table = context.data[5:7, ["b", "a"]]
nose.tools.assert_equal(list(table.keys()), ["b", "a"])
nose.tools.assert_equal(table.shape, (2, 2))
numpy.testing.assert_array_equal(table["a"], [5,6])
@then(u'partial tables can be retrieved by column names')
def step_impl(context):
table = context.data[["b", "a"]]
nose.tools.assert_equal(list(table.keys()), ["b", "a"])
nose.tools.assert_equal(table.shape, (10, 2))
@then(u'partial tables can be retrieved by row indices')
def step_impl(context):
table = context.data[[5, 7]]
nose.tools.assert_equal(list(table.keys()), ["a", "b", "c"])
nose.tools.assert_equal(table.shape, (2, 3))
numpy.testing.assert_array_equal(table["a"], [5, 7])
@then(u'columns can be replaced by name')
def step_impl(context):
context.data["c"] = numpy.ones(10)
nose.tools.assert_equal(list(context.data.keys()), ["a", "b", "c"])
nose.tools.assert_equal(context.data.shape, (10, 3))
numpy.testing.assert_array_equal(context.data["c"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and separate index')
def step_impl(context):
context.data["c"][0] = 0
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and separate slice')
def step_impl(context):
context.data["c"][1:4] = [1, 2, 3]
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 2, 3, 1, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and index')
def step_impl(context):
context.data["c", 4] = 4
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 2, 3, 4, 1, 1, 1, 1, 1])
@then(u'partial columns can be modified by name and slice')
def step_impl(context):
context.data["c", 5:8] = [5, 6, 7]
numpy.testing.assert_array_equal(context.data["c"], [0, 1, 2, 3, 4, 5, 6, 7, 1, 1])
@then(u'partial columns can be masked by name and index')
def step_impl(context):
context.data["c", 3] = numpy.ma.masked
nose.tools.assert_is(context.data["c"][3], numpy.ma.masked)
@then(u'partial columns can be masked by name and slice')
def step_impl(context):
context.data["c", 8:10] = numpy.ma.masked
nose.tools.assert_is(context.data["c"][8], numpy.ma.masked)
nose.tools.assert_is(context.data["c"][9], numpy.ma.masked)
@then(u'deleting columns should change the table')
def step_impl(context):
del context.data["c"]
nose.tools.assert_equal(list(context.data.keys()), ["a", "b"])
nose.tools.assert_equal(context.data.shape, (10, 2))
@then(u'new columns must have a string name')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
context.data[3] = numpy.arange(10)
@then(u'new columns must have the same number of rows as existing columns')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
context.data["c"] = numpy.random.random(4)
@then(u'new columns must be one-dimensional')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
context.data["c"] = numpy.random.random((10, 4))
@then(u'per-column metadata can be specified')
def step_impl(context):
nose.tools.assert_equal(context.data.metadata("b"), {})
context.data.metadata("b")["foo"] = True
nose.tools.assert_equal(context.data.metadata("b"), {"foo": True})
with nose.tools.assert_raises(ValueError):
context.data.metadata("c")
@then(u'the table can be converted to a numpy matrix')
def step_impl(context):
matrix = context.data.matrix()
numpy.testing.assert_array_equal(matrix, [[0,0],[1,1],[2,4],[3,9],[4,16],[5,25],[6,36],[7,49],[8,64],[9,81]])
@when(u'toyplot.data.Table is initialized with nothing')
def step_impl(context):
context.data = toyplot.data.Table()
@then(u'the toyplot.data.Table is empty')
def step_impl(context):
nose.tools.assert_equal(len(context.data), 0)
nose.tools.assert_equal(context.data.shape, (0, 0))
nose.tools.assert_equal(list(context.data.items()), [])
nose.tools.assert_equal(list(context.data.keys()), [])
nose.tools.assert_equal(list(context.data.values()), [])
@when(u'toyplot.data.Table is initialized with a toyplot.data.Table')
def step_impl(context):
table = toyplot.data.Table()
table["a"] = numpy.arange(10)
table["b"] = table["a"] ** 2
context.data = table
@when(
u'toyplot.data.Table is initialized with an OrderedDict containing columns')
def step_impl(context):
context.data = collections.OrderedDict(
[("a", numpy.arange(10)), ("b", numpy.arange(10) ** 2)])
@then(u'the toyplot.data.Table contains the columns')
def step_impl(context):
table = toyplot.data.Table(context.data)
nose.tools.assert_equal(list(table.keys()), ["a", "b"])
numpy.testing.assert_array_equal(
table["a"], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
numpy.testing.assert_array_equal(
table["b"], [0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
@when(u'toyplot.data.Table is initialized with a dict containing columns')
def step_impl(context):
context.data = {"b": numpy.arange(10) ** 2, "a": numpy.arange(10)}
@then(u'the toyplot.data.Table contains the columns, sorted by key')
def step_impl(context):
table = toyplot.data.Table(context.data)
nose.tools.assert_equal(list(table.keys()), ["a", "b"])
numpy.testing.assert_array_equal(
table["a"], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
numpy.testing.assert_array_equal(
table["b"], [0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
@when(u'toyplot.data.Table is initialized with a sequence of name, column tuples')
def step_impl(context):
context.data = [("a", numpy.arange(10)), ("b", numpy.arange(10) ** 2)]
@when(u'toyplot.data.Table is initialized with a matrix')
def step_impl(context):
context.data = numpy.arange(16).reshape((4, 4))
@then(u'the toyplot.data.Table contains the matrix columns with generated keys')
def step_impl(context):
table = toyplot.data.Table(context.data)
nose.tools.assert_equal(list(table.keys()), ["0", "1", "2", "3"])
numpy.testing.assert_array_equal(
table["0"], [0, 4, 8, 12])
numpy.testing.assert_array_equal(
table["1"], [1, 5, 9, 13])
numpy.testing.assert_array_equal(
table["2"], [2, 6, 10, 14])
numpy.testing.assert_array_equal(
table["3"], [3, 7, 11, 15])
@when(u'toyplot.data.Table is initialized with an array')
def step_impl(context):
context.data = numpy.arange(16)
@when(u'toyplot.data.Table is initialized with an integer')
def step_impl(context):
context.data = 5
@then(u'the toyplot.data.Table raises ValueError')
def step_impl(context):
with nose.tools.assert_raises(ValueError):
toyplot.data.Table(context.data)
@given(u'a toyplot.data.table with some data')
def step_impl(context):
numpy.random.seed(1234)
context.data = toyplot.data.Table()
context.data["foo"] = numpy.arange(10)
context.data["bar"] = numpy.random.random(10)
context.data["baz"] = numpy.random.choice(
["red", "green", "blue"], size=10)
@when(u'toyplot.data.Table is initialized with a csv file')
def step_impl(context):
context.data = toyplot.data.read_csv(toyplot.data.temperatures.path)
@then(u'the toyplot.data.Table contains the csv file columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 6))
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
for column in context.data.values():
nose.tools.assert_true(issubclass(column.dtype.type, numpy.character))
@when(u'toyplot.data.Table is initialized with a csv file and conversion')
def step_impl(context):
context.data = toyplot.data.read_csv(toyplot.data.temperatures.path, convert=True)
@then(u'the toyplot.data.Table contains the csv file columns with numeric type')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 6))
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
for column, column_type in zip(context.data.values(), [numpy.character, numpy.character, numpy.integer, numpy.integer, numpy.integer, numpy.integer]):
nose.tools.assert_true(issubclass(column.dtype.type, column_type))
@when(u'toyplot.data.Table is initialized with a pandas dataframe')
def step_impl(context):
if pandas_available(context):
context.data = toyplot.data.Table(pandas.read_csv(toyplot.data.temperatures.path))
@then(u'the toyplot.data.Table contains the data frame columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 6))
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with index')
def step_impl(context):
if pandas_available(context):
context.data = toyplot.data.Table(pandas.read_csv(toyplot.data.temperatures.path), index=True)
@then(u'the toyplot.data.Table contains the data frame columns plus an index column')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (362, 7))
nose.tools.assert_equal(list(context.data.keys()), ["index0", 'STATION', 'STATION_NAME', 'DATE', 'TMAX', 'TMIN', 'TOBS'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with hierarchical index')
def step_impl(context):
if pandas_available(context):
index = [numpy.array(["foo", "foo", "bar", "bar"]), numpy.array(["one", "two", "one", "two"])]
data_frame = pandas.DataFrame(numpy.ones((4, 4)), index=index)
context.data = toyplot.data.Table(data_frame, index=True)
@then(u'the toyplot.data.Table contains the data frame columns plus multiple index columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (4, 6))
nose.tools.assert_equal(list(context.data.keys()), ["index0", 'index1', '0', '1', '2', '3'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with hierarchical index and custom index format')
def step_impl(context):
if pandas_available(context):
index = [numpy.array(["foo", "foo", "bar", "bar"]), numpy.array(["one", "two", "one", "two"])]
data_frame = pandas.DataFrame(numpy.ones((4, 4)), index=index)
context.data = toyplot.data.Table(data_frame, index="Index {}")
@then(u'the toyplot.data.Table contains the data frame columns plus multiple custom format index columns')
def step_impl(context):
nose.tools.assert_equal(context.data.shape, (4, 6))
nose.tools.assert_equal(list(context.data.keys()), ["Index 0", 'Index 1', '0', '1', '2', '3'])
@when(u'toyplot.data.Table is initialized with a pandas dataframe with duplicate column names')
def step_impl(context):
if pandas_available(context):
context.data = toyplot.data.Table(pandas.read_csv(toyplot.data.temperatures.path)[["STATION", "DATE", "STATION", "DATE", "DATE"]])
@then(u'the toyplot.data.Table contains the data frame columns with uniqified column names')
def step_impl(context):
nose.tools.assert_equal(list(context.data.keys()), ['STATION', 'DATE', 'STATION-1', 'DATE-1', 'DATE-2'])
@then(u'the table can be rendered as format ipython html string')
def step_impl(context):
html = context.data._repr_html_()
nose.tools.assert_is_instance(html, str)
testing.assert_html_equal(html, "data-table")
| 35.498824
| 154
| 0.686618
| 2,346
| 15,087
| 4.33035
| 0.089088
| 0.107196
| 0.097451
| 0.099222
| 0.84467
| 0.819963
| 0.779112
| 0.77104
| 0.715622
| 0.634019
| 0
| 0.027278
| 0.144694
| 15,087
| 424
| 155
| 35.582547
| 0.759997
| 0.011069
| 0
| 0.434783
| 0
| 0
| 0.235618
| 0
| 0
| 0
| 0
| 0
| 0.301003
| 1
| 0.190635
| false
| 0.003344
| 0.040134
| 0
| 0.237458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dab6e7629723f93498360689cb6eae0347f38513
| 45
|
py
|
Python
|
src/testing_pystan_install/where_is_my_python.py
|
webclinic017/back-testing-stock-strats
|
2860fd9ed6ab86424c9a0c766c45d0c09658bd33
|
[
"MIT"
] | 1
|
2022-03-14T12:59:28.000Z
|
2022-03-14T12:59:28.000Z
|
src/testing_pystan_install/where_is_my_python.py
|
webclinic017/back-testing-stock-strats
|
2860fd9ed6ab86424c9a0c766c45d0c09658bd33
|
[
"MIT"
] | null | null | null |
src/testing_pystan_install/where_is_my_python.py
|
webclinic017/back-testing-stock-strats
|
2860fd9ed6ab86424c9a0c766c45d0c09658bd33
|
[
"MIT"
] | 2
|
2021-12-02T20:51:30.000Z
|
2022-03-14T12:59:33.000Z
|
# Sanity-check script: report where the interpreter resolves distutils from
# (useful for diagnosing mixed-environment pystan installs).
import distutils

module_path = distutils.__file__
print(module_path)
| 15
| 26
| 0.822222
| 5
| 45
| 6.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 2
| 27
| 22.5
| 0.825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
dae4962901ec884fa9f9a97564b67fabf2e275c0
| 25
|
py
|
Python
|
rlberry/agents/cem/__init__.py
|
antoine-moulin/rlberry
|
676af9d1bb9094a6790a9aa3ff7e67b13584a183
|
[
"MIT"
] | null | null | null |
rlberry/agents/cem/__init__.py
|
antoine-moulin/rlberry
|
676af9d1bb9094a6790a9aa3ff7e67b13584a183
|
[
"MIT"
] | null | null | null |
rlberry/agents/cem/__init__.py
|
antoine-moulin/rlberry
|
676af9d1bb9094a6790a9aa3ff7e67b13584a183
|
[
"MIT"
] | null | null | null |
from .cem import CEMAgent
| 25
| 25
| 0.84
| 4
| 25
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
daed0000c6137c31b6fc25867909c0032976bb4f
| 48
|
py
|
Python
|
evaluate/__init__.py
|
YantaoShen/openBCT
|
69e798c2dd6380572da7a88b68e0e9d31d9b08a4
|
[
"BSD-2-Clause"
] | 64
|
2020-10-13T06:24:41.000Z
|
2022-03-08T11:23:22.000Z
|
evaluate/__init__.py
|
YantaoShen/openBCT
|
69e798c2dd6380572da7a88b68e0e9d31d9b08a4
|
[
"BSD-2-Clause"
] | 4
|
2020-12-29T05:57:34.000Z
|
2022-01-13T18:07:05.000Z
|
evaluate/__init__.py
|
YantaoShen/openBCT
|
69e798c2dd6380572da7a88b68e0e9d31d9b08a4
|
[
"BSD-2-Clause"
] | 10
|
2020-10-13T06:25:51.000Z
|
2022-03-03T00:06:06.000Z
|
from .evaluators import *
from .ranking import *
| 24
| 25
| 0.770833
| 6
| 48
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 26
| 24
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9708f9ea6f363037e41fbb160593d14942c16acc
| 56
|
py
|
Python
|
emp_wsb/__main__.py
|
EasyMicroPython/EMP-WSB
|
77bca344c4844b04dd9436b9bfa50fdaf79178ff
|
[
"MIT"
] | 3
|
2019-01-14T15:57:48.000Z
|
2020-01-31T03:43:33.000Z
|
emp_wsb/__main__.py
|
EasyMicroPython/EMP-WSB
|
77bca344c4844b04dd9436b9bfa50fdaf79178ff
|
[
"MIT"
] | 1
|
2019-03-17T03:49:21.000Z
|
2019-08-11T06:57:00.000Z
|
emp_wsb/__main__.py
|
EasyMicroPython/EMP-WSB
|
77bca344c4844b04dd9436b9bfa50fdaf79178ff
|
[
"MIT"
] | 1
|
2020-03-21T15:01:07.000Z
|
2020-03-21T15:01:07.000Z
|
"""Package entry point: ``python -m emp_wsb`` launches the Fire CLI."""
import fire

from emp_wsb.cli import run

if __name__ == "__main__":
    # Guard the CLI launch so that importing this module (e.g. from tests
    # or tooling) does not fire the command-line interface as a side
    # effect; `python -m emp_wsb` still runs it, since runpy executes
    # __main__.py with __name__ == "__main__".
    fire.Fire(run)
| 11.2
| 27
| 0.785714
| 11
| 56
| 3.909091
| 0.636364
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 4
| 28
| 14
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
97728d8ed6508f1f6b53a00fdc4e04584be577a4
| 3,165
|
py
|
Python
|
Linear Models/tests/01_unittest_onehot_input/test.py
|
AxoyTO/ML-DL-DS-Python-Studies
|
ffef653190d1106e01244a4ea7f3f953b9d97882
|
[
"Unlicense"
] | null | null | null |
Linear Models/tests/01_unittest_onehot_input/test.py
|
AxoyTO/ML-DL-DS-Python-Studies
|
ffef653190d1106e01244a4ea7f3f953b9d97882
|
[
"Unlicense"
] | null | null | null |
Linear Models/tests/01_unittest_onehot_input/test.py
|
AxoyTO/ML-DL-DS-Python-Studies
|
ffef653190d1106e01244a4ea7f3f953b9d97882
|
[
"Unlicense"
] | 1
|
2021-12-08T13:00:41.000Z
|
2021-12-08T13:00:41.000Z
|
import numpy as np
import pandas as pd
from Task import MyOneHotEncoder, SimpleCounterEncoder, FoldCounters, weights
def test_imports():
    """Task.py must rely on numpy alone — no sklearn or get_dummies shortcuts."""
    with open('Task.py', 'r') as source:
        joined = ' '.join(source.readlines())
    assert 'import numpy' in joined
    # Exactly one import statement is allowed (the numpy one).
    assert joined.count('import') == 1
    assert 'sklearn' not in joined
    assert 'get_dummies' not in joined
def test_one_hot_small():
    """Encode a 6-row, 2-column frame and compare with the hand-computed matrix."""
    frame = pd.DataFrame.from_dict(
        {'col_1': [0, 1, 0, 1, 0, 1], 'col_2': ['a', 'b', 'c', 'c', 'b', 'a']}
    )
    encoder = MyOneHotEncoder(dtype=int)
    encoder.fit(frame)
    encoded = encoder.transform(frame)
    # Columns: one-hot of col_1 (2 categories) followed by col_2 (3 categories).
    expected = np.array([
        [1, 0, 1, 0, 0],
        [0, 1, 0, 1, 0],
        [1, 0, 0, 0, 1],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 0],
        [0, 1, 1, 0, 0],
    ])
    assert type(encoded) == np.ndarray
    assert len(encoded.shape) == 2
    assert encoded.shape[0] == 6
    assert encoded.shape[1] == 5
    assert (expected == encoded).all()
def test_one_hot_big():
    """One-hot encode a 30-row, 3-column integer frame and compare against a
    hand-computed reference (3 + 5 + 2 = 10 output columns)."""
    data = {'col_1': [1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 2, 1, 2, 0, 2, 1, 2, 0, 0, 2, 0, 1, 2, 2, 0, 1, 1, 2, 0],
            'col_2': [1, 1, 1, 1, 0, 4, 1, 0, 0, 3, 2, 1, 0, 3, 1, 1, 3, 4, 0, 1, 3, 4, 2, 4, 0, 3, 1, 2, 0, 4],
            'col_3': [1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]}
    df_test = pd.DataFrame.from_dict(data)
    enc = MyOneHotEncoder(dtype=int)
    enc.fit(df_test)
    onehot = enc.transform(df_test)
    # Expected layout: columns 0-2 encode col_1 (values 0..2), columns 3-7
    # encode col_2 (values 0..4), columns 8-9 encode col_3 (values 0..1).
    ans = np.array([[0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
                    [0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
                    [0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                    [1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
                    [0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
                    [1, 0, 0, 1, 0, 0, 0, 0, 1, 0],
                    [0, 1, 0, 0, 0, 0, 1, 0, 0, 1],
                    [1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
                    [0, 0, 1, 0, 1, 0, 0, 0, 0, 1],
                    [0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
                    [0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
                    [1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
                    [0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
                    [0, 1, 0, 0, 0, 0, 1, 0, 0, 1],
                    [0, 0, 1, 0, 0, 0, 0, 1, 0, 1],
                    [1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
                    [0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
                    [1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                    [0, 1, 0, 0, 0, 1, 0, 0, 1, 0],
                    [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
                    [0, 0, 1, 1, 0, 0, 0, 0, 1, 0],
                    [1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
                    [0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
                    [0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
                    [0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0, 0, 0, 1, 0, 1]])
    assert len(onehot.shape) == 2
    assert onehot.shape[0] == 30
    assert onehot.shape[1] == 10
    assert (onehot == ans).all()
    assert type(onehot) == np.ndarray
| 42.77027
| 112
| 0.371248
| 598
| 3,165
| 1.929766
| 0.107023
| 0.259965
| 0.205373
| 0.180243
| 0.629983
| 0.615251
| 0.560659
| 0.542461
| 0.542461
| 0.44714
| 0
| 0.238838
| 0.412638
| 3,165
| 73
| 113
| 43.356164
| 0.381926
| 0
| 0
| 0.363636
| 0
| 0
| 0.024013
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.136364
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c136833da2e3221acd93170dec9116d1ff938a32
| 53
|
py
|
Python
|
exp/trying_globals_for_keyword.py
|
nicolasessisbreton/fython
|
988f5a94cee8b16b0000501a22239195c73424a1
|
[
"Apache-2.0"
] | 41
|
2016-01-21T05:14:45.000Z
|
2021-11-24T20:37:21.000Z
|
exp/trying_globals_for_keyword.py
|
nicolasessisbreton/fython
|
988f5a94cee8b16b0000501a22239195c73424a1
|
[
"Apache-2.0"
] | 5
|
2016-01-21T05:36:37.000Z
|
2016-08-22T19:26:51.000Z
|
exp/trying_globals_for_keyword.py
|
nicolasessisbreton/fython
|
988f5a94cee8b16b0000501a22239195c73424a1
|
[
"Apache-2.0"
] | 3
|
2016-01-23T04:03:44.000Z
|
2016-08-21T15:58:38.000Z
|
from . import globals_for_keyword as a
# 'pass' is a Python keyword, so `a.pass` is a SyntaxError in CPython;
# attribute access must go through getattr() instead.
# NOTE(review): this lives in a fython experiment directory -- if the broken
# attribute syntax was the point of the experiment, keep the original.
print(getattr(a, 'pass'))
| 17.666667
| 38
| 0.792453
| 10
| 53
| 4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 3
| 39
| 17.666667
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.5
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 1
|
0
| 6
|
c13b445d5df69b68154c8c546efa78050cbf5425
| 5,966
|
py
|
Python
|
tests/test_control_archive.py
|
EthanArbuckle/dm.py
|
d69fd1c7312bfe2c9f10674499b99f4ea1725d78
|
[
"MIT"
] | 2
|
2021-02-07T12:21:26.000Z
|
2021-03-10T01:35:24.000Z
|
tests/test_control_archive.py
|
EthanArbuckle/dm.py
|
d69fd1c7312bfe2c9f10674499b99f4ea1725d78
|
[
"MIT"
] | null | null | null |
tests/test_control_archive.py
|
EthanArbuckle/dm.py
|
d69fd1c7312bfe2c9f10674499b99f4ea1725d78
|
[
"MIT"
] | null | null | null |
import tarfile
import tempfile
from pathlib import Path
import pytest
from dm import Dm
class TestControlArchive:
    """Unit tests for Dm._build_control_archive.

    Every test stages a DEBIAN/ directory inside a fresh temporary
    directory, writes a control file (plus optional maintainer scripts),
    then inspects the resulting gzipped tar archive or the validation
    error that is raised.
    """

    # Control file contents shared by most of the tests below.
    _CONTROL = b"Package: com.test\nVersion: 1.0\nArchitecture: arm64"

    @staticmethod
    def _write_control(staging: Path, contents: bytes) -> Path:
        """Create staging/DEBIAN/control holding *contents*; return its path."""
        debian_dir = staging / "DEBIAN"
        debian_dir.mkdir()
        control_file = debian_dir / "control"
        control_file.write_bytes(contents)
        return control_file

    def test_build_control_archive(self) -> None:
        with tempfile.TemporaryDirectory() as tempdir:
            staging = Path(tempdir)
            # Given a valid control directory with a control file
            self._write_control(staging, self._CONTROL)
            # When the control archive is created
            control_archive = Dm._build_control_archive(staging)
            # Archive data is returned
            archive_data = control_archive.getvalue()
            assert len(archive_data) > 10
            # And it is gzip data (gzip magic number + deflate + no flags)
            assert archive_data[0:4] == b"\x1f\x8b\x08\x00"
            # When the archive is ungzipped
            control_archive.seek(0)
            with tarfile.open(fileobj=control_archive, mode="r:gz") as tarf:
                # It contains the control file
                ctrl_file = tarf.extractfile(tarf.getmember("control"))
                assert ctrl_file is not None
                # And the file has the correct contents
                assert ctrl_file.read() == self._CONTROL

    def test_control_archive_debian_scripts(self) -> None:
        with tempfile.TemporaryDirectory() as tempdir:
            staging = Path(tempdir)
            # Given a valid control directory with a control file
            control_file = self._write_control(staging, self._CONTROL)
            # And a postinst and preinst maintainer script
            debian_dir = control_file.parent
            (debian_dir / "postinst").write_bytes(b"echo 1234")
            (debian_dir / "preinst").write_bytes(b"echo done")
            # When the control archive is created
            control_archive = Dm._build_control_archive(staging)
            # Archive data is returned
            archive_data = control_archive.getvalue()
            assert len(archive_data) > 10
            # And it is gzip data
            assert archive_data[0:4] == b"\x1f\x8b\x08\x00"
            # When the archive is ungzipped
            control_archive.seek(0)
            with tarfile.open(fileobj=control_archive, mode="r:gz") as tarf:
                # It contains all of the expected files
                assert "control" in tarf.getnames()
                assert "preinst" in tarf.getnames()
                assert "postinst" in tarf.getnames()

    def test_control_archive__bad_permissions__high(self) -> None:
        with tempfile.TemporaryDirectory() as tempdir:
            staging = Path(tempdir)
            # Given a control file with overly-permissive mode.
            # BUG FIX: the original called chmod(777) with a decimal literal
            # (== 0o1411); the test name implies the octal bits 0o777.
            control_file = self._write_control(staging, self._CONTROL)
            control_file.chmod(0o777)
            # When the control archive is created
            with pytest.raises(Exception) as exc_info:
                Dm._build_control_archive(staging)
            # An exception is raised due to invalid file permissions
            assert 'Invalid permissions on file "control"' in str(exc_info.value)

    def test_control_archive__bad_permissions__low(self) -> None:
        with tempfile.TemporaryDirectory() as tempdir:
            staging = Path(tempdir)
            # Given a control file with an overly-restrictive mode.
            # BUG FIX: decimal 550 (== 0o1046) replaced with the intended 0o550.
            control_file = self._write_control(staging, self._CONTROL)
            control_file.chmod(0o550)
            # When the control archive is created
            with pytest.raises(Exception) as exc_info:
                Dm._build_control_archive(staging)
            # An exception is raised due to invalid file permissions
            assert 'Invalid permissions on file "control"' in str(exc_info.value)

    def test_control_archive__invalid_package(self) -> None:
        with tempfile.TemporaryDirectory() as tempdir:
            staging = Path(tempdir)
            # Given a control file whose Package name has uppercase characters
            self._write_control(
                staging,
                b"Package: com.testINVALID\nVersion: 1.0\nArchitecture: arm64")
            # When the control archive is created
            with pytest.raises(Exception) as exc_info:
                Dm._build_control_archive(staging)
            # An exception is raised
            assert str(exc_info.value) == "Package name has characters that aren't lowercase alphanums or '-+.'."

    def test_control_archive__invalid_version(self) -> None:
        with tempfile.TemporaryDirectory() as tempdir:
            staging = Path(tempdir)
            # Given a control file whose Version contains no digits
            self._write_control(
                staging,
                b"Package: com.test\nVersion: womp\nArchitecture: arm64")
            # When the control archive is created
            with pytest.raises(Exception) as exc_info:
                Dm._build_control_archive(staging)
            # An exception is raised
            assert str(exc_info.value) == "Package version womp doesn't contain any digits."
| 41.430556
| 113
| 0.615488
| 695
| 5,966
| 5.116547
| 0.17554
| 0.102362
| 0.024747
| 0.033746
| 0.821991
| 0.812148
| 0.798369
| 0.798369
| 0.798369
| 0.786277
| 0
| 0.014108
| 0.310929
| 5,966
| 143
| 114
| 41.72028
| 0.850888
| 0.174154
| 0
| 0.666667
| 0
| 0
| 0.151819
| 0.005108
| 0
| 0
| 0
| 0
| 0.154762
| 1
| 0.071429
| false
| 0
| 0.059524
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1685e46a8fff97a1cf0a5a3abed301c31829955
| 56
|
py
|
Python
|
example/windows/include/code/included_folderA/included_module.py
|
HussainTaj-W/spark_submit_project
|
17e32ae208147321d42c732ff2c015fe47271ae8
|
[
"MIT"
] | 3
|
2020-01-06T16:02:21.000Z
|
2020-04-04T12:24:07.000Z
|
example/windows/include/code/included_folderA/included_module.py
|
HussainTaj-W/spark_submit_project
|
17e32ae208147321d42c732ff2c015fe47271ae8
|
[
"MIT"
] | null | null | null |
example/windows/include/code/included_folderA/included_module.py
|
HussainTaj-W/spark_submit_project
|
17e32ae208147321d42c732ff2c015fe47271ae8
|
[
"MIT"
] | null | null | null |
def the_module_says():
    """Return a fixed confirmation string proving this module was imported."""
    message = "This seems to work."
    return message
| 18.666667
| 32
| 0.696429
| 9
| 56
| 4.111111
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196429
| 56
| 2
| 33
| 28
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0.339286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c18a3f7ec198c87772aad4aa7e7ea9928733b4dc
| 180
|
py
|
Python
|
Chapter 1-4/G.py
|
Null3rror/Engineering-Economics-Cheatsheet
|
68639482d4db071bf51c44f5d51fc978c7899b90
|
[
"MIT"
] | null | null | null |
Chapter 1-4/G.py
|
Null3rror/Engineering-Economics-Cheatsheet
|
68639482d4db071bf51c44f5d51fc978c7899b90
|
[
"MIT"
] | null | null | null |
Chapter 1-4/G.py
|
Null3rror/Engineering-Economics-Cheatsheet
|
68639482d4db071bf51c44f5d51fc978c7899b90
|
[
"MIT"
] | null | null | null |
def P_G(G, i, n):
    """Present worth of an arithmetic gradient series.

    G -- per-period gradient amount
    i -- interest rate per period (must be nonzero)
    n -- number of periods
    Returns P = G * (P/G, i, n) using the standard gradient-to-present factor.
    """
    growth = (1 + i) ** n
    factor = (growth - 1) / (i * growth) - n / growth
    return (G / i) * factor
def A_G(G, i, n):
    """Equivalent uniform (annual) series of an arithmetic gradient.

    G -- per-period gradient amount
    i -- interest rate per period (must be nonzero)
    n -- number of periods
    Returns A = G * (A/G, i, n) = G * (1/i - n / ((1+i)**n - 1)).
    """
    growth = (1 + i) ** n
    return G * (1 / i - n / (growth - 1))
| 20
| 53
| 0.288889
| 36
| 180
| 1.388889
| 0.25
| 0.2
| 0.18
| 0.24
| 0.6
| 0.6
| 0.6
| 0.6
| 0.6
| 0.6
| 0
| 0.04717
| 0.411111
| 180
| 8
| 54
| 22.5
| 0.424528
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
c1c0a66b495732fbb82b90375668d220f7004fff
| 221
|
py
|
Python
|
Python_Project/python_basic/019_object.py
|
airpoet/bigdata
|
dc86e9fd63ed59cbd7bf69c1aa37ff6130df3da8
|
[
"MIT"
] | null | null | null |
Python_Project/python_basic/019_object.py
|
airpoet/bigdata
|
dc86e9fd63ed59cbd7bf69c1aa37ff6130df3da8
|
[
"MIT"
] | null | null | null |
Python_Project/python_basic/019_object.py
|
airpoet/bigdata
|
dc86e9fd63ed59cbd7bf69c1aa37ff6130df3da8
|
[
"MIT"
] | 2
|
2019-04-20T03:31:31.000Z
|
2020-03-19T14:15:50.000Z
|
#!/usr/bin/python3
# -*-coding:utf-8-*-
# Notes on Python object-oriented programming (comment translated from Chinese).
"""
Object-oriented programming -- reference material:
https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/001431864715651c99511036d884cf1b399e65ae0d27f7e000
"""
| 17
| 135
| 0.746606
| 15
| 221
| 11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.415385
| 0.117647
| 221
| 13
| 136
| 17
| 0.430769
| 0.864253
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a9e79f7ad87aabfbfd7cc4595b9c1e7add49b8bf
| 11,102
|
py
|
Python
|
arhuaco/graphics/results.py
|
kuronosec/arhuaco
|
6eec1691dd03b2e3726ae8c2101588b45d58b6d7
|
[
"Apache-2.0"
] | 1
|
2020-08-08T02:17:34.000Z
|
2020-08-08T02:17:34.000Z
|
arhuaco/graphics/results.py
|
kuronosec/arhuaco
|
6eec1691dd03b2e3726ae8c2101588b45d58b6d7
|
[
"Apache-2.0"
] | null | null | null |
arhuaco/graphics/results.py
|
kuronosec/arhuaco
|
6eec1691dd03b2e3726ae8c2101588b45d58b6d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 Andres Gomez Ramirez.
# All Rights Reserved.
from __future__ import print_function
import os
import sys, getopt
import numpy as np
import time
from arhuaco.graphics.plot import Plot
# Collect and plot evaluation results
def main(argv):
    """Generate every evaluation plot in order; *argv* is accepted but unused."""
    for make_plots in (training_vs_validation_cnn,
                       training_vs_validation_svm,
                       comparative_results):
        make_plots()
def training_vs_validation_cnn():
    """Plot CNN training-vs-validation accuracy and FPR curves.

    Reads newline-separated float logs for the system-call and network
    models from /var/lib/arhuaco/data/logs and writes one PDF per metric.
    """
    log_dir = "/var/lib/arhuaco/data/logs/"

    def _load(name):
        # Each log file is a newline-separated sequence of floats.
        return np.fromfile(log_dir + name, dtype=float, sep="\n")

    sys_accuracy = _load("sys_accuracy_cnn.log")
    sys_val_accuracy = _load("sys_val_accuracy_cnn.log")
    sys_fpr = _load("sys_fpr_cnn.log")
    sys_val_fpr = _load("sys_val_fpr_cnn.log")
    net_accuracy = _load("net_accuracy_cnn.log")
    net_val_accuracy = _load("net_val_accuracy_cnn.log")
    net_fpr = _load("net_fpr_cnn.log")
    net_val_fpr = _load("net_val_fpr_cnn.log")
    # Graphically plot the results
    plot = Plot()
    # One timestamp so all PDFs of a single run share the same suffix
    # (the original called time.strftime per plot and could straddle a
    # second boundary, producing mismatched filenames).
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    # Training vs validation
    plot.history2plot([sys_accuracy, sys_val_accuracy],
                      ['Training', 'Validation'],
                      "System call classification with CNN", "Epoch", "Accuracy",
                      log_dir + "sys_conv_accuracy-%s.pdf" % timestamp,
                      'lower right',
                      [0, 9], [0.8, 1.0])
    plot.history2plot([sys_fpr, sys_val_fpr],
                      ['Training', 'Validation'],
                      "System call classification with CNN", "Epoch", "False positive rate",
                      log_dir + "sys_conv_fpr-%s.pdf" % timestamp,
                      'upper left',
                      [0, 9], [0, 0.2])
    plot.history2plot([net_accuracy, net_val_accuracy],
                      ['Training', 'Validation'],
                      "Network trace classification with CNN", "Epoch", "Accuracy",
                      log_dir + "net_conv_accuracy-%s.pdf" % timestamp,
                      'lower right',
                      [0, 9], [0.8, 1.0])
    # BUG FIX: y-label typo "False postive rate" -> "False positive rate".
    plot.history2plot([net_fpr, net_val_fpr],
                      ['Training', 'Validation'],
                      "Network trace classification with CNN", "Epoch", "False positive rate",
                      log_dir + "net_conv_fpr-%s.pdf" % timestamp,
                      'upper left',
                      [0, 9], [0, 0.2])
def training_vs_validation_svm():
    """Plot SVM training-vs-validation accuracy and FPR curves.

    Reads newline-separated float logs for the system-call and network
    models (including the generated-data run) and writes one PDF each.
    """
    log_dir = "/var/lib/arhuaco/data/logs/"

    def _load(name):
        # Each log file is a newline-separated sequence of floats.
        return np.fromfile(log_dir + name, dtype=float, sep="\n")

    sys_accuracy = _load("sys_accuracy_svm.log")
    sys_val_accuracy = _load("sys_val_accuracy_svm.log")
    sys_fpr = _load("sys_fpr_svm.log")
    sys_val_fpr = _load("sys_val_fpr_svm.log")
    net_accuracy = _load("net_accuracy_svm.log")
    net_val_accuracy = _load("net_val_accuracy_svm.log")
    net_gen_accuracy = _load("net_acc_gen_svm.log")
    net_gen_val_accuracy = _load("net_val_acc_gen_svm.log")
    net_fpr = _load("net_fpr_svm.log")
    net_val_fpr = _load("net_val_fpr_svm.log")
    # Graphically plot the results
    plot = Plot()
    # One timestamp so all PDFs of a single run share the same suffix.
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    # Training vs validation
    plot.history2plot([sys_accuracy, sys_val_accuracy],
                      ['Training', 'Validation'],
                      "System call classification with SVM", "Epoch", "Accuracy",
                      log_dir + "sys_svm_accuracy-%s.pdf" % timestamp,
                      'lower right',
                      [0, 9], [0.8, 1.0])
    plot.history2plot([sys_fpr, sys_val_fpr],
                      ['Training', 'Validation'],
                      "System call classification with SVM", "Epoch", "False positive rate",
                      log_dir + "sys_svm_fpr-%s.pdf" % timestamp,
                      'upper left',
                      [0, 9], [0, 0.2])
    plot.history2plot([net_accuracy, net_val_accuracy],
                      ['Training', 'Validation'],
                      "Network trace classification with SVM", "Epoch", "Accuracy",
                      log_dir + "net_svm_accuracy-%s.pdf" % timestamp,
                      'lower right',
                      [0, 9], [0.8, 1.0])
    # BUG FIX: y-label typo "False postive rate" -> "False positive rate".
    plot.history2plot([net_fpr, net_val_fpr],
                      ['Training', 'Validation'],
                      "Network trace classification with SVM", "Epoch", "False positive rate",
                      log_dir + "net_svm_fpr-%s.pdf" % timestamp,
                      'upper left',
                      [0, 9], [0, 0.2])
    plot.history2plot([net_gen_accuracy, net_gen_val_accuracy],
                      ['Training', 'Validation'],
                      "Network trace classification with SVM: generated data", "Epoch", "Accuracy",
                      log_dir + "net_svm_accuracy-generated-%s.pdf" % timestamp,
                      'lower right',
                      [0, 9], [0.8, 1.0])
def comparative_results():
    """Plot CNN-vs-SVM validation comparisons for syscall and network models."""
    log_dir = "/var/lib/arhuaco/data/logs/"

    def _load(name):
        # Each log file is a newline-separated sequence of floats.
        return np.fromfile(log_dir + name, dtype=float, sep="\n")

    sys_val_accuracy_cnn = _load("sys_val_accuracy_cnn.log")
    sys_val_accuracy_svm = _load("sys_val_accuracy_svm.log")
    sys_val_fpr_cnn = _load("sys_val_fpr_cnn.log")
    sys_val_fpr_svm = _load("sys_val_fpr_svm.log")
    net_val_accuracy_cnn = _load("net_val_accuracy_cnn.log")
    net_val_accuracy_svm = _load("net_val_accuracy_svm.log")
    net_val_fpr_cnn = _load("net_val_fpr_cnn.log")
    net_val_fpr_svm = _load("net_val_fpr_svm.log")
    net_val_acc_gen_svm = _load("net_val_acc_gen_svm.log")
    # Graphically plot the results
    plot = Plot()
    # One timestamp so all PDFs of a single run share the same suffix.
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    # Syscall cnn vs svm accuracy.
    # NOTE(review): y-limits [0, 0.2] look odd for accuracy curves that the
    # other plots bound to [0.8, 1.0] -- kept as-is, confirm with the author.
    plot.history2plot([sys_val_accuracy_cnn[0:10], sys_val_accuracy_svm[0:10]],
                      ['CNN validation', 'SVM validation'],
                      "CNN vs SVM system call validation accuracy",
                      "Epoch", "Accuracy",
                      log_dir + "sys_cnn_svm_accuracy-%s.pdf" % timestamp,
                      'lower right',
                      [0, 9], [0, 0.2])
    # Syscall cnn vs svm fpr
    plot.history2plot([sys_val_fpr_cnn[0:10], sys_val_fpr_svm[0:10]],
                      ['CNN validation', 'SVM validation'],
                      "CNN vs SVM system call validation false positive rate",
                      "Epoch", "False positive rate",
                      log_dir + "sys_cnn_svm_fpr-%s.pdf" % timestamp,
                      'upper left',
                      [0, 9], [0, 0.2])
    # Network cnn vs svm accuracy
    plot.history2plot([net_val_accuracy_cnn[0:10], net_val_accuracy_svm[0:10]],
                      ['CNN validation', 'SVM validation'],
                      "CNN vs SVM network trace validation accuracy",
                      "Epoch", "Accuracy",
                      log_dir + "net_cnn_svm_accuracy-%s.pdf" % timestamp,
                      'lower right',
                      [0, 9], [0, 0.2])
    # Network cnn vs svm fpr
    plot.history2plot([net_val_fpr_cnn[0:10], net_val_fpr_svm[0:10]],
                      ['CNN validation', 'SVM validation'],
                      "CNN vs SVM network validation false positive rate",
                      "Epoch", "False positive rate",
                      log_dir + "net_cnn_svm_fpr-%s.pdf" % timestamp,
                      'upper left',
                      [0, 9], [0, 0.2])
    # Network svm original vs svm generated accuracy.
    # BUG FIX: the y-axis label said "False positive rate" although this
    # plot compares accuracy series (see the title and the input data).
    plot.history2plot([net_val_accuracy_svm[0:10], net_val_acc_gen_svm[0:10]],
                      ['SVM validation non generated', 'SVM validation generated'],
                      "SVM accuracy comparison: normal data vs generated data",
                      "Epoch", "Accuracy",
                      log_dir + "net_svm_accuracy-generated-%s.pdf" % timestamp,
                      'upper left',
                      [0, 9], [0, 0.2])
if __name__ == "__main__":
    # Forward CLI arguments (minus the program name) to main().
    main(sys.argv[1:])
| 52.367925
| 100
| 0.496217
| 1,296
| 11,102
| 4.057099
| 0.071759
| 0.046786
| 0.101369
| 0.13256
| 0.896919
| 0.882275
| 0.854127
| 0.854127
| 0.847851
| 0.798022
| 0
| 0.01775
| 0.370744
| 11,102
| 211
| 101
| 52.616114
| 0.734898
| 0.032787
| 0
| 0.571429
| 0
| 0
| 0.330567
| 0.185379
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021164
| false
| 0
| 0.031746
| 0
| 0.05291
| 0.005291
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e74b716e9b1f25a4a1435df6e02ddd268ce738b3
| 7,316
|
py
|
Python
|
tests/test_json_fields.py
|
trevorbox/prom2teams
|
512c53aabdea8b7858fbe5899ac96e392be1ea8e
|
[
"Apache-2.0"
] | 180
|
2017-09-04T21:07:00.000Z
|
2022-03-10T11:05:02.000Z
|
tests/test_json_fields.py
|
trevorbox/prom2teams
|
512c53aabdea8b7858fbe5899ac96e392be1ea8e
|
[
"Apache-2.0"
] | 162
|
2017-08-24T08:54:33.000Z
|
2022-03-26T20:08:04.000Z
|
tests/test_json_fields.py
|
trevorbox/prom2teams
|
512c53aabdea8b7858fbe5899ac96e392be1ea8e
|
[
"Apache-2.0"
] | 75
|
2017-11-08T11:04:31.000Z
|
2022-03-04T12:34:37.000Z
|
import unittest
import os
import json
from prom2teams.teams.alert_mapper import map_prom_alerts_to_teams_alerts
from prom2teams.prometheus.message_schema import MessageSchema
from prom2teams.app.sender import AlertSender
from deepdiff import DeepDiff
class TestJSONFields(unittest.TestCase):
    """Checks that Prometheus alert JSON is mapped and rendered correctly.

    Fixtures live under TEST_CONFIG_FILES_PATH; files prefixed `teams_`
    hold the expected rendered output for the like-named input fixture.
    """

    TEST_CONFIG_FILES_PATH = './tests/data/json_files/'

    def _load_json(self, name):
        """Read and parse a JSON fixture from TEST_CONFIG_FILES_PATH."""
        with open(os.path.join(self.TEST_CONFIG_FILES_PATH, name)) as fp:
            return json.load(fp)

    def _first_alert(self, name):
        """Deserialize a fixture through the schema; return the first teams alert."""
        alerts = MessageSchema().load(self._load_json(name))
        return map_prom_alerts_to_teams_alerts(alerts)[0]

    def test_json_with_all_fields(self):
        alert = self._first_alert('all_ok.json')
        self.assertNotIn('unknown', str(alert))

    def test_json_without_mandatory_field(self):
        # Missing mandatory fields are substituted with the 'unknown' marker.
        alert = self._first_alert('without_mandatory_field.json')
        self.assertIn('unknown', str(alert))

    def test_json_without_optional_field(self):
        alert = self._first_alert('without_optional_field.json')
        self.assertIn("'description': 'unknown'", str(alert))

    def test_json_without_instance_field(self):
        alert = self._first_alert('without_instance_field.json')
        self.assertEqual('unknown', str(alert['instance']))

    def test_fingerprint(self):
        # Consistency fix: path now built via os.path.join like every other
        # test (identical result, since TEST_CONFIG_FILES_PATH ends in '/').
        alert = self._first_alert('all_ok.json')
        self.assertEqual('dd19ae3d4e06ac55', str(alert['fingerprint']))

    def test_without_fingerprint(self):
        json_expected = self._load_json('teams_without_fingerprint.json')
        alerts = MessageSchema().load(self._load_json('without_fingerprint.json'))
        json_rendered = json.loads(AlertSender()._create_alerts(alerts)[0])
        self.assertEqual(json_rendered.keys(), json_expected.keys())

    def test_compose_all(self):
        json_expected = self._load_json('teams_alert_all_ok.json')
        alerts = MessageSchema().load(self._load_json('all_ok.json'))
        json_rendered = json.loads(AlertSender()._create_alerts(alerts)[0])
        diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
        self.assertTrue(not diff)

    def test_with_common_items(self):
        self.maxDiff = None
        json_expected = self._load_json('teams_alert_with_common_items.json')
        alerts = MessageSchema().load(self._load_json('with_common_items.json'))
        json_rendered = json.loads(AlertSender()._create_alerts(alerts)[0])
        self.assertEqual(json_rendered.keys(), json_expected.keys())

    def test_grouping_multiple_alerts(self):
        json_expected = self._load_json('teams_alert_all_ok_multiple.json')
        alerts = MessageSchema().load(self._load_json('all_ok_multiple.json'))
        # Grouped rendering leaves triple-newline runs that must be collapsed
        # before the payload parses as JSON.
        rendered_data = AlertSender(group_alerts_by='name')._create_alerts(alerts)[0].replace("\n\n\n", " ")
        diff = DeepDiff(json.loads(rendered_data), json_expected, ignore_order=True)
        self.assertTrue(not diff)

    def test_with_extra_labels(self):
        excluded_labels = ('pod_name', )
        json_expected = self._load_json('teams_alert_all_ok_extra_labels.json')
        alerts = MessageSchema(exclude_fields=excluded_labels).load(
            self._load_json('all_ok_extra_labels.json'))
        json_rendered = json.loads(AlertSender()._create_alerts(alerts)[0])
        diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
        self.assertTrue(not diff)

    def test_with_extra_annotations(self):
        excluded_annotations = ('message', )
        json_expected = self._load_json('teams_alert_all_ok_extra_annotations.json')
        alerts = MessageSchema(exclude_annotations=excluded_annotations).load(
            self._load_json('all_ok_extra_annotations.json'))
        json_rendered = json.loads(AlertSender()._create_alerts(alerts)[0])
        diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
        self.assertTrue(not diff)

    def test_with_too_long_payload(self):
        json_expected = self._load_json('teams_alert_all_ok_splitted.json')
        alerts = MessageSchema().load(self._load_json('all_ok_multiple.json'))
        # Payloads above MAX_PAYLOAD are split into parts; re-join them into
        # one JSON array so the whole set can be compared at once.
        rendered_data = '[' + ','.join(
            [a.replace("\n\n\n", " ")
             for a in AlertSender(group_alerts_by='name',
                                  teams_client_config={'MAX_PAYLOAD': 800})._create_alerts(alerts)]) + ']'
        diff = DeepDiff(json.loads(rendered_data), json_expected, ignore_order=True)
        self.assertTrue(not diff)
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| 51.160839
| 190
| 0.674959
| 919
| 7,316
| 5.020675
| 0.104461
| 0.041612
| 0.06502
| 0.082358
| 0.82098
| 0.802991
| 0.789987
| 0.768531
| 0.768531
| 0.768531
| 0
| 0.004414
| 0.225806
| 7,316
| 142
| 191
| 51.521127
| 0.810205
| 0
| 0
| 0.553571
| 0
| 0
| 0.088163
| 0.059185
| 0.008929
| 0
| 0
| 0
| 0.107143
| 1
| 0.107143
| false
| 0
| 0.0625
| 0
| 0.1875
| 0.044643
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e75ce0bb5d5d5c1a35846cd6584d38765070db04
| 29
|
py
|
Python
|
py_01.py
|
fosterlee/robrebase
|
b05d5c536a88c592f19d358b976ba1a9f5717fc5
|
[
"MIT"
] | null | null | null |
py_01.py
|
fosterlee/robrebase
|
b05d5c536a88c592f19d358b976ba1a9f5717fc5
|
[
"MIT"
] | 3
|
2021-03-13T18:20:43.000Z
|
2021-03-14T20:17:06.000Z
|
py_01.py
|
fosterlee/robrebase
|
b05d5c536a88c592f19d358b976ba1a9f5717fc5
|
[
"MIT"
] | null | null | null |
# Smoke-test script: print a greeting identifying this file.
# Fix: dropped the needless f-string prefix (the literal has no placeholders).
print("Hi, I'm py_01.py!")
| 9.666667
| 27
| 0.586207
| 8
| 29
| 2
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.137931
| 29
| 2
| 28
| 14.5
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e785dfc5b3ef5361ff72a11cef56454f3403e3bd
| 171
|
py
|
Python
|
wonya/admin.py
|
BuildForSDG/team-271-backend
|
db2bd8eb5f4d9f46bd6baff05e0e705aba883a83
|
[
"MIT"
] | 1
|
2020-08-20T01:24:46.000Z
|
2020-08-20T01:24:46.000Z
|
wonya/admin.py
|
BuildForSDG/team-271-backend
|
db2bd8eb5f4d9f46bd6baff05e0e705aba883a83
|
[
"MIT"
] | 12
|
2020-05-13T04:40:32.000Z
|
2022-03-12T00:39:09.000Z
|
wonya/admin.py
|
BuildForSDG/team-271-backend
|
db2bd8eb5f4d9f46bd6baff05e0e705aba883a83
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Brand the Django admin UI for the Wonya project.
admin.site.site_header = "Wonya Admin"  # heading shown on each admin page
admin.site.site_title = "Wonya Admin Area"  # HTML <title> suffix
admin.site.index_title = "Welcome to Wonya admin area"  # admin index heading
| 28.5
| 54
| 0.783626
| 27
| 171
| 4.851852
| 0.481481
| 0.206107
| 0.21374
| 0.274809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122807
| 171
| 5
| 55
| 34.2
| 0.873333
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e7990fa9255a40c41070da86bccde11f51478c80
| 64
|
py
|
Python
|
waveform_analysis/weighting_filters/__init__.py
|
pirun/waveform_analysis
|
66809614b1fc985e694af1720341035316a5ac8e
|
[
"MIT"
] | 125
|
2017-08-27T01:48:02.000Z
|
2022-01-20T10:47:13.000Z
|
waveform_analysis/weighting_filters/__init__.py
|
pirun/waveform_analysis
|
66809614b1fc985e694af1720341035316a5ac8e
|
[
"MIT"
] | 13
|
2017-06-25T14:57:43.000Z
|
2022-03-18T19:54:19.000Z
|
waveform_analysis/weighting_filters/__init__.py
|
pirun/waveform_analysis
|
66809614b1fc985e694af1720341035316a5ac8e
|
[
"MIT"
] | 48
|
2017-06-25T10:42:10.000Z
|
2022-03-09T18:13:55.000Z
|
from .ABC_weighting import *
from .ITU_R_468_weighting import *
| 21.333333
| 34
| 0.8125
| 10
| 64
| 4.8
| 0.7
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053571
| 0.125
| 64
| 2
| 35
| 32
| 0.803571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99c0fe51ba8b84508d7f28745233a4ba8b2f22a2
| 2,589
|
py
|
Python
|
bdc_collectors/scihub/parser.py
|
raphaelrpl/bdc-collectors
|
3eb4f1b8bee26aeca1df6ae20a232d13ece60bb7
|
[
"MIT"
] | 4
|
2021-01-21T21:40:10.000Z
|
2022-01-14T18:42:07.000Z
|
bdc_collectors/scihub/parser.py
|
raphaelrpl/bdc-collectors
|
3eb4f1b8bee26aeca1df6ae20a232d13ece60bb7
|
[
"MIT"
] | 14
|
2021-02-07T01:45:32.000Z
|
2022-03-25T14:16:41.000Z
|
bdc_collectors/scihub/parser.py
|
raphaelrpl/bdc-collectors
|
3eb4f1b8bee26aeca1df6ae20a232d13ece60bb7
|
[
"MIT"
] | 2
|
2021-02-07T00:53:14.000Z
|
2021-02-13T02:54:45.000Z
|
#
# This file is part of BDC-Collectors.
# Copyright (C) 2020 INPE.
#
# BDC-Collectors is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Defines the base structure of SciHub api."""
from datetime import datetime
from typing import List
from ..base import SceneParser
class Sentinel2Scene(SceneParser):
    """Parse Sentinel-2 scene identifiers (S2A/S2B products)."""

    # Underscore-separated fields of the scene identifier.
    fragments: List[str]

    def __init__(self, scene_id: str):
        """Validate *scene_id* and split it into its underscore-separated fields."""
        super().__init__(scene_id)
        parts = scene_id.split('_')
        # A valid Sentinel-2 identifier has exactly 7 fields and starts
        # with the platform name (S2A or S2B).
        if len(parts) != 7 or parts[0] not in ('S2A', 'S2B'):
            raise RuntimeError(f'Invalid sentinel scene {scene_id}')
        self.fragments = parts

    def tile_id(self):
        """Retrieve the tile id value."""
        # Field 5 is the tile field (e.g. "T31UDQ"); strip the leading "T".
        return self.fragments[5][1:]

    def sensing_date(self):
        """Retrieve the scene sensing date (field 2)."""
        return datetime.strptime(self.fragments[2], '%Y%m%dT%H%M%S')

    def processing_date(self):
        """Retrieve the scene processing date (last field)."""
        return datetime.strptime(self.fragments[-1], '%Y%m%dT%H%M%S')

    def satellite(self):
        """Retrieve the Sentinel satellite - 2A/2B."""
        return self.fragments[0][-2:]

    def source(self):
        """Retrieve the scene first parameter (S2A/S2B)."""
        return self.fragments[0]
class Sentinel1Scene(SceneParser):
    """Define the parser of Sentinel 1 Scene identifiers."""

    # Underscore-separated fields of the scene identifier.
    fragments: List[str]

    def __init__(self, scene_id: str):
        """Create the parser Sentinel1Scene."""
        super().__init__(scene_id)
        fragments = scene_id.split('_')
        # A valid Sentinel-1 identifier has exactly 9 fields and starts
        # with the platform name (S1A or S1B).
        if len(fragments) != 9 or fragments[0] not in ('S1A', 'S1B'):
            raise RuntimeError(f'Invalid sentinel scene {scene_id}')
        self.fragments = fragments

    def tile_id(self):
        """Retrieve the tile id value (field 6)."""
        return self.fragments[6]

    def sensing_date(self):
        """Retrieve the scene sensing date (field 4)."""
        return datetime.strptime(self.fragments[4], '%Y%m%dT%H%M%S')

    def processing_date(self):
        """Retrieve the scene processing date (field 5)."""
        return datetime.strptime(self.fragments[5], '%Y%m%dT%H%M%S')

    def satellite(self):
        """Retrieve the Sentinel satellite - 1A/1B."""
        part = self.fragments[0]
        return part[-2:]

    def source(self):
        """Retrieve the scene first parameter (S1A/S1B)."""
        return self.fragments[0]
| 27.83871
| 75
| 0.627655
| 334
| 2,589
| 4.769461
| 0.290419
| 0.097928
| 0.094162
| 0.07533
| 0.77715
| 0.755807
| 0.710609
| 0.710609
| 0.710609
| 0.710609
| 0
| 0.018791
| 0.239475
| 2,589
| 92
| 76
| 28.141304
| 0.790249
| 0.30282
| 0
| 0.682927
| 0
| 0
| 0.076611
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.292683
| false
| 0
| 0.073171
| 0
| 0.707317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
99db8f88ea33145ea5fc56366dae8a627c45d429
| 135
|
py
|
Python
|
scripts/npc/autogen_DestinyWormhole_First.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
scripts/npc/autogen_DestinyWormhole_First.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
scripts/npc/autogen_DestinyWormhole_First.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
# Character field ID when accessed: 820000000
# ObjectID: 1000010
# ParentID: 9201391
# Object Position Y: 48
# Object Position X: 134
| 22.5
| 45
| 0.755556
| 18
| 135
| 5.666667
| 0.888889
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.17037
| 135
| 5
| 46
| 27
| 0.660714
| 0.918519
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
99f2e33f1c43c678a091d5b01f3e9bf523948ceb
| 21
|
py
|
Python
|
qmatch/commands/__init__.py
|
davislf2/qmatch-cli
|
ba97b3cf2bbc225efe96c8e4687103448e3b2835
|
[
"MIT"
] | null | null | null |
qmatch/commands/__init__.py
|
davislf2/qmatch-cli
|
ba97b3cf2bbc225efe96c8e4687103448e3b2835
|
[
"MIT"
] | null | null | null |
qmatch/commands/__init__.py
|
davislf2/qmatch-cli
|
ba97b3cf2bbc225efe96c8e4687103448e3b2835
|
[
"MIT"
] | null | null | null |
from .match import *
| 21
| 21
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
820c45ca18741ed33628195fcd7cf95b3540b713
| 196
|
py
|
Python
|
terse/test/test_invoke.py
|
cjlucas85/terse
|
f94483c677850d8da6a154079ee6b2feefd3c739
|
[
"BSD-3-Clause"
] | 1
|
2018-07-23T23:47:25.000Z
|
2018-07-23T23:47:25.000Z
|
terse/test/test_invoke.py
|
cjlucas85/terse
|
f94483c677850d8da6a154079ee6b2feefd3c739
|
[
"BSD-3-Clause"
] | null | null | null |
terse/test/test_invoke.py
|
cjlucas85/terse
|
f94483c677850d8da6a154079ee6b2feefd3c739
|
[
"BSD-3-Clause"
] | null | null | null |
from terse import main
from .test_invoke_helper import FILENAME
from .test_invoke_helper import main_impl
import os
def test_helper_did_not_create_file():
    """Importing the helper module must not leave FILENAME on disk."""
    file_exists = os.path.exists(FILENAME)
    assert not file_exists
| 24.5
| 41
| 0.831633
| 32
| 196
| 4.78125
| 0.5625
| 0.130719
| 0.183007
| 0.261438
| 0.339869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 196
| 7
| 42
| 28
| 0.889535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| true
| 0
| 0.666667
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8240d30fdbec4d38935b4abd617e5ba39fd0f7b1
| 121
|
py
|
Python
|
project_tools/tests/data_processing/test_pipelines.py
|
NRCan/Geoscience_Language_Models
|
a1dcdaae4aac0e8cee9f864e6246ba615b7f68c8
|
[
"MIT"
] | null | null | null |
project_tools/tests/data_processing/test_pipelines.py
|
NRCan/Geoscience_Language_Models
|
a1dcdaae4aac0e8cee9f864e6246ba615b7f68c8
|
[
"MIT"
] | null | null | null |
project_tools/tests/data_processing/test_pipelines.py
|
NRCan/Geoscience_Language_Models
|
a1dcdaae4aac0e8cee9f864e6246ba615b7f68c8
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2021 ServiceNow, Inc.
import nrcan_p2.data_processing.pipelines
def test_can_import():
    """Smoke test: the module-level import of the pipelines package succeeded.

    Reaching this function at all means ``nrcan_p2.data_processing.pipelines``
    imported without raising, so the body is intentionally a no-op assertion.
    """
    # `assert` is a statement, not a function; drop the misleading parentheses.
    assert True
| 20.166667
| 41
| 0.768595
| 17
| 121
| 5.235294
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.132231
| 121
| 6
| 42
| 20.166667
| 0.8
| 0.289256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8246fe303f74a61bfc0f7b5dfe898b240aa83b0b
| 214
|
py
|
Python
|
inversion/__init__.py
|
yohan-pg/stylegan2-ada-pytorch
|
e1225b08d55ff5ca38e1646fa430d3c3c3bb3c68
|
[
"BSD-Source-Code"
] | null | null | null |
inversion/__init__.py
|
yohan-pg/stylegan2-ada-pytorch
|
e1225b08d55ff5ca38e1646fa430d3c3c3bb3c68
|
[
"BSD-Source-Code"
] | null | null | null |
inversion/__init__.py
|
yohan-pg/stylegan2-ada-pytorch
|
e1225b08d55ff5ca38e1646fa430d3c3c3bb3c68
|
[
"BSD-Source-Code"
] | null | null | null |
from .prelude import *
from .criterions import *
from .variables import *
from .jittering import *
from .optimizer import *
from .io import *
from .interpolator import *
from .inverter import *
from .util import *
| 21.4
| 27
| 0.747664
| 27
| 214
| 5.925926
| 0.407407
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168224
| 214
| 9
| 28
| 23.777778
| 0.898876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
413edf4c8be0e5fea1e4a8e313013e016141705e
| 192
|
py
|
Python
|
src/event_stream/event.py
|
hvuhsg/yoyocoin
|
aad0f413479728dc4e0842447cf1910e5dff1418
|
[
"MIT"
] | 11
|
2021-05-25T07:42:27.000Z
|
2022-01-03T07:46:38.000Z
|
src/event_stream/event.py
|
hvuhsg/yoyocoin
|
aad0f413479728dc4e0842447cf1910e5dff1418
|
[
"MIT"
] | 18
|
2021-05-25T17:42:46.000Z
|
2021-09-13T15:14:38.000Z
|
src/event_stream/event.py
|
hvuhsg/yoyocoin
|
aad0f413479728dc4e0842447cf1910e5dff1418
|
[
"MIT"
] | 5
|
2021-06-23T17:38:51.000Z
|
2022-03-03T12:40:53.000Z
|
class Event:
    """A named event carrying arbitrary keyword arguments as its payload."""

    def __init__(self, name, **kwargs):
        """Store the event *name*; all keyword arguments become ``self.args``."""
        self.name = name
        self.args = kwargs

    def __str__(self):
        return f"Event(name='{self.name}', args={self.args})"

    # Reuse the readable string form for debugger / container output;
    # without this, lists of events print as opaque object addresses.
    __repr__ = __str__
| 21.333333
| 61
| 0.578125
| 25
| 192
| 4.12
| 0.44
| 0.23301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265625
| 192
| 8
| 62
| 24
| 0.730496
| 0
| 0
| 0
| 0
| 0
| 0.225131
| 0.13089
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4154085cd0ac9a3e05cbf6bb1c722e3d9b649fa8
| 27
|
py
|
Python
|
webapp/__init__.py
|
georg3tom/matching-handwritten-document-images
|
a86d197b0f84bf6c0733bc75d5aff3d1b2263d60
|
[
"MIT"
] | 1
|
2020-12-24T07:13:39.000Z
|
2020-12-24T07:13:39.000Z
|
webapp/__init__.py
|
georg3tom/matching-handwritten-document-images
|
a86d197b0f84bf6c0733bc75d5aff3d1b2263d60
|
[
"MIT"
] | null | null | null |
webapp/__init__.py
|
georg3tom/matching-handwritten-document-images
|
a86d197b0f84bf6c0733bc75d5aff3d1b2263d60
|
[
"MIT"
] | null | null | null |
from webapp.app import app
| 13.5
| 26
| 0.814815
| 5
| 27
| 4.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4161a8be5aa5f7dd13c62162e5a69f945d18d565
| 20,365
|
py
|
Python
|
figure_generation/figure_3.py
|
calico/stimulated_emission_imaging
|
dca60d2188cfb79527537496c5473ecf80c4bf22
|
[
"CC-BY-4.0"
] | 1
|
2020-02-14T13:33:46.000Z
|
2020-02-14T13:33:46.000Z
|
figure_generation/figure_3.py
|
calico/stimulated_emission_imaging
|
dca60d2188cfb79527537496c5473ecf80c4bf22
|
[
"CC-BY-4.0"
] | 1
|
2020-02-20T19:16:47.000Z
|
2020-02-20T19:16:47.000Z
|
figure_generation/figure_3.py
|
calico/stimulated_emission_imaging
|
dca60d2188cfb79527537496c5473ecf80c4bf22
|
[
"CC-BY-4.0"
] | 3
|
2020-02-13T00:32:42.000Z
|
2020-02-19T22:16:17.000Z
|
import os
import numpy as np
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import np_tif
from stack_registration import bucket
def main():
    """Generate figure 3: phase-contrast and STE images for three mountants.

    Loads three single-shot datasets (refractive-index-mix, n=1.54 and
    n=1.61 meltmount), normalizes laser brightness, subtracts the
    stationary background, smooths, crops and bins the images, then saves
    two composite SVG figures under ./../images/figure_3.

    NOTE(review): the original divided array shapes with "/" before
    reshape; on Python 3 that yields a float and reshape raises
    TypeError. Floor division "//" is used here instead.
    """
    assert os.path.isdir('./../images')
    if not os.path.isdir('./../images/figure_3'):
        os.mkdir('./../images/figure_3')
    #####################################################################
    # meltmount mix data
    data = np_tif.tif_to_array(
        './../../stimulated_emission_imaging-data' +
        '/2018_02_23_STE_phase_cr_bead_4' +
        '/dataset_green_1010mW_single_shot.tif').astype(np.float64)
    # get rid of overexposed rows at top and bottom of images
    less_rows = 3
    data = data[:, 0+less_rows:data.shape[1]-less_rows, :]
    data = data[:, ::-1, :] # flip up down
    # reshape to hyperstack
    num_delays = 3
    data = data.reshape((
        data.shape[0] // num_delays,  # phase plate angle number
        num_delays,
        data.shape[1],
        data.shape[2],
        ))
    # Get the average pixel brightness in the background region of the
    # meltmount mix data. We'll use it to account for laser intensity
    # fluctuations
    avg_laser_brightness = get_bg_level(data.mean(axis=(0, 1)))
    # scale all images to have the same background brightness. This
    # amounts to a correction of roughly 1% or less
    local_laser_brightness = get_bg_level(data)
    data = data * (avg_laser_brightness / local_laser_brightness).reshape(
        data.shape[0], data.shape[1], 1, 1)
    # get zero delay images, max delay images and phase contrast images
    zero_delay_images = data[:, 1, :, :] # zero red/green delay
    max_delay_images = data[
        :, 0:3:2, :, :].mean(axis=1) # average max and min delay
    phase_contrast_images = data[:, 0, :, :] # red before green (min delay)
    # from the image where red/green are simultaneous, subtract the
    # average of the max and min delay images
    STE_stack = zero_delay_images - max_delay_images
    # phase contrast image (no STE) stack: there is a large background
    # variation that has nothing to do with the sample; it's due to
    # multiple reflections in the microscope. Some of it moves when you
    # move the phase plate, and some of it doesn't. This step subtracts
    # off the stationary component. For each image we use in the figure,
    # we subtract the minimum contrast image with the closest phase plate angle.
    # minimum contrast phase plate angle closest to first 7 phase plate angles:
    min_contrast_index_1 = 5
    # minimum contrast phase plate angle closest to last 7 phase plate angles:
    min_contrast_index_2 = 11
    phase_stack = phase_contrast_images
    phase_stack[0:8, ...] = phase_stack[0:8, ...] - phase_contrast_images[
        min_contrast_index_1:min_contrast_index_1 + 1, :, :]
    phase_stack[8:15, ...] = phase_stack[8:15, ...] - phase_contrast_images[
        min_contrast_index_2:min_contrast_index_2 + 1, :, :]
    # Luckily the non-stationary component is comprised of stripes that
    # are completely outside of the microscope's spatial pass-band. The
    # smoothing step below strongly attenuates this striping artifact
    # with almost no effect on spatial frequencies due to the sample.
    sigma = 9 # tune this parameter to reject high spatial frequencies
    STE_stack = gaussian_filter(STE_stack, sigma=(0, sigma, sigma))
    phase_stack = gaussian_filter(phase_stack, sigma=(0, sigma, sigma))
    # crop images to center bead and fit into figure
    top = 0
    bot = 122
    left = 109
    right = 361
    phase_cropped = phase_stack[:, top:bot, left:right]
    STE_cropped = STE_stack[:, top:bot, left:right]
    # Our pixels are tiny (8.7 nm/pixel) to give large dynamic range.
    # This is not great for viewing, because fluctuations can swamp the
    # signal. This step bins the pixels into a more typical size.
    bucket_width = 8 # bucket width in pixels
    phase_cropped = bucket(
        phase_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    STE_cropped = bucket(
        STE_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    # display images from the two phase plate angles that maximize bead
    # contrast (+/- contrast)
    zero_phase_angle = 8
    pi_phase_angle = 0
    n_mix_zero_phase_bead_image = phase_cropped[zero_phase_angle, :, :]
    n_mix_pi_phase_bead_image = phase_cropped[pi_phase_angle, :, :]
    n_mix_zero_phase_STE_image = STE_cropped[zero_phase_angle, :, :]
    n_mix_pi_phase_STE_image = STE_cropped[pi_phase_angle, :, :]
    #####################################################################
    #####################################################################
    # meltmount n = 1.54 data
    data = np_tif.tif_to_array(
        './../../stimulated_emission_imaging-data' +
        '/2018_02_27_STE_phase_n_1_54_cr_bead_0' +
        '/dataset_green_970mW_single_shot.tif').astype(np.float64)
    # get rid of overexposed rows at top and bottom of images
    data = data[:, 0+less_rows:data.shape[1]-less_rows, :]
    # reshape to hyperstack
    data = data.reshape((
        data.shape[0] // num_delays,  # phase plate angle number
        num_delays,
        data.shape[1],
        data.shape[2],
        ))
    # scale all images to have the same background brightness. This
    # amounts to a correction of roughly 1% or less
    local_laser_brightness = get_bg_level(data)
    data = data * (avg_laser_brightness / local_laser_brightness).reshape(
        data.shape[0], data.shape[1], 1, 1)
    # get zero delay images, max delay images and phase contrast images
    zero_delay_images = data[:, 1, :, :] # zero red/green delay
    max_delay_images = data[
        :, 0:3:2, :, :].mean(axis=1) # average max and min delay
    phase_contrast_images = data[:, 0, :, :] # red before green (min delay)
    # from the image where red/green are simultaneous, subtract the
    # average of the max and min delay images
    STE_stack = zero_delay_images - max_delay_images
    # phase contrast image (no STE) stack: there is a large background
    # variation that has nothing to do with the sample; it's due to
    # multiple reflections in the microscope. Some of it moves when you
    # move the phase plate, and some of it doesn't. This step subtracts
    # off the stationary component. For each image we use in the figure,
    # we subtract the minimum contrast image with the closest phase plate angle.
    # minimum contrast phase plate angle closest to first 7 phase plate angles:
    min_contrast_index_1 = 5
    # minimum contrast phase plate angle closest to last 7 phase plate angles:
    min_contrast_index_2 = 11
    phase_stack = phase_contrast_images
    phase_stack[0:8, ...] = phase_stack[0:8, ...] - phase_contrast_images[
        min_contrast_index_1:min_contrast_index_1 + 1, :, :]
    phase_stack[8:15, ...] = phase_stack[8:15, ...] - phase_contrast_images[
        min_contrast_index_2:min_contrast_index_2 + 1, :, :]
    # Luckily the non-stationary component is comprised of stripes that
    # are completely outside of the microscope's spatial pass-band. The
    # smoothing step below strongly attenuates this striping artifact
    # with almost no effect on spatial frequencies due to the sample.
    STE_stack = gaussian_filter(STE_stack, sigma=(0, sigma, sigma))
    phase_stack = gaussian_filter(phase_stack, sigma=(0, sigma, sigma))
    # crop images to center bead and fit into figure
    top = 0
    bot = 122
    left = 44
    right = 296
    phase_cropped = phase_stack[:,top:bot,left:right]
    STE_cropped = STE_stack[:,top:bot,left:right]
    # Our pixels are tiny (8.7 nm/pixel) to give large dynamic range.
    # This is not great for viewing, because fluctuations can swamp the
    # signal. This step bins the pixels into a more typical size.
    phase_cropped = bucket(
        phase_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    STE_cropped = bucket(
        STE_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    # display images from the two phase plate angles that maximize bead
    # contrast (+/- contrast)
    zero_phase_angle = 8
    pi_phase_angle = 13
    n_1_53_zero_phase_bead_image = phase_cropped[zero_phase_angle, :, :]
    n_1_53_pi_phase_bead_image = phase_cropped[pi_phase_angle, :, :]
    n_1_53_zero_phase_STE_image = STE_cropped[zero_phase_angle, :, :]
    n_1_53_pi_phase_STE_image = STE_cropped[pi_phase_angle, :, :]
    #####################################################################
    #####################################################################
    # meltmount n = 1.61 data
    data = np_tif.tif_to_array(
        './../../stimulated_emission_imaging-data' +
        '/2018_02_26_STE_phase_n_1_61_cr_bead_0' +
        '/dataset_green_1060mW_single_shot.tif').astype(np.float64)
    # get rid of overexposed rows at top and bottom of images
    data = data[:, 0+less_rows:data.shape[1]-less_rows, :]
    data = data[:, ::-1, :] # flip up down
    # reshape to hyperstack
    data = data.reshape((
        data.shape[0] // num_delays,  # phase plate angle number
        num_delays,
        data.shape[1],
        data.shape[2],
        ))
    # scale all images to have the same background brightness. This
    # amounts to a correction of roughly 1% or less
    local_laser_brightness = get_bg_level(data)
    data = data * (avg_laser_brightness / local_laser_brightness).reshape(
        data.shape[0], data.shape[1], 1, 1)
    # get zero delay images, max delay images and phase contrast images
    zero_delay_images = data[:, 1, :, :] # zero red/green delay
    max_delay_images = data[
        :, 0:3:2, :, :].mean(axis=1) # average max and min delay
    phase_contrast_images = data[:, 0, :, :] # red before green (min delay)
    # from the image where red/green are simultaneous, subtract the
    # average of the max and min delay images
    STE_stack = zero_delay_images - max_delay_images
    # phase contrast image (no STE) stack: there is a large background
    # variation that has nothing to do with the sample; it's due to
    # multiple reflections in the microscope. Some of it moves when you
    # move the phase plate, and some of it doesn't. This step subtracts
    # off the stationary component. For each image we use in the figure,
    # we subtract the minimum contrast image with the closest phase plate angle.
    # minimum contrast phase plate angle closest to first 7 phase plate angles:
    min_contrast_index_1 = 5
    # minimum contrast phase plate angle closest to last 7 phase plate angles:
    min_contrast_index_2 = 11
    phase_stack = phase_contrast_images
    phase_stack[0:8, ...] = phase_stack[0:8, ...] - phase_contrast_images[
        min_contrast_index_1:min_contrast_index_1 + 1, :, :]
    phase_stack[8:15, ...] = phase_stack[8:15, ...] - phase_contrast_images[
        min_contrast_index_2:min_contrast_index_2 + 1, :, :]
    # Luckily the non-stationary component is comprised of stripes that
    # are completely outside of the microscope's spatial pass-band. The
    # smoothing step below strongly attenuates this striping artifact
    # with almost no effect on spatial frequencies due to the sample.
    STE_stack = gaussian_filter(STE_stack, sigma=(0, sigma, sigma))
    phase_stack = gaussian_filter(phase_stack, sigma=(0, sigma, sigma))
    # crop images to center bead and fit into figure
    top = 0
    bot = 122
    left = 59
    right = 311
    phase_cropped = phase_stack[:,top:bot,left:right]
    STE_cropped = STE_stack[:,top:bot,left:right]
    # Our pixels are tiny (8.7 nm/pixel) to give large dynamic range.
    # This is not great for viewing, because fluctuations can swamp the
    # signal. This step bins the pixels into a more typical size.
    phase_cropped = bucket(
        phase_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    STE_cropped = bucket(
        STE_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    # display images from the two phase plate angles that maximize bead
    # contrast (+/- contrast)
    zero_phase_angle = 8
    pi_phase_angle = 0
    n_1_61_zero_phase_bead_image = phase_cropped[zero_phase_angle, :, :]
    n_1_61_pi_phase_bead_image = phase_cropped[pi_phase_angle, :, :]
    n_1_61_zero_phase_STE_image = STE_cropped[zero_phase_angle, :, :]
    n_1_61_pi_phase_STE_image = STE_cropped[pi_phase_angle, :, :]
    #####################################################################
    #####################################################################
    # start plotting all the images
    # get max and min values to unify the colorbar
    all_phase = np.concatenate((
        n_mix_zero_phase_bead_image,
        n_1_53_zero_phase_bead_image,
        n_1_61_zero_phase_bead_image,
        n_mix_pi_phase_bead_image,
        n_1_53_pi_phase_bead_image,
        n_1_61_pi_phase_bead_image), axis=0)
    all_STE = np.concatenate((
        n_mix_zero_phase_STE_image,
        n_1_53_zero_phase_STE_image,
        n_1_61_zero_phase_STE_image,
        n_mix_pi_phase_STE_image,
        n_1_53_pi_phase_STE_image,
        n_1_61_pi_phase_STE_image), axis=0)
    max_phase = int(np.amax(all_phase)) + 1
    min_phase = int(np.amin(all_phase)) - 1
    max_ste = int(np.amax(all_STE)) + 1
    min_ste = int(np.amin(all_STE)) - 1
    # make scale bar black to give lower limit on colorbar
    bar_left = 1
    bar_right = 6
    bar_vert = -2
    n_mix_zero_phase_bead_image[bar_vert, bar_left:bar_right] = min_phase
    n_1_53_zero_phase_bead_image[bar_vert, bar_left:bar_right] = min_phase
    n_1_61_zero_phase_bead_image[bar_vert, bar_left:bar_right] = min_phase
    n_mix_pi_phase_bead_image[bar_vert, bar_left:bar_right] = min_phase
    n_1_53_pi_phase_bead_image[bar_vert, bar_left:bar_right] = min_phase
    n_1_61_pi_phase_bead_image[bar_vert, bar_left:bar_right] = min_phase
    n_mix_zero_phase_STE_image[bar_vert, bar_left:bar_right] = min_ste
    n_1_53_zero_phase_STE_image[bar_vert, bar_left:bar_right] = min_ste
    n_1_61_zero_phase_STE_image[bar_vert, bar_left:bar_right] = min_ste
    n_mix_pi_phase_STE_image[bar_vert, bar_left:bar_right] = min_ste
    n_1_53_pi_phase_STE_image[bar_vert, bar_left:bar_right] = min_ste
    n_1_61_pi_phase_STE_image[bar_vert, bar_left:bar_right] = min_ste
    # create wider image comprised of three side-by-side images
    # get width of wider image
    num_angles, height, width = STE_cropped.shape
    between_pics = int(16 / bucket_width)
    big_width = width*3 + between_pics*2
    # initialize wide phase contrast image and make "between color" white
    between_color = max_phase # makes it white and gives upper limit on colorbar
    zero_phase_bead_image = np.zeros((height,big_width)) + between_color
    pi_phase_bead_image = np.zeros((height,big_width)) + between_color
    # initialize wide STE image and make "between color" white
    between_color = max_ste # makes it white and gives upper limit on colorbar
    zero_phase_STE_image = np.zeros((height,big_width)) + between_color
    pi_phase_STE_image = np.zeros((height,big_width)) + between_color
    # n = 1.53 images on left side of wide image
    left = 0
    right = width
    zero_phase_bead_image[:,left:right] = n_1_53_zero_phase_bead_image
    pi_phase_bead_image[:,left:right] = n_1_53_pi_phase_bead_image
    zero_phase_STE_image[:,left:right] = n_1_53_zero_phase_STE_image
    pi_phase_STE_image[:,left:right] = n_1_53_pi_phase_STE_image
    # n = 1.58/1.61 mix images in center of wide image
    left = width + between_pics
    right = width*2 + between_pics
    zero_phase_bead_image[:,left:right] = n_mix_zero_phase_bead_image
    pi_phase_bead_image[:,left:right] = n_mix_pi_phase_bead_image
    zero_phase_STE_image[:,left:right] = n_mix_zero_phase_STE_image
    pi_phase_STE_image[:,left:right] = n_mix_pi_phase_STE_image
    # n = 1.61 on right side of wide image
    left = width*2 + between_pics*2
    right = big_width
    zero_phase_bead_image[:,left:right] = n_1_61_zero_phase_bead_image
    pi_phase_bead_image[:,left:right] = n_1_61_pi_phase_bead_image
    zero_phase_STE_image[:,left:right] = n_1_61_zero_phase_STE_image
    pi_phase_STE_image[:,left:right] = n_1_61_pi_phase_STE_image
    # generate and save plot
    fig, (ax0, ax1) = plt.subplots(nrows=2,ncols=1,figsize=(20,7))
    cax0 = ax0.imshow(pi_phase_bead_image, cmap=plt.cm.gray,
                      interpolation='nearest', vmax=2500, vmin=-4200)
    ax0.axis('off')
    divider = make_axes_locatable(ax0)
    cax = divider.append_axes("right",size="1%",pad=0.25)
    plt.colorbar(cax0, cax = cax)
    ax0.set_title('Phase contrast image of scattered light from bead',fontsize=30)
    ax0.text(
        12, 14, r'$\Delta n\approx +0.05$',
        fontsize=38, color='black', fontweight='bold')
    ax0.text(
        53, 14, r'$\Delta n\approx 0$',
        fontsize=38, color='black', fontweight='bold')
    ax0.text(
        79, 14, r'$\Delta n\approx -0.01$',
        fontsize=38, color='black', fontweight='bold')
    cax1 = ax1.imshow(pi_phase_STE_image, cmap=plt.cm.gray,
                      interpolation='nearest')
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right",size="1%",pad=0.25)
    plt.colorbar(cax1, cax = cax)
    ax1.text(
        12, 14, r'$\Delta n\approx +0.05$',
        fontsize=38, color='black', fontweight='bold')
    ax1.text(
        53, 14, r'$\Delta n\approx 0$',
        fontsize=38, color='black', fontweight='bold')
    ax1.text(
        79, 14, r'$\Delta n\approx -0.01$',
        fontsize=38, color='black', fontweight='bold')
    ax1.set_title('Change due to excitation',fontsize=30,)
    ax1.axis('off')
    plt.savefig('./../images/figure_3/STE_crimson_bead_pi_phase.svg',
                bbox_inches='tight', pad_inches=0.1)
    plt.show()
    fig, (ax0, ax1) = plt.subplots(nrows=2,ncols=1,figsize=(20,7))
    cax0 = ax0.imshow(zero_phase_bead_image, cmap=plt.cm.gray,
                      interpolation='nearest', vmin=-2300)
    ax0.axis('off')
    divider = make_axes_locatable(ax0)
    cax = divider.append_axes("right",size="1%",pad=0.25)
    plt.colorbar(cax0, cax = cax)
    ax0.set_title('Phase contrast image of scattered light from bead',fontsize=30)
    ax0.text(
        12, 14, r'$\Delta n\approx +0.05$',
        fontsize=38, color='white', fontweight='bold')
    ax0.text(
        53, 14, r'$\Delta n\approx 0$',
        fontsize=38, color='white', fontweight='bold')
    ax0.text(
        79, 14, r'$\Delta n\approx -0.01$',
        fontsize=38, color='white', fontweight='bold')
    cax1 = ax1.imshow(zero_phase_STE_image, cmap=plt.cm.gray,
                      interpolation='nearest')
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right",size="1%",pad=0.25)
    plt.colorbar(cax1, cax = cax)
    ax1.text(
        12, 14, r'$\Delta n\approx +0.05$',
        fontsize=38, color='white', fontweight='bold')
    ax1.text(
        53, 14, r'$\Delta n\approx 0$',
        fontsize=38, color='white', fontweight='bold')
    ax1.text(
        79, 14, r'$\Delta n\approx -0.01$',
        fontsize=38, color='white', fontweight='bold')
    ax1.set_title('Change due to excitation',fontsize=30)
    ax1.axis('off')
    plt.savefig('./../images/figure_3/STE_crimson_bead_zero_phase.svg',
                bbox_inches='tight', pad_inches=0.1)
    plt.show()
    return None
def get_bg_level(data):
    """Return the mean background brightness over two fixed image regions.

    Two rectangles (rows 2:120 with columns 285:379 and 1:81) are averaged
    over their last two axes, and the two per-image means are averaged.
    """
    # (row_start, row_stop, col_start, col_stop) for each background rectangle
    regions = ((2, 120, 285, 379), (2, 120, 1, 81))
    total = sum(
        data[..., up:down, left:right].mean(axis=(-2, -1))
        for up, down, left, right in regions
    )
    return total / len(regions)
# Build figure 3 immediately when this script is executed
# (no `if __name__ == "__main__"` guard — running on import is intentional here).
main()
| 44.660088
| 83
| 0.654014
| 3,002
| 20,365
| 4.169554
| 0.110926
| 0.031637
| 0.038028
| 0.024447
| 0.893185
| 0.883039
| 0.872573
| 0.856515
| 0.848047
| 0.821283
| 0
| 0.037096
| 0.226958
| 20,365
| 455
| 84
| 44.758242
| 0.757988
| 0.279205
| 0
| 0.589226
| 0
| 0
| 0.079395
| 0.032213
| 0
| 0
| 0
| 0
| 0.003367
| 1
| 0.006734
| false
| 0
| 0.023569
| 0
| 0.03367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4193dc3831f4a8fea32ce29d1e468b0d9080c6bf
| 8,779
|
py
|
Python
|
tests/test_operator.py
|
TierMobility/aws-auth-operator
|
3841e88b85a04a9dd0fb4ca088d163436442848f
|
[
"MIT"
] | 8
|
2020-11-17T16:04:13.000Z
|
2021-10-21T07:50:20.000Z
|
tests/test_operator.py
|
TierMobility/aws-auth-operator
|
3841e88b85a04a9dd0fb4ca088d163436442848f
|
[
"MIT"
] | 6
|
2021-04-30T21:07:56.000Z
|
2021-06-14T12:53:38.000Z
|
tests/test_operator.py
|
TierMobility/aws-auth-operator
|
3841e88b85a04a9dd0fb4ca088d163436442848f
|
[
"MIT"
] | null | null | null |
import aws_auth
import kubernetes
import yaml
import copy
import logging
import pytest
import kopf
from lib.mappings import UserType
# Mapping assumed to already be present in the aws-auth config map fixture.
DATA_DEFAULT = {
    "arn": "arn:aws:iam::6666:role/test-role-0",
    "username": "test-role-0",
    "usertype": UserType.Role,
    "groups": ["viewers"],
}
# Mapping added by the create tests (and removed by the delete test).
DATA_CREATE = {
    "arn": "arn:aws:iam::6666:role/test-role-1",
    "username": "test-role-1",
    "usertype": UserType.Role,
    "groups": ["viewers"],
}
# Same role as DATA_CREATE but with an extra group; used by the update tests.
DATA_UPDATE = {
    "arn": "arn:aws:iam::6666:role/test-role-1",
    "username": "test-role-1",
    "usertype": UserType.Role,
    "groups": ["viewers", "editors"],
}
# Mapping returned as a protected mapping; never part of the config map fixture.
DATA_NOT_CONTAINED = {
    "arn": "arn:aws:iam::6666:role/test-role-2",
    "username": "test-role-2",
    "usertype": UserType.Role,
    "groups": ["viewers", "editors"],
}
# Config-map style entries using the "rolearn" key form.
# NOTE(review): not referenced in this part of the file — presumably consumed
# by tests/helpers defined further down; verify before removing.
CM_DATA_1 = {
    "rolearn": "arn:aws:iam::6666:role/test-role-1",
    "username": "test-role-1",
    "groups": ["viewers"],
}
CM_DATA_2 = {
    "rolearn": "arn:aws:iam::6666:role/test-role-2",
    "username": "test-role-2",
    "groups": ["viewers", "editors"],
}
# Root logger handed to the operator handler functions under test.
logger = logging.getLogger()
def test_run():
    """Trivial sanity check that the test suite collects and executes."""
    assert True
def test_create(mocker):
    """create_fn adds the new mapping to the existing aws-auth config map."""
    # Stub out all Kubernetes I/O performed by the operator module.
    mocker.patch("aws_auth.get_protected_mapping")
    mocker.patch("aws_auth.get_config_map")
    mocker.patch("aws_auth.write_config_map")
    mocker.patch("aws_auth.write_last_handled_mapping")
    # Protected mappings hold an unrelated entry; it must not leak into the map.
    aws_auth.get_protected_mapping.return_value = {
        "spec": {"mappings": [DATA_NOT_CONTAINED]}
    }
    aws_auth.get_config_map.return_value = build_cm()
    aws_auth.write_config_map.return_value = build_cm(extra_data=DATA_CREATE)
    message = aws_auth.create_fn(
        logger, spec={"mappings": [DATA_CREATE]}, meta={}, kwargs={}
    )
    assert "All good" == message["message"]
    # asserts
    aws_auth.get_config_map.assert_called_once()
    aws_auth.write_config_map.assert_called_once()
    aws_auth.get_protected_mapping.assert_called_once()
    config_map, _ = aws_auth.write_config_map.call_args
    assert isinstance(config_map[0], kubernetes.client.V1ConfigMap)
    # The written map must hold the default plus the new mapping,
    # serialized as YAML under the "mapRoles" key.
    data = {
        "mapRoles": yaml.dump(
            rename_arn_keys([DATA_DEFAULT, DATA_CREATE]), default_flow_style=False
        )
    }
    assert config_map[0].data == data
def test_delete(mocker):
    """delete_fn removes the given mapping from the aws-auth config map."""
    # Stub out all Kubernetes I/O performed by the operator module.
    mocker.patch("aws_auth.get_protected_mapping")
    mocker.patch("aws_auth.get_config_map")
    mocker.patch("aws_auth.write_config_map")
    mocker.patch("aws_auth.write_last_handled_mapping")
    # Config map starts with the default plus the mapping about to be deleted.
    aws_auth.get_config_map.return_value = build_cm(extra_data=DATA_CREATE)
    aws_auth.write_config_map.return_value = build_cm()
    message = aws_auth.delete_fn(
        logger, spec={"mappings": [DATA_CREATE]}, meta={}, kwargs={}
    )
    assert "All good" == message["message"]
    # asserts
    aws_auth.get_config_map.assert_called_once()
    aws_auth.write_config_map.assert_called_once()
    config_map, _ = aws_auth.write_config_map.call_args
    assert isinstance(config_map[0], kubernetes.client.V1ConfigMap)
    # Only the default mapping should remain after deletion.
    data = {
        "mapRoles": yaml.dump(rename_arn_keys([DATA_DEFAULT]), default_flow_style=False)
    }
    assert config_map[0].data == data
def test_update(mocker):
    """update_fn replaces the old mapping with the updated one."""
    for target in ("get_protected_mapping", "get_config_map",
                   "write_config_map", "write_last_handled_mapping"):
        mocker.patch(f"aws_auth.{target}")
    aws_auth.get_config_map.return_value = build_cm()
    aws_auth.write_config_map.return_value = build_cm(default=DATA_UPDATE)

    old = {"spec": {"mappings": [DATA_DEFAULT]}}
    new = {"spec": {"mappings": [DATA_UPDATE]}}
    result = aws_auth.update_fn(logger, old=old, new=new, spec={}, diff={}, kwargs={})

    assert result["message"] == "All good"
    aws_auth.get_config_map.assert_called_once()
    aws_auth.write_config_map.assert_called_once()
    # The written ConfigMap must contain only the updated mapping.
    positional, _ = aws_auth.write_config_map.call_args
    written = positional[0]
    assert isinstance(written, kubernetes.client.V1ConfigMap)
    expected = {
        "mapRoles": yaml.dump(rename_arn_keys([DATA_UPDATE]), default_flow_style=False)
    }
    assert written.data == expected
def test_create_failed(mocker):
    """create_fn raises kopf.PermanentError when the written ConfigMap does
    not contain the requested mapping (write-back returns an empty default).
    """
    # Mock setup belongs OUTSIDE pytest.raises: only the call under test may
    # raise, otherwise a setup failure could mask the behavior being tested.
    mocker.patch("aws_auth.get_protected_mapping")
    mocker.patch("aws_auth.get_config_map")
    mocker.patch("aws_auth.write_config_map")
    mocker.patch("aws_auth.write_last_handled_mapping")
    aws_auth.get_config_map.return_value = build_cm()
    aws_auth.write_config_map.return_value = build_cm(default={})
    with pytest.raises(kopf.PermanentError) as err:
        aws_auth.create_fn(logger, spec={"mappings": [DATA_CREATE]}, meta={}, kwargs={})
    # Assert on the exception itself, not the ExceptionInfo wrapper's repr.
    assert "Add Roles failed" in str(err.value)
def test_update_failed(mocker):
    """update_fn raises kopf.PermanentError when the write-back still shows
    the pre-update state (the updated mapping never landed).
    """
    # Mock setup belongs OUTSIDE pytest.raises so a setup failure cannot be
    # mistaken for the expected operator error.
    mocker.patch("aws_auth.get_protected_mapping")
    mocker.patch("aws_auth.get_config_map")
    mocker.patch("aws_auth.write_config_map")
    mocker.patch("aws_auth.write_last_handled_mapping")
    aws_auth.get_config_map.return_value = build_cm()
    aws_auth.write_config_map.return_value = build_cm()
    old = {"spec": {"mappings": [DATA_DEFAULT]}}
    new = {"spec": {"mappings": [DATA_UPDATE]}}
    with pytest.raises(kopf.PermanentError) as err:
        aws_auth.update_fn(logger, old=old, new=new, spec={}, diff={}, kwargs={})
    # Assert on the exception itself, not the ExceptionInfo wrapper's repr.
    assert "Update Roles failed" in str(err.value)
def test_delete_failed(mocker):
    """delete_fn raises kopf.PermanentError when the mapping to delete is
    still present in the written ConfigMap.
    """
    # Mock setup belongs OUTSIDE pytest.raises so a setup failure cannot be
    # mistaken for the expected operator error.
    mocker.patch("aws_auth.get_protected_mapping")
    mocker.patch("aws_auth.get_config_map")
    mocker.patch("aws_auth.write_config_map")
    mocker.patch("aws_auth.write_last_handled_mapping")
    aws_auth.get_config_map.return_value = build_cm(extra_data=DATA_CREATE)
    aws_auth.write_config_map.return_value = build_cm(extra_data=DATA_CREATE)
    with pytest.raises(kopf.PermanentError) as err:
        aws_auth.delete_fn(logger, spec={"mappings": [DATA_CREATE]}, meta={}, kwargs={})
    # Assert on the exception itself, not the ExceptionInfo wrapper's repr.
    assert "Delete Roles failed" in str(err.value)
def test_create_invalid_spec():
    """An empty spec is rejected with an invalid-schema message."""
    result = aws_auth.create_fn(logger, spec={}, meta={}, kwargs={})
    assert result["message"] == "invalid schema {}"
def test_update_invalid_spec():
    """update_fn reports an invalid-schema message when `new` is empty."""
    old = {"spec": {"mappings": [DATA_DEFAULT]}}
    new = {}
    # Fixed the accidental duplicated assignment (`message = message = ...`).
    message = aws_auth.update_fn(
        logger, old=old, new=new, spec={}, diff={}, kwargs={}
    )
    assert message["message"] == "invalid schema {}"
def test_delete_invalid_spec():
    """An empty spec on delete is rejected with an invalid-schema message."""
    result = aws_auth.delete_fn(logger, spec={}, meta={}, kwargs={})
    assert result["message"] == "invalid schema {}"
def test_startup(mocker):
    """startup() seeds the protected mapping when none exists yet."""
    settings = kopf.OperatorSettings()
    for target in ("kopf.login_via_client", "get_protected_mapping",
                   "get_config_map", "write_protected_mapping"):
        mocker.patch(f"aws_auth.{target}")
    # No protected mapping stored yet -> startup must create one.
    aws_auth.get_protected_mapping.return_value = None

    aws_auth.startup(logger, settings=settings)

    for mocked in (aws_auth.get_protected_mapping,
                   aws_auth.get_config_map,
                   aws_auth.write_protected_mapping):
        mocked.assert_called_once()
def test_create_overwrite_protected_mapping(mocker):
    """create_fn refuses mappings already covered by the protected mapping."""
    for target in ("get_protected_mapping", "get_config_map", "write_config_map"):
        mocker.patch(f"aws_auth.{target}")
    aws_auth.get_protected_mapping.return_value = {"spec": {"mappings": [DATA_CREATE]}}
    aws_auth.get_config_map.return_value = build_cm()
    aws_auth.write_config_map.return_value = build_cm(extra_data=DATA_CREATE)

    result = aws_auth.create_fn(
        logger, spec={"mappings": [DATA_CREATE]}, meta={}, kwargs={}
    )

    assert result["message"] == "overwriting protected mapping not possible"
    # The handler must bail out before ever touching the ConfigMap.
    aws_auth.get_config_map.assert_not_called()
    aws_auth.write_config_map.assert_not_called()
    aws_auth.get_protected_mapping.assert_called_once()
def test_log_config_map_change(mocker):
    """Smoke test: log_config_map_change runs without raising when the
    observed ConfigMap differs from the last handled mapping."""
    mocker.patch("aws_auth.get_last_handled_mapping")
    aws_auth.get_last_handled_mapping.return_value = {
        "spec": {"mappings": [DATA_CREATE]}
    }
    aws_auth.log_config_map_change(logger, {"data": CM_DATA_2})
def build_cm(default=DATA_DEFAULT, extra_data=None):
    """Build a V1ConfigMap whose mapRoles YAML holds the given mapping(s).

    `default` is always included; `extra_data`, when given, is appended.
    """
    mappings = [default] if extra_data is None else [default, extra_data]
    config_map = kubernetes.client.V1ConfigMap(
        data={
            "mapRoles": yaml.dump(rename_arn_keys(mappings), default_flow_style=False)
        }
    )
    config_map.metadata = kubernetes.client.V1ObjectMeta()
    return config_map
def rename_arn_keys(mappings):
    """Translate operator-style mappings into aws-auth ConfigMap entries.

    Each mapping's "arn" key is renamed to "rolearn" (for role usertypes) or
    "userarn" (otherwise), and the "usertype" key is dropped. Input dicts are
    shallow-copied, never mutated.

    Returns an empty list for an empty input or when the first mapping is
    falsy (e.g. `[{}]`, used by tests to simulate a broken write-back).
    """
    result = []
    # Guard `mappings` itself too: the original indexed mappings[0] and
    # raised IndexError on an empty list.
    if not mappings or not mappings[0]:
        return result
    for original in mappings:
        mapping = copy.copy(original)
        arn_key = "rolearn" if mapping["usertype"] == UserType.Role else "userarn"
        mapping[arn_key] = mapping.pop("arn")
        mapping.pop("usertype")
        result.append(mapping)
    return result
| 34.837302
| 88
| 0.697004
| 1,183
| 8,779
| 4.83601
| 0.094675
| 0.096661
| 0.062926
| 0.100682
| 0.821185
| 0.815766
| 0.782905
| 0.746023
| 0.705296
| 0.687992
| 0
| 0.00724
| 0.166192
| 8,779
| 251
| 89
| 34.976096
| 0.774317
| 0.003531
| 0
| 0.490291
| 0
| 0
| 0.20668
| 0.12673
| 0
| 0
| 0
| 0
| 0.145631
| 1
| 0.072816
| false
| 0
| 0.038835
| 0
| 0.126214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.