hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7dfe28ce643b7b9dfdc6deb719836b6a27612627
| 1,116
|
py
|
Python
|
mmdet/core/loss/__init__.py
|
mmoghadam11/ReDet
|
917d370c827b65f0dc3618899290fd9288f30f64
|
[
"Apache-2.0"
] | 1
|
2021-08-04T11:30:08.000Z
|
2021-08-04T11:30:08.000Z
|
mmdet/core/loss/__init__.py
|
mmoghadam11/ReDet
|
917d370c827b65f0dc3618899290fd9288f30f64
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/loss/__init__.py
|
mmoghadam11/ReDet
|
917d370c827b65f0dc3618899290fd9288f30f64
|
[
"Apache-2.0"
] | null | null | null |
from .losses import (weighted_nll_loss, weighted_cross_entropy,
weighted_binary_cross_entropy, sigmoid_focal_loss,
py_sigmoid_focal_loss, weighted_sigmoid_focal_loss,
mask_cross_entropy, smooth_l1_loss, weighted_smoothl1,
balanced_l1_loss, weighted_balanced_l1_loss, iou_loss,
bounded_iou_loss, weighted_iou_loss, accuracy
####################################################
# BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss
)
__all__ = [
'weighted_nll_loss', 'weighted_cross_entropy',
'weighted_binary_cross_entropy', 'sigmoid_focal_loss',
'py_sigmoid_focal_loss', 'weighted_sigmoid_focal_loss',
'mask_cross_entropy', 'smooth_l1_loss', 'weighted_smoothl1',
'balanced_l1_loss', 'weighted_balanced_l1_loss', 'bounded_iou_loss',
'weighted_iou_loss', 'iou_loss', 'accuracy'
#############################################
# 'BoundedIoULoss', 'CIoULoss', 'DIoULoss', 'GIoULoss', 'IoULoss'
]
| 48.521739
| 76
| 0.590502
| 104
| 1,116
| 5.721154
| 0.25
| 0.201681
| 0.161345
| 0.077311
| 0.963025
| 0.963025
| 0.963025
| 0.870588
| 0.668908
| 0.668908
| 0
| 0.009524
| 0.247312
| 1,116
| 22
| 77
| 50.727273
| 0.69881
| 0.106631
| 0
| 0
| 0
| 0
| 0.311644
| 0.141553
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b41af3d3d722674d7784f8ea226aa920425c976f
| 516
|
py
|
Python
|
src/modules/Reshape.py
|
ychnlgy/TIMIT-diarization
|
1fbf410cbb643de60201d2d351f1654273885674
|
[
"MIT"
] | 1
|
2021-08-19T14:28:45.000Z
|
2021-08-19T14:28:45.000Z
|
src/modules/Reshape.py
|
ychnlgy/TIMIT-diarization
|
1fbf410cbb643de60201d2d351f1654273885674
|
[
"MIT"
] | null | null | null |
src/modules/Reshape.py
|
ychnlgy/TIMIT-diarization
|
1fbf410cbb643de60201d2d351f1654273885674
|
[
"MIT"
] | 1
|
2022-03-11T07:20:06.000Z
|
2022-03-11T07:20:06.000Z
|
import torch
class Reshape(torch.nn.Module):
def __init__(self, *size, contiguous=False):
super(Reshape, self).__init__()
if not contiguous:
self.make_contiguous = self._make_contiguous
self.size = size
def forward(self, X):
X = self.make_contiguous(X)
return X.view(len(X), *self.size)
# === PRIVATE ===
def make_contiguous(self, X):
return X.contiguous()
def _make_contiguous(self, X):
return X
| 22.434783
| 56
| 0.575581
| 61
| 516
| 4.622951
| 0.360656
| 0.248227
| 0.255319
| 0.198582
| 0.382979
| 0.205674
| 0.205674
| 0
| 0
| 0
| 0
| 0
| 0.312016
| 516
| 22
| 57
| 23.454545
| 0.794366
| 0.02907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.071429
| 0.142857
| 0.642857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b41cf710f9aa7f75b77954e884c6563c458d2a84
| 205
|
py
|
Python
|
speedexcourier/generalzone/admin.py
|
sunilkrbajpai/Django-Speedex-website
|
a5ad05354b74885ad29c2f6a60028c0b546df82e
|
[
"bzip2-1.0.6"
] | null | null | null |
speedexcourier/generalzone/admin.py
|
sunilkrbajpai/Django-Speedex-website
|
a5ad05354b74885ad29c2f6a60028c0b546df82e
|
[
"bzip2-1.0.6"
] | null | null | null |
speedexcourier/generalzone/admin.py
|
sunilkrbajpai/Django-Speedex-website
|
a5ad05354b74885ad29c2f6a60028c0b546df82e
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.contrib import admin
from .models import Enquiry,Complain,Career,LoginInfo
admin.site.register(Enquiry)
admin.site.register(Complain)
admin.site.register(Career)
admin.site.register(LoginInfo)
| 29.285714
| 53
| 0.839024
| 28
| 205
| 6.142857
| 0.428571
| 0.209302
| 0.395349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058537
| 205
| 7
| 54
| 29.285714
| 0.891192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b41d8d7e202c89cb60f3385f2084245425aa3c4b
| 4,461
|
py
|
Python
|
game/settings.py
|
mikeanthony321/PacMan_Learner
|
7eda0150e581870e66223f9c078087e3b0efd2c0
|
[
"MIT"
] | null | null | null |
game/settings.py
|
mikeanthony321/PacMan_Learner
|
7eda0150e581870e66223f9c078087e3b0efd2c0
|
[
"MIT"
] | 6
|
2021-02-04T00:24:01.000Z
|
2021-03-30T02:54:49.000Z
|
game/settings.py
|
mikeanthony321/PacMan_Learner
|
7eda0150e581870e66223f9c078087e3b0efd2c0
|
[
"MIT"
] | 2
|
2021-05-04T00:00:34.000Z
|
2021-07-28T19:11:28.000Z
|
import pygame
vec = pygame.math.Vector2
# interface settings
WIDTH, HEIGHT = 560, 715
PAD_TOP, PAD_BOT = 25, 50
GRID_PIXEL_H = HEIGHT - PAD_TOP - PAD_BOT
GRID_W, GRID_H = 28, 32
CELL_W, CELL_H = WIDTH // GRID_W, GRID_PIXEL_H // GRID_H
FPS = 60
SHOW_GRID = False
# db
HIGH_SCORE: int = open("db/hs.txt", "r").readline()
COIN_SCORE: int = 10
SUPERCOIN_SCORE: int = 20
# color settings
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GOLD = (255, 189, 51)
BLUE = (51, 199, 255)
YELLOW = (190, 190, 5)
RED = (255, 0, 0)
# font settings
TITLE_TEXT_SIZE = 16
TITLE_FONT = "res/barcade.ttf"
# player settings
PLAYER_START_POS = vec(6, 14)
PLAYER_RESPAWN_POS = vec(6, 14) # or (20, 14) |
PLAYER_DEATHS = open("db/deaths.txt", "r").readline()
# ghost settings
INKY_START_POS = vec(12, 11)
BLINKY_START_POS = vec(15, 11)
PINKY_START_POS = vec(12, 17)
CLYDE_START_POS = vec(15, 17)
GHOST_START_POS = (vec(12, 11), vec(15, 11), vec(12, 17), vec(15, 17))
# AI settings
MAX_IDLE_ALLOWANCE = 40
DECISION_FREQUENCY = 0.8
REPLAY_BATCH_SIZE = 10
REPLAY_MEMORY_SIZE = 10000
EPSILON_START = 1
EPSILON_END = 0.05
EPSILON_DECAY = 98
# Q-Value settings (careful when tweaking!)
# IDLE_HISTORY_LENGTH = 10
# Q_IDLE_PENALTY = 2
Q_LEVEL_PASSED = 100
Q_PELLET_FUNC = lambda a: ((a / 2) + 1)
Q_PELLET_PROXIMITY_FACTOR = 2
Q_GHOST_PROXIMITY_FACTOR = 3
# spritesheet settings
SPRITE_SIZE = 16
MOVE_RIGHT = vec(-1, 0)
MOVE_LEFT = vec(1, 0)
MOVE_UP = vec(0, -1)
MOVE_DOWN = vec(0, 1)
GHOST_RIGHT = 0
GHOST_LEFT = 2
GHOST_UP = 4
GHOST_DOWN = 6
BLINKY_SPRITE_POS = 2
PINKY_SPRITE_POS = 3
INKY_SPRITE_POS = 4
CLYDE_SPRITE_POS = 5
POWER_PELLET_TIMER = 300
GRID = [
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3],
[ 3, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 1, 3],
[ 3, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 1, 3],
[ 3, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 1, 3],
[ 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3],
[ 3, 1, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 1, 3],
[ 3, 1, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 1, 3],
[ 3, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 4, 4, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 4, 4, 4, 4, 4, 4, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 3, 4, 4, 4, 4, 4, 4, 3, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 4, 4, 4, 4, 4, 4, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3],
[ 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3],
[ 3, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 1, 3],
[ 3, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 1, 3],
[ 3, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 3],
[ 3, 3, 3, 1, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 1, 3, 3, 3],
[ 3, 3, 3, 1, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 1, 3, 3, 3],
[ 3, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 3],
[ 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3],
[ 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3],
[ 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
]
| 39.477876
| 90
| 0.454382
| 1,160
| 4,461
| 1.67931
| 0.109483
| 0.447639
| 0.489733
| 0.533881
| 0.459959
| 0.444559
| 0.444559
| 0.444559
| 0.444559
| 0.444559
| 0
| 0.322795
| 0.290966
| 4,461
| 113
| 91
| 39.477876
| 0.293076
| 0.047971
| 0
| 0.325581
| 0
| 0
| 0.009209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.011628
| 0.011628
| 0
| 0.011628
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b4576ae394bd1fc8ff8d17a4647bb41670b7c981
| 144
|
py
|
Python
|
achievement/admin.py
|
Alt90/Student_progress_bar
|
c2f4f059a8487a58c7c321ed5005b336888fe4eb
|
[
"MIT"
] | null | null | null |
achievement/admin.py
|
Alt90/Student_progress_bar
|
c2f4f059a8487a58c7c321ed5005b336888fe4eb
|
[
"MIT"
] | 6
|
2017-03-15T17:28:26.000Z
|
2017-11-14T21:11:19.000Z
|
achievement/admin.py
|
Alt90/Student_progress_bar
|
c2f4f059a8487a58c7c321ed5005b336888fe4eb
|
[
"MIT"
] | 1
|
2017-11-14T18:58:14.000Z
|
2017-11-14T18:58:14.000Z
|
from django.contrib import admin
from achievement import models
admin.site.register(models.Student)
admin.site.register(models.StudentsGroup)
| 20.571429
| 41
| 0.840278
| 19
| 144
| 6.368421
| 0.578947
| 0.14876
| 0.280992
| 0.380165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 144
| 7
| 41
| 20.571429
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b4583cc20db642edcea06690d2d07c87cd5e56b2
| 135
|
py
|
Python
|
app/android_default/__init__.py
|
DigitME2/oee_server
|
450df961ad6eaae3b20e043629089988b5f9e438
|
[
"Apache-2.0"
] | 2
|
2020-07-14T12:45:04.000Z
|
2022-03-21T05:44:09.000Z
|
app/android_default/__init__.py
|
DigitME2/oee_server
|
450df961ad6eaae3b20e043629089988b5f9e438
|
[
"Apache-2.0"
] | 5
|
2022-01-25T00:37:38.000Z
|
2022-03-17T17:18:06.000Z
|
app/android_default/__init__.py
|
DigitME2/oee_server
|
450df961ad6eaae3b20e043629089988b5f9e438
|
[
"Apache-2.0"
] | 2
|
2020-07-14T12:45:16.000Z
|
2020-12-27T00:44:10.000Z
|
from flask import Blueprint
bp = Blueprint('android_default', __name__)
# noinspection PyPep8
from app.android_default import routes
| 19.285714
| 43
| 0.814815
| 17
| 135
| 6.117647
| 0.705882
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.125926
| 135
| 6
| 44
| 22.5
| 0.872881
| 0.140741
| 0
| 0
| 0
| 0
| 0.131579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
c3378812b6b3cab2444207aca7220816e582a7a3
| 608
|
py
|
Python
|
csvHelper.py
|
CollinHemeltjen/morse
|
01456a7e48afc243e09c6346a80af59b601a6f59
|
[
"MIT"
] | null | null | null |
csvHelper.py
|
CollinHemeltjen/morse
|
01456a7e48afc243e09c6346a80af59b601a6f59
|
[
"MIT"
] | null | null | null |
csvHelper.py
|
CollinHemeltjen/morse
|
01456a7e48afc243e09c6346a80af59b601a6f59
|
[
"MIT"
] | null | null | null |
import csv
# helper method to create a morse dictionairy from a csv file
# where the key is the morse code
def csv_to_dictionairy_morse(csv_file, dictionairy):
with open(csv_file, mode='r') as infile:
reader = csv.reader(infile)
dictionairy.update({rows[1]:rows[0] for rows in reader})
# helper method to create a morse dictionairy from a csv file
# where the key is the normal character
def csv_to_dictionairy_text(csv_file, dictionairy):
with open(csv_file, mode='r') as infile:
reader = csv.reader(infile)
dictionairy.update({rows[0]:rows[1] for rows in reader})
| 38
| 64
| 0.720395
| 98
| 608
| 4.367347
| 0.336735
| 0.098131
| 0.065421
| 0.093458
| 0.714953
| 0.714953
| 0.714953
| 0.714953
| 0.714953
| 0.714953
| 0
| 0.00813
| 0.190789
| 608
| 15
| 65
| 40.533333
| 0.861789
| 0.310855
| 0
| 0.444444
| 0
| 0
| 0.004831
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c34446e26b2c7bc42ed7d7eaa26d557f8645d1d4
| 22
|
py
|
Python
|
app/wsgi.py
|
ahmedhisham73/switching_to_torch
|
5240abc46ee787471bf161006e24038b0ce6e837
|
[
"Apache-2.0"
] | null | null | null |
app/wsgi.py
|
ahmedhisham73/switching_to_torch
|
5240abc46ee787471bf161006e24038b0ce6e837
|
[
"Apache-2.0"
] | null | null | null |
app/wsgi.py
|
ahmedhisham73/switching_to_torch
|
5240abc46ee787471bf161006e24038b0ce6e837
|
[
"Apache-2.0"
] | null | null | null |
from main import app
| 11
| 21
| 0.772727
| 4
| 22
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 22
| 1
| 22
| 22
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c35fa26359822985efa1bfc1680742335db7cf6f
| 7,801
|
py
|
Python
|
huaweicloud-sdk-ces/huaweicloudsdkces/v1/__init__.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-ces/huaweicloudsdkces/v1/__init__.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-ces/huaweicloudsdkces/v1/__init__.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
from __future__ import absolute_import
# import CesClient
from huaweicloudsdkces.v1.ces_client import CesClient
from huaweicloudsdkces.v1.ces_async_client import CesAsyncClient
# import models into sdk package
from huaweicloudsdkces.v1.model.additional_info import AdditionalInfo
from huaweicloudsdkces.v1.model.alarm_actions import AlarmActions
from huaweicloudsdkces.v1.model.alarm_history_info import AlarmHistoryInfo
from huaweicloudsdkces.v1.model.alarm_template import AlarmTemplate
from huaweicloudsdkces.v1.model.alarm_template_condition import AlarmTemplateCondition
from huaweicloudsdkces.v1.model.batch_list_metric_data_request import BatchListMetricDataRequest
from huaweicloudsdkces.v1.model.batch_list_metric_data_request_body import BatchListMetricDataRequestBody
from huaweicloudsdkces.v1.model.batch_list_metric_data_response import BatchListMetricDataResponse
from huaweicloudsdkces.v1.model.batch_metric_data import BatchMetricData
from huaweicloudsdkces.v1.model.condition import Condition
from huaweicloudsdkces.v1.model.create_alarm_request import CreateAlarmRequest
from huaweicloudsdkces.v1.model.create_alarm_request_body import CreateAlarmRequestBody
from huaweicloudsdkces.v1.model.create_alarm_response import CreateAlarmResponse
from huaweicloudsdkces.v1.model.create_alarm_template_request import CreateAlarmTemplateRequest
from huaweicloudsdkces.v1.model.create_alarm_template_request_body import CreateAlarmTemplateRequestBody
from huaweicloudsdkces.v1.model.create_alarm_template_response import CreateAlarmTemplateResponse
from huaweicloudsdkces.v1.model.create_events_request import CreateEventsRequest
from huaweicloudsdkces.v1.model.create_events_response import CreateEventsResponse
from huaweicloudsdkces.v1.model.create_events_response_body import CreateEventsResponseBody
from huaweicloudsdkces.v1.model.create_metric_data_request import CreateMetricDataRequest
from huaweicloudsdkces.v1.model.create_metric_data_response import CreateMetricDataResponse
from huaweicloudsdkces.v1.model.create_resource_group import CreateResourceGroup
from huaweicloudsdkces.v1.model.create_resource_group_request import CreateResourceGroupRequest
from huaweicloudsdkces.v1.model.create_resource_group_request_body import CreateResourceGroupRequestBody
from huaweicloudsdkces.v1.model.create_resource_group_response import CreateResourceGroupResponse
from huaweicloudsdkces.v1.model.data_point_for_alarm_history import DataPointForAlarmHistory
from huaweicloudsdkces.v1.model.datapoint import Datapoint
from huaweicloudsdkces.v1.model.datapoint_for_batch_metric import DatapointForBatchMetric
from huaweicloudsdkces.v1.model.delete_alarm_request import DeleteAlarmRequest
from huaweicloudsdkces.v1.model.delete_alarm_response import DeleteAlarmResponse
from huaweicloudsdkces.v1.model.delete_alarm_template_request import DeleteAlarmTemplateRequest
from huaweicloudsdkces.v1.model.delete_alarm_template_response import DeleteAlarmTemplateResponse
from huaweicloudsdkces.v1.model.delete_resource_group_request import DeleteResourceGroupRequest
from huaweicloudsdkces.v1.model.delete_resource_group_response import DeleteResourceGroupResponse
from huaweicloudsdkces.v1.model.event_data_info import EventDataInfo
from huaweicloudsdkces.v1.model.event_info import EventInfo
from huaweicloudsdkces.v1.model.event_info_detail import EventInfoDetail
from huaweicloudsdkces.v1.model.event_item import EventItem
from huaweicloudsdkces.v1.model.event_item_detail import EventItemDetail
from huaweicloudsdkces.v1.model.instance_statistics import InstanceStatistics
from huaweicloudsdkces.v1.model.list_alarm_histories_request import ListAlarmHistoriesRequest
from huaweicloudsdkces.v1.model.list_alarm_histories_response import ListAlarmHistoriesResponse
from huaweicloudsdkces.v1.model.list_alarm_templates_request import ListAlarmTemplatesRequest
from huaweicloudsdkces.v1.model.list_alarm_templates_response import ListAlarmTemplatesResponse
from huaweicloudsdkces.v1.model.list_alarms_request import ListAlarmsRequest
from huaweicloudsdkces.v1.model.list_alarms_response import ListAlarmsResponse
from huaweicloudsdkces.v1.model.list_event_detail_request import ListEventDetailRequest
from huaweicloudsdkces.v1.model.list_event_detail_response import ListEventDetailResponse
from huaweicloudsdkces.v1.model.list_events_request import ListEventsRequest
from huaweicloudsdkces.v1.model.list_events_response import ListEventsResponse
from huaweicloudsdkces.v1.model.list_metrics_request import ListMetricsRequest
from huaweicloudsdkces.v1.model.list_metrics_response import ListMetricsResponse
from huaweicloudsdkces.v1.model.list_resource_group_request import ListResourceGroupRequest
from huaweicloudsdkces.v1.model.list_resource_group_response import ListResourceGroupResponse
from huaweicloudsdkces.v1.model.meta_data import MetaData
from huaweicloudsdkces.v1.model.meta_data_for_alarm_history import MetaDataForAlarmHistory
from huaweicloudsdkces.v1.model.metric_alarms import MetricAlarms
from huaweicloudsdkces.v1.model.metric_data_item import MetricDataItem
from huaweicloudsdkces.v1.model.metric_for_alarm import MetricForAlarm
from huaweicloudsdkces.v1.model.metric_info import MetricInfo
from huaweicloudsdkces.v1.model.metric_info_for_alarm import MetricInfoForAlarm
from huaweicloudsdkces.v1.model.metric_info_list import MetricInfoList
from huaweicloudsdkces.v1.model.metrics_dimension import MetricsDimension
from huaweicloudsdkces.v1.model.modify_alarm_action_req import ModifyAlarmActionReq
from huaweicloudsdkces.v1.model.quotas import Quotas
from huaweicloudsdkces.v1.model.resource import Resource
from huaweicloudsdkces.v1.model.resource_group import ResourceGroup
from huaweicloudsdkces.v1.model.resource_group_info import ResourceGroupInfo
from huaweicloudsdkces.v1.model.show_alarm_request import ShowAlarmRequest
from huaweicloudsdkces.v1.model.show_alarm_response import ShowAlarmResponse
from huaweicloudsdkces.v1.model.show_event_data_request import ShowEventDataRequest
from huaweicloudsdkces.v1.model.show_event_data_response import ShowEventDataResponse
from huaweicloudsdkces.v1.model.show_metric_data_request import ShowMetricDataRequest
from huaweicloudsdkces.v1.model.show_metric_data_response import ShowMetricDataResponse
from huaweicloudsdkces.v1.model.show_quotas_request import ShowQuotasRequest
from huaweicloudsdkces.v1.model.show_quotas_response import ShowQuotasResponse
from huaweicloudsdkces.v1.model.show_resource_group_request import ShowResourceGroupRequest
from huaweicloudsdkces.v1.model.show_resource_group_response import ShowResourceGroupResponse
from huaweicloudsdkces.v1.model.template_item import TemplateItem
from huaweicloudsdkces.v1.model.total_meta_data import TotalMetaData
from huaweicloudsdkces.v1.model.update_alarm_action_request import UpdateAlarmActionRequest
from huaweicloudsdkces.v1.model.update_alarm_action_response import UpdateAlarmActionResponse
from huaweicloudsdkces.v1.model.update_alarm_request import UpdateAlarmRequest
from huaweicloudsdkces.v1.model.update_alarm_request_body import UpdateAlarmRequestBody
from huaweicloudsdkces.v1.model.update_alarm_response import UpdateAlarmResponse
from huaweicloudsdkces.v1.model.update_alarm_template_request import UpdateAlarmTemplateRequest
from huaweicloudsdkces.v1.model.update_alarm_template_request_body import UpdateAlarmTemplateRequestBody
from huaweicloudsdkces.v1.model.update_alarm_template_response import UpdateAlarmTemplateResponse
from huaweicloudsdkces.v1.model.update_resource_group_request import UpdateResourceGroupRequest
from huaweicloudsdkces.v1.model.update_resource_group_request_body import UpdateResourceGroupRequestBody
from huaweicloudsdkces.v1.model.update_resource_group_response import UpdateResourceGroupResponse
| 77.237624
| 105
| 0.913729
| 884
| 7,801
| 7.808824
| 0.165158
| 0.28292
| 0.309865
| 0.369115
| 0.524989
| 0.469361
| 0.301898
| 0.085035
| 0.015645
| 0
| 0
| 0.012686
| 0.050122
| 7,801
| 100
| 106
| 78.01
| 0.918893
| 0.00782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6f6313266bdbacf1959edb85efd500cb3def3604
| 25
|
py
|
Python
|
__init__.py
|
mhawry/auquantoolbox
|
f88b052d04baa81575b6886f829bca71d9425196
|
[
"Apache-2.0"
] | 91
|
2020-07-31T13:57:49.000Z
|
2022-01-21T14:16:47.000Z
|
__init__.py
|
mhawry/auquantoolbox
|
f88b052d04baa81575b6886f829bca71d9425196
|
[
"Apache-2.0"
] | 3
|
2021-02-26T14:43:10.000Z
|
2022-01-12T14:58:01.000Z
|
__init__.py
|
mhawry/auquantoolbox
|
f88b052d04baa81575b6886f829bca71d9425196
|
[
"Apache-2.0"
] | 30
|
2020-07-30T13:48:00.000Z
|
2022-03-09T14:20:36.000Z
|
from backtester import *
| 12.5
| 24
| 0.8
| 3
| 25
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
48cf73cbfcab92b9f944a6068e47f294f15979d4
| 148
|
py
|
Python
|
src/werkzeug/__init__.py
|
salva-imm/werkzeug
|
44866cc6419dd1742b86a0b7bccd32ba2eb58f77
|
[
"BSD-3-Clause"
] | 1
|
2021-04-03T16:49:38.000Z
|
2021-04-03T16:49:38.000Z
|
src/werkzeug/__init__.py
|
salva-imm/werkzeug
|
44866cc6419dd1742b86a0b7bccd32ba2eb58f77
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/__init__.py
|
salva-imm/werkzeug
|
44866cc6419dd1742b86a0b7bccd32ba2eb58f77
|
[
"BSD-3-Clause"
] | null | null | null |
from .serving import run_simple
from .test import Client
from .wrappers import Request
from .wrappers import Response
__version__ = "2.0.0rc2.dev"
| 21.142857
| 31
| 0.797297
| 22
| 148
| 5.136364
| 0.681818
| 0.212389
| 0.318584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.135135
| 148
| 6
| 32
| 24.666667
| 0.851563
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
48d11978f6b933a18257cb32ed895d32fe42455b
| 122
|
py
|
Python
|
dindin_attendance/models/__init__.py
|
redblow/odooDingDing
|
d5bea9d69889615819d82d94924a5d54b498db03
|
[
"Apache-2.0"
] | 6
|
2019-10-04T01:57:03.000Z
|
2021-10-25T00:53:27.000Z
|
dindin_attendance/models/__init__.py
|
redblow/odooDingDing
|
d5bea9d69889615819d82d94924a5d54b498db03
|
[
"Apache-2.0"
] | null | null | null |
dindin_attendance/models/__init__.py
|
redblow/odooDingDing
|
d5bea9d69889615819d82d94924a5d54b498db03
|
[
"Apache-2.0"
] | 1
|
2022-03-22T09:23:48.000Z
|
2022-03-22T09:23:48.000Z
|
# -*- coding: utf-8 -*-
from . import simplegroups
from . import attendance_list # 已弃用 原考勤列表
from . import hr_attendance
| 24.4
| 42
| 0.721311
| 16
| 122
| 5.375
| 0.6875
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.172131
| 122
| 5
| 43
| 24.4
| 0.841584
| 0.254098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
48f758e17cde991395d08cba48e5ddae38f301a7
| 25,116
|
py
|
Python
|
test/network.py
|
namjiwon1023/Code_With_RL
|
37beec975b1685e9f6cf991abed491b854b78173
|
[
"MIT"
] | 3
|
2021-08-12T15:11:28.000Z
|
2021-09-27T16:04:16.000Z
|
test/network.py
|
namjiwon1023/Code_With_RL
|
37beec975b1685e9f6cf991abed491b854b78173
|
[
"MIT"
] | null | null | null |
test/network.py
|
namjiwon1023/Code_With_RL
|
37beec975b1685e9f6cf991abed491b854b78173
|
[
"MIT"
] | 1
|
2021-08-05T07:20:57.000Z
|
2021-08-05T07:20:57.000Z
|
# Copyright (c) 2021: Zhiyuan Nan (namjw@hanyang.ac.kr).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import os
import numpy as np
import math
from torch.distributions import Normal, Categorical
import random
from test.utils import build_mlp, initialize_weight, reset_parameters, reset_single_layer_parameters
''' Simple neural network structure '''
class QNetwork(nn.Module):
    """DQN state-action value network; optionally uses NoisyLinear layers for exploration."""
    def __init__(self, n_states, n_actions, args):
        super(QNetwork, self).__init__()
        self.args = args
        self.device = args.device
        if not args.use_noisy_layer:
            # Plain two-hidden-layer MLP mapping state -> one Q-value per action.
            self.critic = nn.Sequential(nn.Linear(n_states, args.hidden_units),
                                        nn.ReLU(),
                                        nn.Linear(args.hidden_units, args.hidden_units),
                                        nn.ReLU(),
                                        nn.Linear(args.hidden_units, n_actions)
                                        )
            reset_parameters(self.critic)
        else:
            # NoisyNet variant: deterministic feature layer followed by two noisy layers.
            self.feature = nn.Linear(n_states, args.hidden_units)
            self.noisy_layer1 = NoisyLinear(args.hidden_units, args.hidden_units)
            self.noisy_layer2 = NoisyLinear(args.hidden_units, n_actions)
            reset_single_layer_parameters(self.feature)
        self.to(self.device)
    def forward(self, state):
        """Return Q-values for every action given *state*."""
        if not self.args.use_noisy_layer:
            out = self.critic(state)
        else:
            feature = F.relu(self.feature(state))
            hidden = F.relu(self.noisy_layer1(feature))
            out = self.noisy_layer2(hidden)
        return out
    def reset_noise(self):
        """Resample the noise of both noisy layers (only valid in noisy mode)."""
        self.noisy_layer1.reset_noise()
        self.noisy_layer2.reset_noise()
class DuelingNetwork(nn.Module):
    """Dueling DQN head: a shared feature layer feeding separate advantage and value streams."""
    def __init__(self, n_states, n_actions, args):
        super(DuelingNetwork, self).__init__()
        self.device = args.device
        hidden = args.hidden_units
        self.feature = nn.Sequential(nn.Linear(n_states, hidden), nn.ReLU())
        self.advantage = nn.Sequential(nn.Linear(hidden, hidden),
                                       nn.ReLU(),
                                       nn.Linear(hidden, n_actions))
        self.value = nn.Sequential(nn.Linear(hidden, hidden),
                                   nn.ReLU(),
                                   nn.Linear(hidden, 1))
        for module in (self.feature, self.advantage, self.value):
            reset_parameters(module)
        self.to(self.device)
    def forward(self, state):
        """Combine the two streams: Q(s,a) = V(s) + A(s,a) - mean_a A(s,a)."""
        shared = self.feature(state)
        adv = self.advantage(shared)
        val = self.value(shared)
        return val + adv - adv.mean(dim=-1, keepdim=True)
class DuelingTwinNetwork(nn.Module):
    """Twin dueling network: two independent (advantage, value) heads over one shared feature layer."""
    def __init__(self, n_states, n_actions, args):
        super(DuelingTwinNetwork, self).__init__()
        self.device = args.device
        self.feature = nn.Sequential(nn.Linear(n_states, args.hidden_units),
                                     nn.ReLU(),)
        self.advantage1 = nn.Sequential(
            nn.Linear(args.hidden_units, args.hidden_units),
            nn.ReLU(),
            nn.Linear(args.hidden_units, n_actions),
        )
        self.value1 = nn.Sequential(
            nn.Linear(args.hidden_units, args.hidden_units),
            nn.ReLU(),
            nn.Linear(args.hidden_units, 1),
        )
        self.advantage2 = nn.Sequential(
            nn.Linear(args.hidden_units, args.hidden_units),
            nn.ReLU(),
            nn.Linear(args.hidden_units, n_actions),
        )
        self.value2 = nn.Sequential(
            nn.Linear(args.hidden_units, args.hidden_units),
            nn.ReLU(),
            nn.Linear(args.hidden_units, 1),
        )
        reset_parameters(self.feature)
        reset_parameters(self.advantage1)
        reset_parameters(self.value1)
        reset_parameters(self.advantage2)
        reset_parameters(self.value2)
        self.to(self.device)
    def forward(self, state):
        """Return Q-values from the first head only (used for action selection)."""
        feature = self.feature(state)
        advantage1 = self.advantage1(feature)
        value1 = self.value1(feature)
        # Here we calculate advantage Q(s,a) = A(s,a) + V(s)
        out = value1 + advantage1 - advantage1.mean(dim=-1, keepdim=True)
        return out
    def get_double_q(self, state):
        """Return (q1, q2) from both heads, e.g. for a clipped double-Q target."""
        feature = self.feature(state)
        advantage1 = self.advantage1(feature)
        value1 = self.value1(feature)
        advantage2 = self.advantage2(feature)
        value2 = self.value2(feature)
        q1 = value1 + advantage1 - advantage1.mean(dim=-1, keepdim=True)
        q2 = value2 + advantage2 - advantage2.mean(dim=-1, keepdim=True)
        return q1, q2
class NoisyLinear(nn.Module):
    """Factorized-Gaussian noisy linear layer (NoisyNet) for learned exploration.

    Effective weight is weight_mu + weight_sigma * weight_epsilon (same scheme
    for the bias); the epsilon buffers are resampled via reset_noise().
    """
    def __init__(self, in_features, out_features, std_init=0.5):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        shape = (out_features, in_features)
        self.weight_mu = nn.Parameter(T.Tensor(*shape))
        self.weight_sigma = nn.Parameter(T.Tensor(*shape))
        self.register_buffer("weight_epsilon", T.Tensor(*shape))
        self.bias_mu = nn.Parameter(T.Tensor(out_features))
        self.bias_sigma = nn.Parameter(T.Tensor(out_features))
        self.register_buffer("bias_epsilon", T.Tensor(out_features))
        self.reset_parameters()
        self.reset_noise()
    def reset_parameters(self):
        """Initialize mu uniformly in +-1/sqrt(fan_in) and sigma to std_init/sqrt(fan)."""
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
    def reset_noise(self):
        """Resample factorized noise: weight epsilon is the outer product of two vectors."""
        eps_in = self.scale_noise(self.in_features)
        eps_out = self.scale_noise(self.out_features)
        self.weight_epsilon.copy_(eps_out.ger(eps_in))
        self.bias_epsilon.copy_(eps_out)
    def forward(self, x):
        """Apply the noisy affine transform to *x*."""
        weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return F.linear(x, weight, bias)
    @staticmethod
    def scale_noise(size):
        """Return sign(x) * sqrt(|x|) for x ~ N(0, 1), the factorized-noise transform."""
        noise = T.randn(size)
        return noise.sign().mul(noise.abs().sqrt())
class Actor(nn.Module): # Deterministic Policy Gradient(DPG), Deep Deterministic Policy Gradient(DDPG), Twin Delayed Deep Deterministic Policy Gradients(TD3)
    """Deterministic policy: maps a state to a tanh-bounded action, optionally scaled by max_action."""
    def __init__(self, n_states, n_actions, args, max_action=None):
        super(Actor, self).__init__()
        self.device = args.device
        self.max_action = max_action
        self.pi = nn.Sequential(nn.Linear(n_states, args.hidden_units),
                                nn.ReLU(),
                                nn.Linear(args.hidden_units, args.hidden_units),
                                nn.ReLU(),
                                nn.Linear(args.hidden_units, n_actions),
                                nn.Tanh())
        reset_parameters(self.pi)
        self.to(self.device)
    def forward(self, state):
        """Return the action in [-1, 1], scaled to [-max_action, max_action] when max_action is set."""
        u = self.pi(state)
        # Fixed: identity comparison with None (was `== None`).
        if self.max_action is None:
            return u
        return self.max_action * u
class ActorA2C(nn.Module): # Advantage Actor-Critic
    """Gaussian A2C policy: returns (mu, std) of a Normal action distribution."""
    def __init__(self, n_states, n_actions, args):
        super(ActorA2C, self).__init__()
        self.args = args
        self.device = args.device
        self.feature = nn.Sequential(nn.Linear(n_states, args.hidden_units),
                                     nn.ReLU(),
                                     nn.Linear(args.hidden_units, args.hidden_units),
                                     nn.ReLU(),
                                     )
        self.mu = nn.Linear(args.hidden_units, n_actions)
        self.log_std = nn.Linear(args.hidden_units, n_actions)
        reset_parameters(self.feature)
        reset_single_layer_parameters(self.mu)
        reset_single_layer_parameters(self.log_std)
        self.to(self.device)
    def forward(self, state):
        """Return (mu, std) for the action distribution given *state*."""
        feature = self.feature(state)
        # Mean squashed to [-2, 2]; presumably matched to a [-2, 2] action space
        # (e.g. Pendulum) -- TODO confirm against the environment used.
        mu = T.tanh(self.mu(feature)) * 2
        # softplus keeps the pre-exp value positive before exponentiation
        log_std = F.softplus(self.log_std(feature))
        std = T.exp(log_std)
        return mu, std
class ActorPPO(nn.Module): # Proximal Policy Optimization
    """PPO Gaussian policy with a state-independent, learnable log-std parameter."""
    def __init__(self, n_states, n_actions, args):
        super(ActorPPO, self).__init__()
        self.args = args
        self.device = args.device
        self.mu = nn.Sequential(nn.Linear(n_states, args.hidden_units),
                                nn.ReLU(),
                                nn.Linear(args.hidden_units, args.hidden_units),
                                nn.ReLU(),
                                nn.Linear(args.hidden_units, n_actions),
                                )
        # One log-std per action dimension, shared across states, initialized at -0.5.
        self.log_std = nn.Parameter(T.zeros((1, n_actions)) -0.5, requires_grad=True)
        reset_parameters(self.mu)
        self.to(self.device)
    def forward(self, state):
        """Return (mu, std); std is broadcast to mu's shape."""
        mu = self.mu(state)
        std = T.exp(self.log_std).expand_as(mu)
        return mu, std
class ActorSAC(nn.Module): # Soft Actor-Critic
    """Squashed-Gaussian SAC policy: returns a tanh-squashed action and its log-probability."""
    def __init__(self, n_states, n_actions, args, max_action=None):
        super(ActorSAC, self).__init__()
        self.args = args
        self.device = args.device
        self.min_log_std = args.min_log_std
        self.max_log_std = args.max_log_std
        self.max_action = max_action
        self.feature = nn.Sequential(nn.Linear(n_states, args.hidden_units),
                                     nn.ReLU(),
                                     nn.Linear(args.hidden_units, args.hidden_units),
                                     nn.ReLU(),
                                     )
        self.log_std = nn.Linear(args.hidden_units, n_actions)
        self.mu = nn.Linear(args.hidden_units, n_actions)
        reset_parameters(self.feature)
        reset_single_layer_parameters(self.log_std)
        reset_single_layer_parameters(self.mu)
        self.to(self.device)
    def forward(self, state):
        """Sample an action for *state*; returns (action, log_prob), log_prob may be None."""
        feature = self.feature(state)
        mu = self.mu(feature)
        log_std = self.log_std(feature)
        log_std = T.clamp(log_std, self.min_log_std, self.max_log_std)
        std = T.exp(log_std)
        dist = Normal(mu, std)
        z = dist.rsample()  # reparameterized sample keeps gradients flowing
        if self.args.evaluate:
            # Deterministic mean action during evaluation.
            action = mu.tanh()
        else:
            action = z.tanh()
        if self.args.with_logprob:
            # tanh change-of-variables correction; 1e-7 guards log(0).
            log_prob = dist.log_prob(z) - T.log(1 - action.pow(2) + 1e-7)
            log_prob = log_prob.sum(-1, keepdim=True)
        else:
            log_prob = None
        # Fixed: identity comparison with None (was `== None`).
        if self.max_action is None:
            return action, log_prob
        return self.max_action*action, log_prob
class CriticQ(nn.Module): # Action Value Function
    """Q(s, a) critic: concatenates state and action and maps them to a scalar value."""
    def __init__(self, n_states, n_actions, args):
        super(CriticQ, self).__init__()
        self.device = args.device
        hidden = args.hidden_units
        self.Value = nn.Sequential(nn.Linear(n_states + n_actions, hidden),
                                   nn.ReLU(),
                                   nn.Linear(hidden, hidden),
                                   nn.ReLU(),
                                   nn.Linear(hidden, 1))
        reset_parameters(self.Value)
        self.to(self.device)
    def forward(self, state, action):
        """Return Q(state, action) with shape (batch, 1)."""
        return self.Value(T.cat((state, action), dim=-1))
class CriticV(nn.Module): # State Value Function
    """V(s) critic: maps a state to a scalar state value."""
    def __init__(self, n_states, args):
        super(CriticV, self).__init__()
        self.device = args.device
        self.Value = nn.Sequential(nn.Linear(n_states, args.hidden_units),
                                   nn.ReLU(),
                                   nn.Linear(args.hidden_units, args.hidden_units),
                                   nn.ReLU(),
                                   nn.Linear(args.hidden_units, 1)
                                   )
        reset_parameters(self.Value)
        self.to(self.device)
    def forward(self, state):
        """Return V(state) with shape (batch, 1)."""
        V = self.Value(state)
        return V
class CriticTwin(nn.Module): # Twin Delayed Deep Deterministic Policy Gradients(TD3), Double Deep Q Network
    """Twin Q(s, a) critics sharing the same input; forward() uses only the first."""
    def __init__(self, n_states, n_actions, args):
        super(CriticTwin, self).__init__()
        self.device = args.device
        self.Value1 = nn.Sequential(nn.Linear(n_states + n_actions, args.hidden_units),
                                    nn.ReLU(),
                                    nn.Linear(args.hidden_units, args.hidden_units),
                                    nn.ReLU(),
                                    nn.Linear(args.hidden_units, 1)
                                    )
        self.Value2 = nn.Sequential(nn.Linear(n_states + n_actions, args.hidden_units),
                                    nn.ReLU(),
                                    nn.Linear(args.hidden_units, args.hidden_units),
                                    nn.ReLU(),
                                    nn.Linear(args.hidden_units, 1)
                                    )
        reset_parameters(self.Value1)
        reset_parameters(self.Value2)
        self.to(self.device)
    def forward(self, state, action):
        """Return Q1(state, action) only (actor update path)."""
        cat = T.cat((state, action), dim=-1)
        Q1 = self.Value1(cat)
        return Q1
    def get_double_q(self, state, action):
        """Return (Q1, Q2) for clipped double-Q targets."""
        cat = T.cat((state, action), dim=-1)
        Q1 = self.Value1(cat)
        Q2 = self.Value2(cat)
        return Q1, Q2
'''Use functions(build_mlp) to create neural networks'''
class QNetwork_mlp(nn.Module):
    """build_mlp-based variant of QNetwork; optionally uses NoisyLinear layers."""
    def __init__(self, n_states, n_actions, args):
        super(QNetwork_mlp, self).__init__()
        self.args = args
        self.device = args.device
        if not args.use_noisy_layer:
            self.net = build_mlp(
                input_dim=n_states,
                output_dim=n_actions,
                hidden_units=args.hidden_units_mlp,
                hidden_activation=nn.ReLU(),
            ).apply(initialize_weight)
        else:
            # NoisyNet variant: layer widths taken from the first two entries of hidden_units_mlp.
            self.feature = nn.Linear(n_states, args.hidden_units_mlp[0])
            self.noisy_layer1 = NoisyLinear(args.hidden_units_mlp[0], args.hidden_units_mlp[1])
            self.noisy_layer2 = NoisyLinear(args.hidden_units_mlp[1], n_actions)
            self.apply(initialize_weight)
        self.to(self.device)
    def forward(self, state):
        """Return Q-values for every action given *state*."""
        if not self.args.use_noisy_layer:
            out = self.net(state)
        else:
            feature = F.relu(self.feature(state))
            hidden = F.relu(self.noisy_layer1(feature))
            out = self.noisy_layer2(hidden)
        return out
    def reset_noise(self):
        """Resample the noise of both noisy layers (only valid in noisy mode)."""
        self.noisy_layer1.reset_noise()
        self.noisy_layer2.reset_noise()
class DuelingNetwork_mlp(nn.Module):
    """build_mlp-based dueling network: shared feature + advantage and value streams."""
    def __init__(self, n_states, n_actions, args):
        super(DuelingNetwork_mlp, self).__init__()
        self.device = args.device
        self.feature = build_mlp(
            input_dim=n_states,
            output_dim=args.hidden_units_mlp[0],
            hidden_units=args.hidden_units_mlp[:1],
            hidden_activation=nn.ReLU(),
            output_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.advantage = build_mlp(
            input_dim=args.hidden_units_mlp[0],
            output_dim=n_actions,
            hidden_units=args.hidden_units_mlp[1:],
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.value = build_mlp(
            input_dim=args.hidden_units_mlp[0],
            output_dim=1,
            hidden_units=args.hidden_units_mlp[1:],
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state):
        """Combine streams: Q(s,a) = V(s) + A(s,a) - mean_a A(s,a)."""
        feature = self.feature(state)
        advantage = self.advantage(feature)
        value = self.value(feature)
        # Here we calculate advantage Q(s,a) = A(s,a) + V(s)
        out = value + advantage - advantage.mean(dim=-1, keepdim=True)
        return out
class DuelingTwinNetwork_mlp(nn.Module):
    """build_mlp-based twin dueling network; two (advantage, value) heads over one feature net."""
    def __init__(self, n_states, n_actions, args):
        super(DuelingTwinNetwork_mlp, self).__init__()
        self.device = args.device
        self.feature = build_mlp(
            input_dim=n_states,
            output_dim=args.hidden_units_mlp[0],
            hidden_units=args.hidden_units_mlp[:1],
            hidden_activation=nn.ReLU(),
            output_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.advantage1 = build_mlp(
            input_dim=args.hidden_units_mlp[0],
            output_dim=n_actions,
            hidden_units=args.hidden_units_mlp[1:],
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.value1 = build_mlp(
            input_dim=args.hidden_units_mlp[0],
            output_dim=1,
            hidden_units=args.hidden_units_mlp[1:],
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.advantage2 = build_mlp(
            input_dim=args.hidden_units_mlp[0],
            output_dim=n_actions,
            hidden_units=args.hidden_units_mlp[1:],
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.value2 = build_mlp(
            input_dim=args.hidden_units_mlp[0],
            output_dim=1,
            hidden_units=args.hidden_units_mlp[1:],
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state):
        """Return Q-values from the first head only."""
        feature = self.feature(state)
        advantage1 = self.advantage1(feature)
        value1 = self.value1(feature)
        # Here we calculate advantage Q(s,a) = A(s,a) + V(s)
        out = value1 + advantage1 - advantage1.mean(dim=-1, keepdim=True)
        return out
    def get_double_q(self, state):
        """Return (q1, q2) from both heads."""
        feature = self.feature(state)
        advantage1 = self.advantage1(feature)
        value1 = self.value1(feature)
        advantage2 = self.advantage2(feature)
        value2 = self.value2(feature)
        q1 = value1 + advantage1 - advantage1.mean(dim=-1, keepdim=True)
        q2 = value2 + advantage2 - advantage2.mean(dim=-1, keepdim=True)
        return q1, q2
class DeterministicPolicy_mlp(nn.Module): # Deterministic Policy Gradient(DPG), Deep Deterministic Policy Gradient(DDPG), Twin Delayed Deep Deterministic Policy Gradients(TD3)
    """build_mlp-based deterministic policy: tanh-bounded action, optionally scaled by max_action."""
    def __init__(self, n_states, n_actions, args, max_action=None):
        super(DeterministicPolicy_mlp, self).__init__()
        self.device = args.device
        self.max_action = max_action
        self.pi = build_mlp(
            input_dim=n_states,
            output_dim=n_actions,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
            output_activation=nn.Tanh(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state):
        """Return the action for *state*, scaled when max_action is set."""
        # Fixed: identity comparison with None (was `== None`).
        if self.max_action is None: return self.pi(state) # action -> tanh() -> [-1,1]
        return self.max_action * self.pi(state) # max_action -> [-max_action, max_action]
class ActorA2C_mlp(nn.Module): # Advantage Actor-Critic
    """build_mlp-based A2C policy: one net outputs both mu and log_std (chunked in half)."""
    def __init__(self, n_states, n_actions, args):
        super(ActorA2C_mlp, self).__init__()
        self.args = args
        self.device = args.device
        self.net = build_mlp(
            input_dim=n_states,
            output_dim=2*n_actions,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state):
        """Return (mu, std) for the action distribution given *state*."""
        mu, log_std = T.chunk(self.net(state), 2, dim=-1)
        # Mean squashed to [-2, 2]; presumably matched to a [-2, 2] action space -- TODO confirm.
        mu = T.tanh(mu) * 2
        log_std = F.softplus(log_std)
        std = T.exp(log_std)
        return mu, std
class ActorPPO_mlp(nn.Module): # Proximal Policy Optimization
    """build_mlp-based PPO policy with a state-independent, learnable log-std parameter."""
    def __init__(self, n_states, n_actions, args):
        super(ActorPPO_mlp, self).__init__()
        self.args = args
        self.device = args.device
        self.mu = build_mlp(
            input_dim=n_states,
            output_dim=n_actions,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        # One log-std per action dimension, shared across states, initialized at -0.5.
        self.log_std = nn.Parameter(T.zeros((1, n_actions)) -0.5, requires_grad=True)
        self.to(self.device)
    def forward(self, state):
        """Return (mu, std); std is broadcast to mu's shape."""
        mu = self.mu(state)
        std = T.exp(self.log_std).expand_as(mu)
        return mu, std
class ActorSAC_mlp(nn.Module): # Soft Actor-Critic
    """build_mlp-based SAC policy: one net outputs mu and log_std; action is tanh-squashed."""
    def __init__(self, n_states, n_actions, args, max_action=None, min_log_std=-20, max_log_std=2):
        super(ActorSAC_mlp, self).__init__()
        self.args = args
        self.device = args.device
        self.min_log_std = min_log_std
        self.max_log_std = max_log_std
        self.max_action = max_action
        self.net = build_mlp(
            input_dim=n_states,
            output_dim=2*n_actions,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state):
        """Sample an action for *state*; returns (action, log_prob), log_prob may be None."""
        mu, log_std = T.chunk(self.net(state), 2, dim=-1)
        log_std = T.clamp(log_std, self.min_log_std, self.max_log_std)
        std = T.exp(log_std)
        dist = Normal(mu, std)
        z = dist.rsample()  # reparameterized sample keeps gradients flowing
        if self.args.evaluate:
            # Deterministic mean action during evaluation.
            action = mu.tanh()
        else:
            action = z.tanh()
        if self.args.with_logprob:
            # tanh change-of-variables correction; 1e-7 guards log(0).
            log_prob = dist.log_prob(z) - T.log(1 - action.pow(2) + 1e-7)
            log_prob = log_prob.sum(-1, keepdim=True)
        else:
            log_prob = None
        # Fixed: identity comparison with None (was `== None`).
        if self.max_action is None:
            return action, log_prob
        return self.max_action*action, log_prob
class CriticQ_mlp(nn.Module): # Action Value Function
    """build_mlp-based Q(s, a) critic over the concatenated state-action vector."""
    def __init__(self, n_states, n_actions, args):
        super(CriticQ_mlp, self).__init__()
        self.device = args.device
        self.value = build_mlp(
            input_dim=n_states + n_actions,
            output_dim=1,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state, action):
        """Return Q(state, action) with shape (batch, 1)."""
        cat = T.cat((state, action), dim=-1)
        return self.value(cat)
class CriticV_mlp(nn.Module): # State Value Function
    """build_mlp-based V(s) critic."""
    def __init__(self, n_states, args):
        super(CriticV_mlp, self).__init__()
        self.device = args.device
        self.value = build_mlp(
            input_dim=n_states,
            output_dim=1,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state):
        """Return V(state) with shape (batch, 1)."""
        return self.value(state)
class CriticTwin_mlp(nn.Module): # Twin Delayed Deep Deterministic Policy Gradients(TD3), Double Deep Q Network
    """build_mlp-based twin Q critics; forward() returns only the first head."""
    def __init__(self, n_states, n_actions, args):
        super(CriticTwin_mlp, self).__init__()
        self.device = args.device
        self.value1 = build_mlp(
            input_dim=n_states + n_actions,
            output_dim=1,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.value2 = build_mlp(
            input_dim=n_states + n_actions,
            output_dim=1,
            hidden_units=args.hidden_units_mlp,
            hidden_activation=nn.ReLU(),
        ).apply(initialize_weight)
        self.to(self.device)
    def forward(self, state, action):
        """Return Q1(state, action) only (actor update path)."""
        cat = T.cat((state, action), dim=-1)
        return self.value1(cat)
    def get_double_q(self, state, action):
        """Return (Q1, Q2) for clipped double-Q targets."""
        cat = T.cat((state, action), dim=-1)
        return self.value1(cat), self.value2(cat)
| 35.225806
| 175
| 0.565217
| 2,960
| 25,116
| 4.545946
| 0.067568
| 0.088288
| 0.101442
| 0.051501
| 0.870764
| 0.859542
| 0.850624
| 0.814284
| 0.800981
| 0.784037
| 0
| 0.010986
| 0.333134
| 25,116
| 713
| 176
| 35.225806
| 0.792406
| 0.04308
| 0
| 0.7
| 0
| 0
| 0.001087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094444
| false
| 0
| 0.016667
| 0.003704
| 0.198148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2b73f64f1b6b9923070d2bfeee19c3b007f334c
| 5,300
|
py
|
Python
|
kml_callbacks.py
|
jricheimer/keras-metric-learning
|
847828324ca877472cb9ffd3268ebceaee2525b9
|
[
"MIT"
] | 12
|
2018-02-21T18:09:09.000Z
|
2020-12-30T13:40:57.000Z
|
kml_callbacks.py
|
jricheimer/keras-metric-learning
|
847828324ca877472cb9ffd3268ebceaee2525b9
|
[
"MIT"
] | 1
|
2019-09-25T15:30:40.000Z
|
2019-09-25T15:30:40.000Z
|
kml_callbacks.py
|
jricheimer/keras-metric-learning
|
847828324ca877472cb9ffd3268ebceaee2525b9
|
[
"MIT"
] | null | null | null |
"""Useful Callbacks for training Metric Learning models.
"""
from keras.models import Model
from keras.callbacks import Callback
import numpy as np
from types import GeneratorType
from kml_utils import recall_at_k, nmi
class RecallAtK(Callback):
    """Callback that computes the Recall@k metric for a given validation set at the end of each epoch.
    # Arguments:
        validation_data: Can be either a tuple of data and labels, or a generator that yields batches of tuples
        validation_steps: Only relevant if validation_data is a generator
        k: How many closest embeddings to consider when computing recall
        metric: The distance metric in the embedding space. Defaults to 'euclidean'.
        model_name: The name of the base network that maps input samples to their embeddings.\
        If not provided, the layer within the training network of type 'Model' will be selected.
        verbose: Whether to print the computed recall after each epoch.
    """
    def __init__(self, validation_data, validation_steps=1, k=1, metric='euclidean', model_name=None, verbose=False):
        super(RecallAtK, self).__init__()
        self.model_name = model_name
        self.k = k
        self.metric = metric
        # NOTE(review): the validation_data/validation_steps constructor arguments are
        # currently unused; on_epoch_end relies on self.validation_data being populated
        # by Keras during fit() -- confirm this matches the Keras version in use.
        self.verbose = verbose
    def on_epoch_end(self, epoch, logs=None):
        """Compute Recall@k on the validation embeddings and append it to logs['recall_at_<k>']."""
        logs = logs or {}
        key = 'recall_at_{}'.format(self.k)
        if key not in logs:
            logs[key] = []
        if epoch == 0:
            # Resolve the embedding sub-model once, on the first epoch;
            # self.model is only set by Keras after the callback is attached.
            if self.model_name:
                self.model = self.model.get_layer(self.model_name)
            else:
                sub_models = [l for l in self.model.layers if isinstance(l, Model)]
                if len(sub_models) == 1:
                    self.model = sub_models[0]
        val_embeddings = self.model.predict(self.validation_data[0])
        labels = self.validation_data[1]
        recall = recall_at_k(val_embeddings, labels, k=self.k, metric=self.metric)
        logs[key].append(recall)
        if self.verbose:
            # print() with a single argument works under both Python 2 and 3
            # (was a Python-2-only print statement).
            print('\nRecall@{} on validation: {}'.format(self.k, recall))
class NMI(Callback):
    """Callback that computes the Normalized Mutual Information score for the embeddings of a given validation set \
    at the end of each epoch.
    # Arguments:
        validation_data: Can be either a tuple of data and labels, or a generator that yields batches of tuples
        validation_steps: Only relevant if validation_data is a generator
        metric: The distance metric in the embedding space. Defaults to 'euclidean'.
        model_name: The name of the base network that maps input samples to their embeddings.
        If not provided, the layer within the training network of type 'Model' will be selected.
        verbose: Whether to print the computed NMI score after each epoch.
    """
    def __init__(self, validation_data, validation_steps=1, metric='euclidean', model_name=None, verbose=False):
        super(NMI, self).__init__()
        # Bug fix: self.model is set by Keras only after the callback is attached,
        # so the sub-model lookup is deferred to the first on_epoch_end call
        # (matching RecallAtK) instead of crashing here with AttributeError.
        self.model_name = model_name
        self.metric = metric
        self.validation_data = validation_data
        self.validation_steps = validation_steps
        self.verbose = verbose
    def on_epoch_end(self, epoch, logs=None):
        """Compute NMI on the validation embeddings and append it to logs['nmi']."""
        logs = logs or {}
        if 'nmi' not in logs:
            logs['nmi'] = []
        if epoch == 0:
            if self.model_name:
                self.model = self.model.get_layer(self.model_name)
            else:
                sub_models = [l for l in self.model.layers if isinstance(l, Model)]
                if len(sub_models) != 1:
                    raise ValueError('Training network must contain exactly one sub-model')
                self.model = sub_models[0]
        if isinstance(self.validation_data, GeneratorType):
            val_embeddings = []
            labels = []
            for i in range(self.validation_steps):
                # .next() is Python-2 generator protocol, consistent with this file's
                # print-statement era; use next(...) if porting to Python 3.
                data, targets = self.validation_data.next()
                val_embeddings.append(self.model.predict(data))
                labels.extend(targets)
            val_embeddings = np.concatenate(val_embeddings, axis=0)
        # Bug fix: was `len(self.validation_data == 2)` -- len() of a comparison result.
        elif isinstance(self.validation_data, tuple) and len(self.validation_data) == 2:
            val_embeddings = self.model.predict(self.validation_data[0])
            labels = self.validation_data[1]
        else:
            raise ValueError('validation_data must be either a generator object or a tuple (X,Y)')
        this_nmi = nmi(val_embeddings, labels, metric=self.metric)
        logs['nmi'].append(this_nmi)
        if self.verbose:
            # print() with a single argument works under both Python 2 and 3.
            print('\nNMI on validation: {}'.format(this_nmi))
| 44.915254
| 117
| 0.642075
| 681
| 5,300
| 4.853157
| 0.195301
| 0.101664
| 0.087141
| 0.033888
| 0.77882
| 0.742814
| 0.728896
| 0.728896
| 0.707716
| 0.680182
| 0
| 0.004149
| 0.272453
| 5,300
| 118
| 118
| 44.915254
| 0.852956
| 0.111509
| 0
| 0.328125
| 0
| 0
| 0.072796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.078125
| null | null | 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2e40c85612f3c2ce10db59006c7ee7c411dd971
| 70
|
py
|
Python
|
xastropy/xguis/__init__.py
|
bpholden/xastropy
|
66aff0995a84c6829da65996d2379ba4c946dabe
|
[
"BSD-3-Clause"
] | 3
|
2015-08-23T00:32:58.000Z
|
2020-12-31T02:37:52.000Z
|
xastropy/xguis/__init__.py
|
Kristall-WangShiwei/xastropy
|
723fe56cb48d5a5c4cdded839082ee12ef8c6732
|
[
"BSD-3-Clause"
] | 104
|
2015-07-17T18:31:54.000Z
|
2018-06-29T17:04:09.000Z
|
xastropy/xguis/__init__.py
|
Kristall-WangShiwei/xastropy
|
723fe56cb48d5a5c4cdded839082ee12ef8c6732
|
[
"BSD-3-Clause"
] | 16
|
2015-07-17T15:50:37.000Z
|
2019-04-21T03:42:47.000Z
|
#import img_widgets
import utils
import spec_widgets
import spec_guis
| 14
| 19
| 0.871429
| 11
| 70
| 5.272727
| 0.545455
| 0.448276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 70
| 4
| 20
| 17.5
| 0.935484
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96111885bbc8b3a66e668f8b00dc75bc5c0e0406
| 10,091
|
py
|
Python
|
include/decoder.py
|
chenhepupu/MR_DD
|
4b2572fb5e1327a781d2bed9bca74132e24efa6d
|
[
"Apache-1.1"
] | null | null | null |
include/decoder.py
|
chenhepupu/MR_DD
|
4b2572fb5e1327a781d2bed9bca74132e24efa6d
|
[
"Apache-1.1"
] | null | null | null |
include/decoder.py
|
chenhepupu/MR_DD
|
4b2572fb5e1327a781d2bed9bca74132e24efa6d
|
[
"Apache-1.1"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
def add_module(self, module):
    """Register *module* on *self* under the next numeric name, str(len(self) + 1)."""
    self.add_module(str(len(self) + 1), module)
# Monkey-patch: give every nn.Module an .add() used by the decoder builders below.
# NOTE(review): len(self) is only defined for container modules (e.g. nn.Sequential);
# confirm .add is only ever called on those.
torch.nn.Module.add = add_module
def conv(in_f, out_f, kernel_size, stride=1, pad='zero', bias=False):
    """Build a conv "block": Conv2d with same-size zero padding by default, or an
    explicit ReflectionPad2d followed by an unpadded Conv2d when pad='reflection'.
    Returns an nn.Sequential.
    """
    to_pad = int((kernel_size - 1) / 2)
    modules = []
    if pad == 'reflection':
        modules.append(nn.ReflectionPad2d(to_pad))
        to_pad = 0  # padding already applied by the reflection layer
    modules.append(nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias))
    return nn.Sequential(*modules)
def decodernw(
        num_output_channels=3,
        num_channels_up=[128]*5,
        filter_size_up=1,
        need_sigmoid=True,
        pad ='reflection',
        upsample_mode='bilinear',
        act_fun=nn.ReLU(), # nn.LeakyReLU(0.2, inplace=True)
        bn_before_act = False,
        bn_affine = True,
        bn = True,
        upsample_first = True,
        bias=False
        ):
    """Build a deep-decoder network: alternating 1x1 (by default) convolutions,
    upsampling, activation and batch-norm, ending in a 1x1 conv to
    num_output_channels (plus Sigmoid when need_sigmoid).

    Returns an nn.Sequential assembled via the monkey-patched Module.add.
    NOTE: num_channels_up is a mutable default argument; it is not mutated here
    (`+` builds a new list), but callers should still pass their own list.
    """
    # Append two copies of the last width so the loop emits two extra stages.
    num_channels_up = num_channels_up + [num_channels_up[-1],num_channels_up[-1]]
    n_scales = len(num_channels_up)
    if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :
        filter_size_up = [filter_size_up]*n_scales
    model = nn.Sequential()
    for i in range(len(num_channels_up)-1):
        if upsample_first:
            model.add(conv( num_channels_up[i], num_channels_up[i+1],  filter_size_up[i], 1, pad=pad, bias=bias))
            # No upsample after the final conv stage.
            if upsample_mode!='none' and i != len(num_channels_up)-2:
                model.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
            #model.add(nn.functional.interpolate(size=None,scale_factor=2, mode=upsample_mode))
        else:
            # Upsample-before-conv ordering (skip before the first conv).
            if upsample_mode!='none' and i!=0:
                model.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
            #model.add(nn.functional.interpolate(size=None,scale_factor=2, mode=upsample_mode))
            model.add(conv( num_channels_up[i], num_channels_up[i+1],  filter_size_up[i], 1, pad=pad,bias=bias))
        if i != len(num_channels_up)-1:
            # bn_before_act toggles BatchNorm placement relative to the activation.
            if(bn_before_act and bn):
                model.add(nn.BatchNorm2d( num_channels_up[i+1] ,affine=bn_affine))
            if act_fun is not None:
                model.add(act_fun)
            if( (not bn_before_act) and bn):
                model.add(nn.BatchNorm2d( num_channels_up[i+1], affine=bn_affine))
    # Final projection to the output channel count.
    model.add(conv( num_channels_up[-1], num_output_channels, 1, pad=pad,bias=bias))
    if need_sigmoid:
        model.add(nn.Sigmoid())
    return model
# Residual block
class ResidualBlock(nn.Module):
    """1x1 convolution whose output is added back onto its input.

    The skip connection requires in_f == out_f for the addition to broadcast.
    """
    def __init__(self, in_f, out_f):
        super(ResidualBlock, self).__init__()
        self.conv = nn.Conv2d(in_f, out_f, 1, 1, padding=0, bias=False)
    def forward(self, x):
        """Return conv(x) + x."""
        return self.conv(x) + x
def resdecoder(
        num_output_channels=3,
        num_channels_up=[128]*5,
        filter_size_up=1,
        need_sigmoid=True,
        pad='reflection',
        upsample_mode='bilinear',
        act_fun=nn.ReLU(), # nn.LeakyReLU(0.2, inplace=True)
        bn_before_act = False,
        bn_affine = True,
        ):
    """Build a residual decoder: ResidualBlock + upsample + activation stages,
    a final residual stage, then a 1x1 conv to num_output_channels (plus
    Sigmoid when need_sigmoid). Returns an nn.Sequential.
    NOTE(review): ResidualBlock's skip-add requires equal in/out channels,
    which holds for the default uniform num_channels_up -- confirm for other widths.
    """
    # Append two copies of the last width so the loop emits two extra stages.
    num_channels_up = num_channels_up + [num_channels_up[-1],num_channels_up[-1]]
    n_scales = len(num_channels_up)
    if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :
        filter_size_up = [filter_size_up]*n_scales
    model = nn.Sequential()
    for i in range(len(num_channels_up)-2):
        model.add( ResidualBlock( num_channels_up[i], num_channels_up[i+1]) )
        if upsample_mode!='none':
            model.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
            #model.add(nn.functional.interpolate(size=None,scale_factor=2, mode=upsample_mode))
        if i != len(num_channels_up)-1:
            model.add(act_fun)
            #model.add(nn.BatchNorm2d( num_channels_up[i+1], affine=bn_affine))
    # new
    model.add(ResidualBlock( num_channels_up[-1], num_channels_up[-1]))
    #model.add(nn.BatchNorm2d( num_channels_up[-1] ,affine=bn_affine))
    model.add(act_fun)
    # end new
    model.add(conv( num_channels_up[-1], num_output_channels, 1, pad=pad))
    if need_sigmoid:
        model.add(nn.Sigmoid())
    return model
##########################
def np_to_tensor(img_np):
    '''Converts image in numpy.array to torch.Tensor.
    From C x W x H [0..1] to C x W x H [0..1]
    '''
    tensor = torch.from_numpy(img_np)
    return tensor
def set_to(tensor, mtx):
    """Overwrite a 4D weight tensor so each diagonal channel pair (i == j)
    holds *mtx* and every off-diagonal pair holds zeros.

    Arguments:
        tensor: 4D torch tensor (out_ch x in_ch x H x W); modified in place.
        mtx: 2D numpy array whose shape matches the last two dims of *tensor*.

    Returns:
        The mutated *tensor*.

    Raises:
        Exception: if *tensor* is not 4-dimensional.
    """
    if not len(tensor.shape)==4:
        raise Exception("assumes a 4D tensor")
    # Hoist the loop-invariant blocks (was rebuilt per iteration); assignment
    # below copies values, so sharing these tensors is safe. Also removed the
    # unused `num_kernels` local.
    mtx_block = torch.from_numpy(mtx)
    zero_block = torch.from_numpy(np.zeros(mtx.shape))
    for i in range(tensor.shape[0]):
        for j in range(tensor.shape[1]):
            tensor[i, j] = mtx_block if i == j else zero_block
    return tensor
def conv2(in_f, out_f, kernel_size, stride=1, pad='zero', bias=False):
    """Conv block variant for even kernels: kernel_size 4 gets an asymmetric
    ReflectionPad2d((1, 0, 1, 0)) plus padding=1 so spatial size is preserved;
    any other kernel size uses plain same-size zero padding.
    Returns an nn.Sequential.
    """
    to_pad = int((kernel_size - 1) / 2)
    if kernel_size != 4:
        layers = [nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)]
    else:
        layers = [nn.ReflectionPad2d((1, 0, 1, 0)),
                  nn.Conv2d(in_f, out_f, kernel_size, stride, padding=1, bias=bias)]
    return nn.Sequential(*layers)
def fixed_decodernw(
        num_output_channels=3,
        num_channels_up=[128]*5,
        need_sigmoid=True,
        pad ='reflection',
        act_fun=nn.ReLU(), # nn.LeakyReLU(0.2, inplace=True)
        bn_affine = True,
        bn = True,
        mtx = np.array( [[1,2,2,1] , [2,4,4,2], [2,4,4,2], [1,2,2,1] ] )*1/16.,
        output_padding = 0,padding=1,
        ):
    """Build a decoder whose upsampling-like 4x4 convolutions are frozen to a
    fixed smoothing kernel (*mtx*, a normalized binomial/Gaussian-like filter),
    while the interleaved 1x1 convolutions remain learnable.

    Returns an nn.Sequential; frozen convs have requires_grad=False.
    """
    # Append two copies of the last width so the loop emits two extra stages.
    num_channels_up = num_channels_up + [num_channels_up[-1],num_channels_up[-1]]
    n_scales = len(num_channels_up)
    model = nn.Sequential()
    for i in range(len(num_channels_up)-2):
        # those will be fixed
        model.add(conv2( num_channels_up[i], num_channels_up[i],  4, 1, pad=pad))
        # those will be learned
        model.add(conv( num_channels_up[i], num_channels_up[i+1],  1, 1, pad=pad))
        if i != len(num_channels_up)-1:
            if act_fun is not None:
                model.add(act_fun)
            model.add(nn.BatchNorm2d( num_channels_up[i+1], affine=bn_affine))
    model.add(conv( num_channels_up[-1], num_output_channels, 1, pad=pad))
    if need_sigmoid:
        model.add(nn.Sigmoid())
    ###
    # this is a Gaussian kernel
    # set filters to fixed and then set the gradients to zero
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            # Only the 4x4 convs match mtx.shape; set_to writes mtx on the
            # channel diagonal and zeros elsewhere.
            if(m.kernel_size == mtx.shape):
                m.weight.data = set_to(m.weight.data,mtx)
                for param in m.parameters():
                    param.requires_grad = False
    ###
    return model
####
def deconv_decoder(
        num_output_channels=3,
        num_channels_up=[128]*5,
        filter_size=1,
        pad ='reflection',
        act_fun=nn.ReLU(), # nn.LeakyReLU(0.2, inplace=True)
        bn_affine = True,
        stride=2,
        padding=0,
        output_padding=0,
        final_conv=False,
        ):
    """Build a decoder from ConvTranspose2d stages (stride-2 learned upsampling
    by default), each followed by activation + BatchNorm, ending in a 1x1 conv
    to num_output_channels and a Sigmoid. Returns an nn.Sequential.
    """
    n_scales = len(num_channels_up)
    model = nn.Sequential()
    for i in range(len(num_channels_up)-1):
        model.add(
            nn.ConvTranspose2d(num_channels_up[i], num_channels_up[i+1], filter_size, stride=stride, padding=padding, output_padding=output_padding, groups=1, bias=False, dilation=1)
            )
        #model.add(deconv(num_channels_up[i], num_channels_up[i+1], filter_size, stride,pad))
        if i != len(num_channels_up)-1:
            model.add(act_fun)
            model.add(nn.BatchNorm2d( num_channels_up[i+1], affine=bn_affine))
    if final_conv:
        # NOTE: relies on `i` keeping its last loop value (len - 2), so
        # num_channels_up[i+1] is the final width here.
        model.add(conv( num_channels_up[-1], num_channels_up[-1],  1, 1, pad=pad))
        model.add(act_fun)
        model.add(nn.BatchNorm2d( num_channels_up[i+1], affine=bn_affine))
    model.add(conv( num_channels_up[-1], num_output_channels, 1, pad=pad))
    model.add(nn.Sigmoid())
    return model
#####
def fixed_deconv_decoder(
        num_output_channels=3,
        num_channels_up=[128]*5,
        filter_size=1,  # unused in this body
        pad ='reflection',
        act_fun=nn.ReLU(), # nn.LeakyReLU(0.2, inplace=True)
        bn_affine = True,
        # 5x5 binomial (Gaussian-like) kernel; NOTE(review): not normalised
        # here (entries sum to 273), unlike the kernel in fixed_decodernw.
        mtx = np.array( [[1,4,7,4,1] , [4,16,26,16,4], [7,26,41,26,7], [4,16,26,16,4], [1,4,7,4,1]] ),
        output_padding=1,
        padding=2,
        ):
    """Build a decoder alternating learnable 1x1 convolutions with fixed
    (frozen) 5x5 transposed-convolution upsamplers at stride 2, ending in a
    1x1 projection and a Sigmoid.

    NOTE(review): each ConvTranspose2d declares in_channels=num_channels_up[i]
    but actually receives the preceding 1x1 conv's num_channels_up[i+1]
    output channels — this only works when successive widths are equal, as
    with the default [128]*5; confirm intent.
    NOTE(review): relies on a monkey-patched ``model.add`` (nn.Sequential has
    no ``add`` by default) — confirm.
    """
    # Append one copy of the last width so every loop iteration has i+1.
    num_channels_up = num_channels_up + [num_channels_up[-1]]
    n_scales = len(num_channels_up)  # unused
    model = nn.Sequential()
    for i in range(len(num_channels_up)-1):
        # those will be learned - conv
        model.add(conv( num_channels_up[i], num_channels_up[i+1], 1, 1, pad=pad))
        # those will be fixed - upsample
        # FIX: kernel_size was 4, which (a) never matched mtx.shape == (5, 5)
        # in the freeze loop below, so the Gaussian kernel was never installed
        # or frozen, and (b) with stride=2, padding=2, output_padding=1
        # produced output size 2*H - 1 instead of the clean 2x upsample (2*H)
        # that kernel_size=5 gives:
        #   out = (H-1)*2 - 2*2 + (k-1) + 1 + 1
        model.add( nn.ConvTranspose2d(
            num_channels_up[i],
            num_channels_up[i+1],
            kernel_size=5,
            stride=2,
            padding=padding,
            output_padding=output_padding, groups=1, bias=False, dilation=1) )
        # NOTE: i only reaches len(num_channels_up)-2 here, so this is always
        # true (kept for parity with the sibling decoders).
        if i != len(num_channels_up)-1:
            model.add(act_fun)
            model.add(nn.BatchNorm2d( num_channels_up[i+1], affine=bn_affine))
    model.add(conv( num_channels_up[-1], num_output_channels, 1, pad=pad))
    model.add(nn.Sigmoid())
    ###
    # this is a Gaussian kernel
    # set filters to fixed and then set the gradients to zero
    for m in model.modules():
        if isinstance(m, nn.ConvTranspose2d):
            # Only the 5x5 upsamplers match mtx.shape; install the kernel via
            # the project helper `set_to` and freeze those layers.
            if(m.kernel_size == mtx.shape):
                m.weight.data = set_to(m.weight.data,mtx)
                for param in m.parameters():
                    param.requires_grad = False
    ###
    return model
| 32.551613
| 182
| 0.591418
| 1,454
| 10,091
| 3.879642
| 0.099725
| 0.138451
| 0.163623
| 0.062046
| 0.822904
| 0.808367
| 0.774863
| 0.75749
| 0.732849
| 0.71849
| 0
| 0.029764
| 0.280844
| 10,091
| 309
| 183
| 32.656958
| 0.747554
| 0.100386
| 0
| 0.644231
| 0
| 0
| 0.012788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.014423
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9616157e8d53581affdd0fa2148bdf433fb0d642
| 87
|
py
|
Python
|
wp2latex/__init__.py
|
bairaelyn/wordpress-to-latex
|
184b66f43ce985d7083fc7c1500806440eaa15ec
|
[
"MIT"
] | null | null | null |
wp2latex/__init__.py
|
bairaelyn/wordpress-to-latex
|
184b66f43ce985d7083fc7c1500806440eaa15ec
|
[
"MIT"
] | 1
|
2021-03-31T19:40:24.000Z
|
2021-03-31T19:40:24.000Z
|
wp2latex/__init__.py
|
bairaelyn/wordpress-to-latex
|
184b66f43ce985d7083fc7c1500806440eaa15ec
|
[
"MIT"
] | null | null | null |
from .parse import extract_blog_from_XML
from . import parse
from . import latexwrite
| 17.4
| 40
| 0.816092
| 13
| 87
| 5.230769
| 0.538462
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 87
| 4
| 41
| 21.75
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
82b9f90943f1a035b553ace64060109fad23763c
| 10,483
|
py
|
Python
|
test/functions/test_inv_quad_log_det.py
|
julieli/gpytorch
|
21f08b6067a3733ffd9d729a1ce25487976f927e
|
[
"MIT"
] | 1
|
2018-05-30T07:32:29.000Z
|
2018-05-30T07:32:29.000Z
|
test/functions/test_inv_quad_log_det.py
|
julieli/gpytorch
|
21f08b6067a3733ffd9d729a1ce25487976f927e
|
[
"MIT"
] | null | null | null |
test/functions/test_inv_quad_log_det.py
|
julieli/gpytorch
|
21f08b6067a3733ffd9d729a1ce25487976f927e
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import torch
import unittest
import gpytorch
from torch.autograd import Variable
from gpytorch.lazy import NonLazyVariable
from gpytorch.utils import approx_equal
class TestInvQuadLogDetNonBatch(unittest.TestCase):
    """Checks NonLazyVariable.inv_quad / log_det / inv_quad_log_det on a
    single 3x3 SPD matrix against dense torch reference computations, for
    both the forward values and the gradients.
    """

    def setUp(self):
        # Pin the RNG unless UNLOCK_SEED explicitly disables it; remember the
        # previous state so tearDown can restore it.
        if (
            os.getenv("UNLOCK_SEED") is None
            or os.getenv("UNLOCK_SEED").lower() == "false"
        ):
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(1)
        mat = torch.Tensor([[3, -1, 0], [-1, 3, 0], [0, 0, 3]])
        vec = torch.randn(3)
        vecs = torch.randn(3, 4)
        # Two independent leaf Variables per tensor: the *_var ones go through
        # the lazy-variable path, the *_clone ones through the dense reference
        # path, so gradients can be compared afterwards.
        self.mat_var = Variable(mat, requires_grad=True)
        self.vec_var = Variable(vec, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.mat_var_clone = Variable(mat, requires_grad=True)
        self.vec_var_clone = Variable(vec, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)

    def tearDown(self):
        # Restore the RNG state if setUp seeded it.
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)

    def test_inv_quad_log_det_vector(self):
        # Forward pass: dense reference v^T A^{-1} v and log|A|.
        actual_inv_quad = (
            self.mat_var_clone.inverse().matmul(self.vec_var_clone).mul(
                self.vec_var_clone
            ).sum()
        )
        actual_log_det = self.mat_var_clone.det().log()
        with gpytorch.settings.num_trace_samples(1000):
            nlv = NonLazyVariable(self.mat_var)
            res_inv_quad, res_log_det = nlv.inv_quad_log_det(
                inv_quad_rhs=self.vec_var, log_det=True
            )
        # FIX: compare Python floats via .item(), consistently with every
        # other test in this file; assertAlmostEqual round()s its arguments,
        # which is unreliable on 0-dim tensors/Variables.
        self.assertAlmostEqual(res_inv_quad.item(), actual_inv_quad.item(), places=1)
        self.assertAlmostEqual(res_log_det.item(), actual_log_det.item(), places=1)
        # Backward: push the same upstream gradients through both paths.
        inv_quad_grad_output = torch.Tensor([3])
        log_det_grad_output = torch.Tensor([4])
        actual_inv_quad.backward(gradient=inv_quad_grad_output)
        actual_log_det.backward(log_det_grad_output)
        # retain_graph: both results share the lazy-variable graph.
        res_inv_quad.backward(gradient=inv_quad_grad_output, retain_graph=True)
        res_log_det.backward(gradient=log_det_grad_output)
        self.assertTrue(
            approx_equal(
                self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1
            )
        )
        self.assertTrue(
            approx_equal(self.vec_var_clone.grad.data, self.vec_var.grad.data)
        )

    def test_inv_quad_only_vector(self):
        # Forward pass
        res = NonLazyVariable(self.mat_var).inv_quad(self.vec_var)
        actual = self.mat_var_clone.inverse().matmul(self.vec_var_clone).mul(
            self.vec_var_clone
        ).sum()
        self.assertAlmostEqual(res.item(), actual.item(), places=1)
        # Backward
        inv_quad_grad_output = torch.randn(1)
        actual.backward(gradient=inv_quad_grad_output)
        res.backward(gradient=inv_quad_grad_output)
        self.assertTrue(
            approx_equal(
                self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1
            )
        )
        self.assertTrue(
            approx_equal(self.vec_var_clone.grad.data, self.vec_var.grad.data)
        )

    def test_inv_quad_log_det_many_vectors(self):
        # Forward pass: same as the vector case, but with a 3x4 RHS matrix.
        actual_inv_quad = (
            self.mat_var_clone.inverse().matmul(self.vecs_var_clone).mul(
                self.vecs_var_clone
            ).sum()
        )
        actual_log_det = self.mat_var_clone.det().log()
        with gpytorch.settings.num_trace_samples(1000):
            nlv = NonLazyVariable(self.mat_var)
            res_inv_quad, res_log_det = nlv.inv_quad_log_det(
                inv_quad_rhs=self.vecs_var, log_det=True
            )
        self.assertAlmostEqual(res_inv_quad.item(), actual_inv_quad.item(), places=1)
        self.assertAlmostEqual(res_log_det.item(), actual_log_det.item(), places=1)
        # Backward
        inv_quad_grad_output = torch.Tensor([3])
        log_det_grad_output = torch.Tensor([4])
        actual_inv_quad.backward(gradient=inv_quad_grad_output)
        actual_log_det.backward(log_det_grad_output)
        res_inv_quad.backward(gradient=inv_quad_grad_output, retain_graph=True)
        res_log_det.backward(gradient=log_det_grad_output)
        self.assertTrue(
            approx_equal(
                self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1
            )
        )
        self.assertTrue(
            approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data)
        )

    def test_inv_quad_only_many_vectors(self):
        # Forward pass
        res = NonLazyVariable(self.mat_var).inv_quad(self.vecs_var)
        actual = self.mat_var_clone.inverse().matmul(self.vecs_var_clone).mul(
            self.vecs_var_clone
        ).sum()
        self.assertAlmostEqual(res.item(), actual.item(), places=1)
        # Backward
        inv_quad_grad_output = torch.randn(1)
        actual.backward(gradient=inv_quad_grad_output)
        res.backward(gradient=inv_quad_grad_output)
        self.assertTrue(
            approx_equal(
                self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1
            )
        )
        self.assertTrue(
            approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data)
        )

    def test_log_det_only(self):
        # Forward pass (stochastic trace estimation needs many samples).
        with gpytorch.settings.num_trace_samples(1000):
            res = NonLazyVariable(self.mat_var).log_det()
        actual = self.mat_var_clone.det().log()
        self.assertAlmostEqual(res.item(), actual.item(), places=1)
        # Backward
        grad_output = torch.Tensor([3])
        actual.backward(gradient=grad_output)
        res.backward(gradient=grad_output)
        self.assertTrue(
            approx_equal(
                self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1
            )
        )
class TestInvQuadLogDetBatch(unittest.TestCase):
    """Same checks as the non-batch case, but over a batch of two 3x3 SPD
    matrices and batched right-hand sides."""

    def setUp(self):
        # Pin the RNG unless UNLOCK_SEED explicitly disables it; remember the
        # previous state so tearDown can restore it.
        if (
            os.getenv("UNLOCK_SEED") is None
            or os.getenv("UNLOCK_SEED").lower() == "false"
        ):
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(1)
        mats = torch.Tensor(
            [
                [[3, -1, 0], [-1, 3, 0], [0, 0, 3]],
                [[10, -2, 1], [-2, 10, 0], [1, 0, 10]],
            ]
        )
        vecs = torch.randn(2, 3, 4)
        # Independent leaf Variables for the lazy-variable path (*_var) and
        # the dense reference path (*_clone), so gradients can be compared.
        self.mats_var = Variable(mats, requires_grad=True)
        self.vecs_var = Variable(vecs, requires_grad=True)
        self.mats_var_clone = Variable(mats, requires_grad=True)
        self.vecs_var_clone = Variable(vecs, requires_grad=True)

    def tearDown(self):
        # Restore the RNG state if setUp seeded it.
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)

    def test_inv_quad_log_det_many_vectors(self):
        # Forward pass: batched inverse built per-matrix then cat'd, followed
        # by the quadratic form summed over the two trailing dims.
        actual_inv_quad = torch.cat(
            [
                self.mats_var_clone[0].inverse().unsqueeze(0),
                self.mats_var_clone[1].inverse().unsqueeze(0),
            ]
        ).matmul(
            self.vecs_var_clone
        ).mul(
            self.vecs_var_clone
        ).sum(
            2
        ).sum(
            1
        )
        actual_log_det = torch.cat(
            [
                self.mats_var_clone[0].det().log().unsqueeze(0),
                self.mats_var_clone[1].det().log().unsqueeze(0),
            ]
        )
        with gpytorch.settings.num_trace_samples(1000):
            nlv = NonLazyVariable(self.mats_var)
            res_inv_quad, res_log_det = nlv.inv_quad_log_det(
                inv_quad_rhs=self.vecs_var, log_det=True
            )
        self.assertTrue(
            approx_equal(res_inv_quad.data, actual_inv_quad.data, epsilon=1e-1)
        )
        self.assertTrue(
            approx_equal(res_log_det.data, actual_log_det.data, epsilon=1e-1)
        )
        # Backward: same upstream gradients on both paths, then compare grads.
        inv_quad_grad_output = torch.Tensor([3, 4])
        log_det_grad_output = torch.Tensor([4, 2])
        actual_inv_quad.backward(gradient=inv_quad_grad_output)
        actual_log_det.backward(gradient=log_det_grad_output)
        # retain_graph: both results share the lazy-variable graph.
        res_inv_quad.backward(gradient=inv_quad_grad_output, retain_graph=True)
        res_log_det.backward(gradient=log_det_grad_output)
        self.assertTrue(
            approx_equal(
                self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1
            )
        )
        self.assertTrue(
            approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data)
        )

    def test_inv_quad_only_many_vectors(self):
        # Forward pass
        res = NonLazyVariable(self.mats_var).inv_quad(self.vecs_var)
        actual = torch.cat(
            [
                self.mats_var_clone[0].inverse().unsqueeze(0),
                self.mats_var_clone[1].inverse().unsqueeze(0),
            ]
        ).matmul(
            self.vecs_var_clone
        ).mul(
            self.vecs_var_clone
        ).sum(
            2
        ).sum(
            1
        )
        self.assertTrue(approx_equal(res.data, actual.data, epsilon=1e-1))
        # Backward
        inv_quad_grad_output = torch.randn(2)
        actual.backward(gradient=inv_quad_grad_output)
        res.backward(gradient=inv_quad_grad_output)
        self.assertTrue(
            approx_equal(
                self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1
            )
        )
        self.assertTrue(
            approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data)
        )

    def test_log_det_only(self):
        # Forward pass (stochastic trace estimation needs many samples).
        with gpytorch.settings.num_trace_samples(1000):
            res = NonLazyVariable(self.mats_var).log_det()
        actual = torch.cat(
            [
                self.mats_var_clone[0].det().log().unsqueeze(0),
                self.mats_var_clone[1].det().log().unsqueeze(0),
            ]
        )
        self.assertTrue(approx_equal(res.data, actual.data, epsilon=1e-1))
        # Backward
        grad_output = torch.Tensor([3, 4])
        actual.backward(gradient=grad_output)
        res.backward(gradient=grad_output)
        self.assertTrue(
            approx_equal(
                self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1
            )
        )
# Allow running this test module directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
| 34.483553
| 85
| 0.6087
| 1,332
| 10,483
| 4.465465
| 0.075826
| 0.06002
| 0.04035
| 0.051446
| 0.901648
| 0.893746
| 0.889375
| 0.873067
| 0.841459
| 0.816409
| 0
| 0.016453
| 0.286845
| 10,483
| 303
| 86
| 34.59736
| 0.77916
| 0.016694
| 0
| 0.592
| 0
| 0
| 0.007774
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.048
| false
| 0
| 0.044
| 0
| 0.1
| 0.004
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
82bb62c1468342b44aba31c707b7296a211ce2d6
| 11,519
|
py
|
Python
|
tests/test_conformity.py
|
dcslagel/lascheck
|
4df427a56db14d8e0c64ccea852f1c3e6460e416
|
[
"MIT"
] | 8
|
2020-01-27T19:29:37.000Z
|
2021-10-02T17:55:01.000Z
|
tests/test_conformity.py
|
dcslagel/lascheck
|
4df427a56db14d8e0c64ccea852f1c3e6460e416
|
[
"MIT"
] | 7
|
2020-06-18T11:42:50.000Z
|
2020-09-27T13:22:51.000Z
|
tests/test_conformity.py
|
dcslagel/lascheck
|
4df427a56db14d8e0c64ccea852f1c3e6460e416
|
[
"MIT"
] | 6
|
2020-05-16T14:03:27.000Z
|
2021-08-05T15:00:29.000Z
|
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import logging
import lascheck
from lascheck import spec
# Directory containing this test module.
test_dir = os.path.dirname(__file__)


def readfromexamples(fn):
    """Return the path of example LAS file *fn* under this test dir's
    ``examples`` subdirectory.

    FIX: was a lambda assigned to a name (PEP 8 E731); a ``def`` gives the
    callable a proper name in tracebacks. Interface unchanged.
    """
    return os.path.join(os.path.dirname(__file__), "examples", fn)


logger = logging.getLogger(__name__)
# todo: add test for missing_a_section.las

# --- Missing-mandatory-section tests: each fixture omits one (or several)
# --- required LAS sections and the exact non-conformity messages are checked.

def test_check_conforming_no_version_section():
    # Missing ~V is reported twice: the section itself, and the rule that
    # ~v must come first.
    las = lascheck.read(readfromexamples("missing_version_section.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Missing mandatory sections: [\'~V\']', '~v section not first']

def test_check_conforming_no_well_section():
    # Missing ~W also implies its mandatory lines are missing.
    las = lascheck.read(readfromexamples("missing_well_section.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Missing mandatory sections: [\'~W\']',
                                          "Missing mandatory lines in ~w Section"]

def test_check_conforming_no_curves_section():
    las = lascheck.read(readfromexamples("missing_curves_section.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Missing mandatory sections: [\'~C\']']

def test_check_conforming_no_well_curves_ascii_section():
    # Several sections missing at once are reported in a single message.
    las = lascheck.read(readfromexamples("missing_well_curves_ascii_section.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Missing mandatory sections: [\'~W\', \'~C\', \'~A\']',
                                          'Missing mandatory lines in ~w Section']

def test_check_conforming_no_ascii_section():
    las = lascheck.read(readfromexamples("missing_ascii_section.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Missing mandatory sections: [\'~A\']']
# Test for a las file containing ~A section but no ~C section
def test_check_ascii_for_no_curves():
    # The ~A existence check itself passes; only the missing ~C is reported.
    las = lascheck.read(readfromexamples("missing_curves_section.las"))
    assert spec.AsciiSectionExists.check(las)
    assert las.get_non_conformities() == ['Missing mandatory sections: [\'~C\']']

def test_check_no_version():
    # ~V present but its mandatory VERS line is missing.
    las = lascheck.read(readfromexamples("missing_vers.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Missing mandatory lines in ~v Section']

def test_check_no_wrap():
    # ~V present but its mandatory WRAP line is missing.
    las = lascheck.read(readfromexamples("missing_wrap.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Missing mandatory lines in ~v Section']

def test_check_no_version_section():
    # Exercises the spec check class directly rather than check_conformity().
    las = lascheck.read(readfromexamples("missing_version_section.las"))
    assert not spec.MandatoryLinesInVersionSection.check(las)
    assert las.get_non_conformities() == ['Missing mandatory sections: [\'~V\']', '~v section not first']
# --- ~W mandatory-line tests: each fixture removes one required ~W entry
# --- (WELL, STRT, STOP, STEP, NULL, COMP, FLD, LOC, PROV, SRVC, DATE, UWI);
# --- all produce the same single non-conformity message.

def test_check_no_well_well():
    las = lascheck.read(readfromexamples("missing_well_well.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_strt():
    las = lascheck.read(readfromexamples("missing_well_strt.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_stop():
    las = lascheck.read(readfromexamples("missing_well_stop.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_step():
    las = lascheck.read(readfromexamples("missing_well_step.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_null():
    las = lascheck.read(readfromexamples("missing_well_null.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_comp():
    las = lascheck.read(readfromexamples("missing_well_comp.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_fld():
    las = lascheck.read(readfromexamples("missing_well_fld.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_loc():
    las = lascheck.read(readfromexamples("missing_well_loc.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_prov():
    las = lascheck.read(readfromexamples("missing_well_prov.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_prov_having_cnty():
    # PROV may be substituted by CNTY, so this file conforms.
    las = lascheck.read(readfromexamples("missing_well_prov_having_cnty.las"))
    assert las.check_conformity()
    assert las.get_non_conformities() == []

def test_check_no_well_srvc():
    las = lascheck.read(readfromexamples("missing_well_srvc.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_date():
    las = lascheck.read(readfromexamples("missing_well_date.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_uwi():
    las = lascheck.read(readfromexamples("missing_well_uwi.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Missing mandatory lines in ~w Section"]

def test_check_no_well_uwi_having_api():
    # UWI may be substituted by API, so this file conforms.
    las = lascheck.read(readfromexamples("missing_well_uwi_having_api.las"))
    assert las.check_conformity()
    assert las.get_non_conformities() == []
# --- STRT/STOP vs STEP divisibility and section-ordering tests.

def test_check_invalid_start_step():
    las = lascheck.read(readfromexamples("sample_invalid_start_step.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['STRT divided by step is not a whole number']

def test_check_invalid_stop_step():
    las = lascheck.read(readfromexamples("sample_invalid_stop_step.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['STOP divided by step is not a whole number']

def test_check_invalid_step():
    # Both STRT and STOP fail the divisibility rule here.
    las = lascheck.read(readfromexamples("sample_invalid_step.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['STRT divided by step is not a whole number',
                                          'STOP divided by step is not a whole number']

def test_check_no_well_section():
    # Exercises the spec check class directly; note this redefines the
    # earlier test of the same name at import time.
    las = lascheck.read(readfromexamples("missing_well_section.las"))
    assert not spec.MandatoryLinesInWellSection.check(las)

def test_check_duplicate_sections():
    # Each duplicated section is reported individually.
    las = lascheck.read(readfromexamples("sample_duplicate_sections.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ['Duplicate v section',
                                          'Duplicate w section',
                                          'Duplicate c section',
                                          'Duplicate p section',
                                          'Duplicate o section']

def test_check_sections_after_a_section():
    las = lascheck.read(readfromexamples("sample_sections_after_a_section.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Sections after ~a section"]

def test_check_sections_after_a_section_2():
    las = lascheck.read(readfromexamples("sample_sections_after_a_section_2.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Sections after ~a section"]

def test_check_sections_before_a_section():
    # Sections before ~A are fine; the file conforms.
    las = lascheck.read(readfromexamples("sample_sections_before_a_section.las"))
    assert las.check_conformity()
    assert las.get_non_conformities() == []
# --- Index-channel validation and blank-line detection tests.

def test_check_valid_mnemonic():
    las = lascheck.read(readfromexamples("invalid_index_mnemonic.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["Invalid index mnemonic. "
                                          "The only valid mnemonics for the index channel are DEPT, DEPTH, TIME, or INDEX."]

def test_check_valid_depth_unit():
    las = lascheck.read(readfromexamples("invalid_depth_unit.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["If the index is depth, the units must be M (metres), F (feet) or FT (feet)"]

def test_check_valid_depth_unit_mismatch():
    # Unit mismatch between curves yields the same depth-unit message.
    las = lascheck.read(readfromexamples("invalid_depth_unit_mismatch.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["If the index is depth, the units must be M (metres), F (feet) or FT (feet)"]

def test_check_v_section_first():
    las = lascheck.read(readfromexamples("sample_v_section_second.las"))
    assert not las.check_conformity()
    assert las.get_non_conformities() == ["~v section not first"]

def test_check_depth_divide_by_step():
    # Positive case for the divisibility spec check.
    las = lascheck.read(readfromexamples("sample.las"))
    assert spec.ValidDepthDividedByStep.check(las)

def test_check_blank_line_in_version_section():
    las = lascheck.read(readfromexamples("blank_line_in_version_section.las"))
    assert not spec.BlankLineInSection.check(las)
    assert las.get_non_conformities() == ["Section ~VERSION having blank line"]

def test_check_blank_line_in_well_section():
    las = lascheck.read(readfromexamples("blank_line_in_well_section.las"))
    assert not spec.BlankLineInSection.check(las)
    assert las.get_non_conformities() == ["Section ~WELL having blank line"]

def test_check_blank_line_in_curve_section():
    las = lascheck.read(readfromexamples("blank_line_in_curve_section.las"))
    assert not spec.BlankLineInSection.check(las)
    assert las.get_non_conformities() == ["Section ~CURVE having blank line"]

def test_check_blank_line_in_parameter_section():
    las = lascheck.read(readfromexamples("blank_line_in_parameter_section.las"))
    assert not spec.BlankLineInSection.check(las)
    assert las.get_non_conformities() == ["Section ~PARAMETER having blank line"]

def test_check_blank_line_in_other_section():
    las = lascheck.read(readfromexamples("blank_line_in_other_section.las"))
    assert not spec.BlankLineInSection.check(las)
    assert las.get_non_conformities() == ["Section ~Other having blank line"]

def test_check_blank_line_in_ascii_section():
    las = lascheck.read(readfromexamples("blank_line_in_ascii_section.las"))
    assert not spec.BlankLineInSection.check(las)
    assert las.get_non_conformities() == ["Section ~A having blank line"]

def test_check_blank_lines_in_two_section():
    # Blank lines in multiple sections are all reported.
    las = lascheck.read(readfromexamples("blank_line_in_two_sections.las"))
    assert not spec.BlankLineInSection.check(las)
    assert las.get_non_conformities() == [
        "Section ~CURVE having blank line",
        "Section ~PARAMETER having blank line"]

def test_check_conforming_positive():
    # Fully conforming sample: no findings at all.
    las = lascheck.read(readfromexamples("sample.las"))
    assert las.check_conformity()
    assert las.get_non_conformities() == []
| 39.72069
| 124
| 0.712649
| 1,457
| 11,519
| 5.323953
| 0.085106
| 0.060333
| 0.068068
| 0.175841
| 0.884878
| 0.841434
| 0.763311
| 0.677324
| 0.612737
| 0.562718
| 0
| 0.000318
| 0.17979
| 11,519
| 289
| 125
| 39.858131
| 0.820703
| 0.011112
| 0
| 0.385417
| 0
| 0.010417
| 0.255271
| 0.091278
| 0
| 0
| 0
| 0.00346
| 0.447917
| 1
| 0.229167
| false
| 0
| 0.020833
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
82c85b6ca689894b1416e36e5fb9b52ebe6dbef7
| 151
|
py
|
Python
|
testflaskbower/views.py
|
fruboes/testflaskbower
|
fbd44e3f279e4476061a8872b9723bac1f1a7bb3
|
[
"BSD-3-Clause"
] | null | null | null |
testflaskbower/views.py
|
fruboes/testflaskbower
|
fbd44e3f279e4476061a8872b9723bac1f1a7bb3
|
[
"BSD-3-Clause"
] | null | null | null |
testflaskbower/views.py
|
fruboes/testflaskbower
|
fbd44e3f279e4476061a8872b9723bac1f1a7bb3
|
[
"BSD-3-Clause"
] | null | null | null |
from testflaskbower import app
from flask import render_template
@app.route("/")
def index_b():
return render_template("bowertest.html")
| 12.583333
| 44
| 0.721854
| 19
| 151
| 5.578947
| 0.736842
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178808
| 151
| 11
| 45
| 13.727273
| 0.854839
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
82cd97420191c9c49fe209e7bdd5306ceb8bb54b
| 63
|
py
|
Python
|
#3 Mundo/#16/tempCodeRunnerFile.py
|
Henrique-Navarro/phyton
|
26d66847afa2b15c254677a36eb22f7558816b59
|
[
"MIT"
] | null | null | null |
#3 Mundo/#16/tempCodeRunnerFile.py
|
Henrique-Navarro/phyton
|
26d66847afa2b15c254677a36eb22f7558816b59
|
[
"MIT"
] | null | null | null |
#3 Mundo/#16/tempCodeRunnerFile.py
|
Henrique-Navarro/phyton
|
26d66847afa2b15c254677a36eb22f7558816b59
|
[
"MIT"
] | null | null | null |
t(input('Digite um numero: '), int(input('Digite um numero: '))
| 63
| 63
| 0.666667
| 10
| 63
| 4.2
| 0.6
| 0.52381
| 0.619048
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 63
| 1
| 63
| 63
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7d72fc17fbbcb156ce9887550f18970a5a0b5707
| 181
|
py
|
Python
|
TWT/apps/challenges/admin.py
|
akionsight/twtcodejam.net
|
a15f16657980e8baf5613ff2d0d339478cb321d1
|
[
"MIT"
] | 3
|
2020-11-17T11:03:57.000Z
|
2021-06-12T12:24:41.000Z
|
TWT/apps/challenges/admin.py
|
akionsight/twtcodejam.net
|
a15f16657980e8baf5613ff2d0d339478cb321d1
|
[
"MIT"
] | null | null | null |
TWT/apps/challenges/admin.py
|
akionsight/twtcodejam.net
|
a15f16657980e8baf5613ff2d0d339478cb321d1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models.challenge import Challenge
from .models.custom_pages import CustomPage
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(Challenge)
admin.site.register(CustomPage)
| 25.857143
| 43
| 0.845304
| 24
| 181
| 6.333333
| 0.5
| 0.131579
| 0.223684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082873
| 181
| 6
| 44
| 30.166667
| 0.915663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d7d2131224ae5918e8d61b691f46e3a54a88424
| 40
|
py
|
Python
|
tests/test_objs/__init__.py
|
FI18-Trainees/FISocketChat
|
a3c9f9ec502e1b7961716ac4f8ccb14e145e4f86
|
[
"MIT"
] | 4
|
2019-09-19T12:46:52.000Z
|
2019-12-02T13:51:13.000Z
|
tests/test_objs/__init__.py
|
FI18-Trainees/FISocketChat
|
a3c9f9ec502e1b7961716ac4f8ccb14e145e4f86
|
[
"MIT"
] | 102
|
2019-09-20T06:56:15.000Z
|
2021-12-19T23:33:06.000Z
|
tests/test_objs/__init__.py
|
FI18-Trainees/FISocketChat
|
a3c9f9ec502e1b7961716ac4f8ccb14e145e4f86
|
[
"MIT"
] | null | null | null |
from .sockets import SocketIOConnection
| 20
| 39
| 0.875
| 4
| 40
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7da6e113b103c58489ecc4b29b5a16946c351dc6
| 93
|
py
|
Python
|
effective_manager/core/tests/test_models.py
|
Gordi91/effective-manager
|
6d57a6de0c333f89b56fca08d5e049ba68d49bc4
|
[
"MIT"
] | null | null | null |
effective_manager/core/tests/test_models.py
|
Gordi91/effective-manager
|
6d57a6de0c333f89b56fca08d5e049ba68d49bc4
|
[
"MIT"
] | null | null | null |
effective_manager/core/tests/test_models.py
|
Gordi91/effective-manager
|
6d57a6de0c333f89b56fca08d5e049ba68d49bc4
|
[
"MIT"
] | null | null | null |
import unittest
def sample_user(email='test@gmail.com', password='testpass123'):
    """Stub factory for a test user; implementation pending (returns None).

    Presumably intended to create a user via the Django user model — confirm
    against the project's model tests before relying on it.
    """
    pass
| 13.285714
| 64
| 0.731183
| 12
| 93
| 5.583333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0375
| 0.139785
| 93
| 6
| 65
| 15.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.271739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.666667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7daf0d4906ee9649a0a09e5e530f4f677fa83560
| 23
|
py
|
Python
|
pybot/strings/__init__.py
|
NEK-RA/py-runner-bot
|
37d425da1a7210be54b0e433a5fdd38793a64d6a
|
[
"MIT"
] | 1
|
2019-08-13T18:49:27.000Z
|
2019-08-13T18:49:27.000Z
|
pybot/strings/__init__.py
|
NEK-RA/py-runner-bot
|
37d425da1a7210be54b0e433a5fdd38793a64d6a
|
[
"MIT"
] | 1
|
2020-03-27T12:53:11.000Z
|
2020-04-25T15:36:28.000Z
|
pybot/strings/__init__.py
|
NEK-RA/py-runner-bot
|
37d425da1a7210be54b0e433a5fdd38793a64d6a
|
[
"MIT"
] | 3
|
2019-09-20T18:47:24.000Z
|
2020-06-04T12:06:49.000Z
|
from .strings import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7dd4285115b8a1b5033b857a358bf89b789b2a2d
| 3,782
|
py
|
Python
|
test/programytest/aiml_tests/hash_tests/test_hash_aiml.py
|
motazsaad/fit-bot-fb-clt
|
580477aa1ec91855b621d9ae276f2705962f6a87
|
[
"MIT"
] | null | null | null |
test/programytest/aiml_tests/hash_tests/test_hash_aiml.py
|
motazsaad/fit-bot-fb-clt
|
580477aa1ec91855b621d9ae276f2705962f6a87
|
[
"MIT"
] | null | null | null |
test/programytest/aiml_tests/hash_tests/test_hash_aiml.py
|
motazsaad/fit-bot-fb-clt
|
580477aa1ec91855b621d9ae276f2705962f6a87
|
[
"MIT"
] | 4
|
2019-04-01T15:42:23.000Z
|
2020-11-05T08:14:27.000Z
|
import unittest
import os
from programytest.client import TestClient
class HashAIMLTestClient(TestClient):
    """Test client that loads the hash-wildcard AIML categories stored next
    to this test module."""

    def __init__(self):
        TestClient.__init__(self)

    def load_storage(self):
        # Start from the base client's storage, add the default stores, then
        # register this directory as an additional categories store.
        super(HashAIMLTestClient, self).load_storage()
        self.add_default_stores()
        self.add_categories_store([os.path.dirname(__file__)])
class HashAIMLTests(unittest.TestCase):
    """Exercises the AIML ``#`` (hash) wildcard, which matches zero or more
    words at the start, middle, or end of a pattern."""

    def setUp(self):
        client = HashAIMLTestClient()
        self._client_context = client.create_client_context("testid")

    def _ask(self, question, expected):
        # Ask the bot and verify it produced exactly the expected reply.
        response = self._client_context.bot.ask_question(self._client_context, question)
        self.assertIsNotNone(response)
        self.assertEqual(response, expected)

    def test_hash_first_word(self):
        self._ask("SAY HEY", 'HASH IS SAY.')

    def test_hash_first_no_word(self):
        self._ask("HEY", 'HASH IS.')

    def test_hash_first_multi_word(self):
        self._ask("WE SAY HEY", 'HASH IS WE SAY.')

    def test_hash_last_word(self):
        self._ask("HELLO YOU", 'HASH IS YOU.')

    def test_hash_no_word(self):
        self._ask("HELLO", 'HASH IS.')

    def test_hash_no_multi_word(self):
        self._ask("HELLO YOU THERE", 'HASH IS YOU THERE.')

    def test_hash_middle_word(self):
        self._ask("WELL HI THERE", 'HASH IS HI.')

    def test_hash_middle_no_word(self):
        self._ask("WELL THERE", 'HASH IS.')

    def test_hash_middle_multi_word(self):
        self._ask("WELL I WAS THERE", 'HASH IS I WAS.')

    def test_hash_middle_and_end(self):
        self._ask("ARE YOU FUN", 'I AM FUNNY.')
        self._ask("DO FUN YOU", 'I AM FUNNY.')

    def test_test1_testx_issue(self):
        self._ask("TEST1 TESTX", 'Answer 1.')

    def test_test1_test2_issue(self):
        self._ask("TEST1 TEST2", 'Answer 2.')

    def test_test3_issue(self):
        self._ask("TEST3", 'Answer 3.')

    def test_test1_issue(self):
        self._ask("TEST1", 'Answer 1.')
| 38.989691
| 98
| 0.710206
| 458
| 3,782
| 5.552402
| 0.152838
| 0.125836
| 0.207236
| 0.147464
| 0.764451
| 0.764451
| 0.764451
| 0.719229
| 0.6744
| 0.529296
| 0
| 0.004551
| 0.186674
| 3,782
| 96
| 99
| 39.395833
| 0.822172
| 0
| 0
| 0.30137
| 0
| 0
| 0.082232
| 0
| 0
| 0
| 0
| 0
| 0.410959
| 1
| 0.232877
| false
| 0
| 0.041096
| 0
| 0.30137
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4a313ecf741745eef315af916b95309a108468a
| 71
|
py
|
Python
|
logcabin/inputs/input.py
|
srault95/logcabin
|
27f0e589284923d49e9255494bb1e7c23da82ca2
|
[
"Apache-2.0"
] | null | null | null |
logcabin/inputs/input.py
|
srault95/logcabin
|
27f0e589284923d49e9255494bb1e7c23da82ca2
|
[
"Apache-2.0"
] | null | null | null |
logcabin/inputs/input.py
|
srault95/logcabin
|
27f0e589284923d49e9255494bb1e7c23da82ca2
|
[
"Apache-2.0"
] | null | null | null |
from ..common import SpawnedStage
class Input(SpawnedStage):
    """Marker base class for input stages; all behaviour is inherited from SpawnedStage."""
| 14.2
| 33
| 0.760563
| 8
| 71
| 6.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 71
| 4
| 34
| 17.75
| 0.915254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
c4c850b9cb4b5f1428e956d91850c9db2696861b
| 93
|
py
|
Python
|
fluentmail/__init__.py
|
shinvdu/fluentmail
|
f785ad650145af175b1cf152fd9bc33cf37858a4
|
[
"MIT"
] | 40
|
2015-02-16T12:15:20.000Z
|
2021-08-18T18:51:58.000Z
|
fluentmail/__init__.py
|
shinvdu/fluentmail
|
f785ad650145af175b1cf152fd9bc33cf37858a4
|
[
"MIT"
] | 7
|
2015-03-31T17:09:37.000Z
|
2018-12-08T21:01:03.000Z
|
fluentmail/__init__.py
|
shinvdu/fluentmail
|
f785ad650145af175b1cf152fd9bc33cf37858a4
|
[
"MIT"
] | 13
|
2015-01-17T06:31:15.000Z
|
2020-11-25T14:19:03.000Z
|
# -*- coding: utf-8 -*-
from .backends import *
from .message import *
from .utils import *
| 15.5
| 23
| 0.645161
| 12
| 93
| 5
| 0.666667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013333
| 0.193548
| 93
| 5
| 24
| 18.6
| 0.786667
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c4cfdb5e295347b3ad7b872feb7fe31f79a2764c
| 120
|
py
|
Python
|
typhoon/contrib/transformations/files.py
|
typhoon-data-org/typhoon-orchestrator
|
f24c4807b0e1ee38713ba1468db761119724dcf2
|
[
"Apache-2.0"
] | 21
|
2021-04-10T20:57:49.000Z
|
2022-03-24T06:45:30.000Z
|
typhoon/contrib/transformations/files.py
|
biellls/typhoon-orchestrator
|
f8e40c905fda8143562fc286359233f29d30c6bb
|
[
"Apache-2.0"
] | 7
|
2021-11-06T16:10:41.000Z
|
2021-12-12T11:41:32.000Z
|
typhoon/contrib/transformations/files.py
|
biellls/typhoon-orchestrator
|
f8e40c905fda8143562fc286359233f29d30c6bb
|
[
"Apache-2.0"
] | 1
|
2022-03-08T21:22:33.000Z
|
2022-03-08T21:22:33.000Z
|
from pathlib import Path
from typing import Union
def name(path: Union[str, Path]) -> str:
    """Return the final path component (file or directory name) of *path*."""
    normalized = Path(path)
    return normalized.name
| 17.142857
| 40
| 0.716667
| 19
| 120
| 4.526316
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183333
| 120
| 6
| 41
| 20
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c4e457fe6997260d8fe4dc647582bd2e7a250138
| 32
|
py
|
Python
|
10个编程技巧/lambda.py
|
shao1chuan/pythonbook
|
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
|
[
"MulanPSL-1.0"
] | 95
|
2020-10-11T04:45:46.000Z
|
2022-02-25T01:50:40.000Z
|
10个编程技巧/lambda.py
|
shao1chuan/pythonbook
|
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
|
[
"MulanPSL-1.0"
] | null | null | null |
10个编程技巧/lambda.py
|
shao1chuan/pythonbook
|
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
|
[
"MulanPSL-1.0"
] | 30
|
2020-11-05T09:01:00.000Z
|
2022-03-08T05:58:55.000Z
|
def p(x, y):
    """Return the sum (or concatenation) of *x* and *y*.

    PEP 8 (E731) discourages assigning a lambda to a name; a ``def`` gives the
    same callable a proper name for tracebacks and introspection.
    """
    return x + y


print(p(4, 6))
| 16
| 18
| 0.59375
| 10
| 32
| 1.9
| 0.7
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.125
| 32
| 2
| 19
| 16
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c4f41658f632aa8adae489184b22402490ff9035
| 6,877
|
py
|
Python
|
skills/dff_friendship_skill/scenario/weekend_response.py
|
deepmipt/assistant-base
|
ab2dcbdc4c31cd52ca336022003bf69fa80c9189
|
[
"Apache-2.0"
] | 7
|
2020-12-07T19:59:16.000Z
|
2020-12-24T16:20:47.000Z
|
skills/dff_friendship_skill/scenario/weekend_response.py
|
deepmipt/assistant-base
|
ab2dcbdc4c31cd52ca336022003bf69fa80c9189
|
[
"Apache-2.0"
] | null | null | null |
skills/dff_friendship_skill/scenario/weekend_response.py
|
deepmipt/assistant-base
|
ab2dcbdc4c31cd52ca336022003bf69fa80c9189
|
[
"Apache-2.0"
] | null | null | null |
import logging
import random
import sentry_sdk
from os import getenv
from typing import Tuple
import common.dff.integration.condition as int_cnd
import common.dff.integration.context as int_ctx
import common.greeting as common_greeting
import common.scenarios.weekend as common_weekend
from common.constants import CAN_CONTINUE_SCENARIO
from df_engine.core import Actor, Context
# Error reporting: DSN is read from the environment (no-op when unset).
sentry_sdk.init(getenv("SENTRY_DSN"))
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
# Reply tuple shape: (text, confidence, human_attrs, bot_attrs, attrs)
# -- presumably the dp-agent skill protocol; TODO confirm against callers.
REPLY_TYPE = Tuple[str, float, dict, dict, dict]
# Confidence levels reported at different stages of the dialog.
DIALOG_BEGINNING_START_CONFIDENCE = 0.98
DIALOG_BEGINNING_CONTINUE_CONFIDENCE = 0.9
DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE = 0.98
MIDDLE_DIALOG_START_CONFIDENCE = 0.7
SUPER_CONFIDENCE = 1.0
HIGH_CONFIDENCE = 0.98
MIDDLE_CONFIDENCE = 0.95
# Greeting step identifiers taken from common_greeting.GREETING_QUESTIONS.
GREETING_STEPS = list(common_greeting.GREETING_QUESTIONS)
def _acknowledged_reply(ctx: Context, actor: Actor, phrases, confidence: float) -> str:
    """Compose a reply: sentiment acknowledgement + a random phrase from *phrases*.

    Shared implementation for every ``*_response`` function below — previously
    the same six lines were duplicated eleven times, differing only in the
    phrase list and the confidence value.

    Args:
        ctx: df_engine dialog context.
        actor: df_engine actor.
        phrases: sequence of candidate phrases to pick the reply body from.
        confidence: confidence value to report for this response.

    Returns:
        The acknowledgement and the chosen phrase joined with a space.
    """
    # get ack, body
    ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor)
    # obtaining random response from the supplied phrase list
    body = random.choice(phrases)
    # set confidence and mark the scenario as continuable
    int_ctx.set_confidence(ctx, actor, confidence)
    int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
    int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
    return " ".join([ack, body])


def std_weekend_response(ctx: Context, actor: Actor) -> str:
    """Ask a random weekend question (dialog-beginning start confidence)."""
    return _acknowledged_reply(ctx, actor, common_weekend.WEEKEND_QUESTIONS, DIALOG_BEGINNING_START_CONFIDENCE)


def sys_cleaned_up_response(ctx: Context, actor: Actor) -> str:
    """React to the user having cleaned up."""
    return _acknowledged_reply(ctx, actor, common_weekend.CLEANED_UP_STATEMENTS, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_slept_in_response(ctx: Context, actor: Actor) -> str:
    """Ask about sleeping in (dialog-beginning start confidence)."""
    return _acknowledged_reply(ctx, actor, common_weekend.SLEPT_IN_QUESTIONS, DIALOG_BEGINNING_START_CONFIDENCE)


def sys_feel_great_response(ctx: Context, actor: Actor) -> str:
    """Ask what the user plans to do today."""
    return _acknowledged_reply(ctx, actor, common_weekend.WHAT_PLANS_FOR_TODAY, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_need_more_time_response(ctx: Context, actor: Actor) -> str:
    """React to the user wishing for more free time."""
    return _acknowledged_reply(ctx, actor, common_weekend.WISH_MORE_TIME, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_watched_film_response(ctx: Context, actor: Actor) -> str:
    """Ask for the name of the movie the user watched."""
    return _acknowledged_reply(ctx, actor, common_weekend.MOVIE_NAME_QUESTION, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_read_book_response(ctx: Context, actor: Actor) -> str:
    """Ask for the name of the book the user read."""
    return _acknowledged_reply(ctx, actor, common_weekend.BOOK_NAME_QUESTION, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_played_computer_game_response(ctx: Context, actor: Actor) -> str:
    """Ask for the name of the computer game the user played."""
    return _acknowledged_reply(ctx, actor, common_weekend.COMPUTER_GAME_NAME_QUESTION, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_play_on_weekends_response(ctx: Context, actor: Actor) -> str:
    """Ask how the game made the user feel."""
    return _acknowledged_reply(ctx, actor, common_weekend.GAME_EMOTIONS_QUESTION, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_play_regularly_response(ctx: Context, actor: Actor) -> str:
    """Follow up with a regular player."""
    return _acknowledged_reply(ctx, actor, common_weekend.REGULAR_PLAYER_QUESTION, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)


def sys_play_once_response(ctx: Context, actor: Actor) -> str:
    """Follow up with an occasional player."""
    return _acknowledged_reply(ctx, actor, common_weekend.OCCASIONAL_PLAYER_QUESTION, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
| 35.632124
| 102
| 0.777083
| 951
| 6,877
| 5.256572
| 0.126183
| 0.070414
| 0.070414
| 0.083617
| 0.812362
| 0.812362
| 0.812362
| 0.812362
| 0.812362
| 0.812362
| 0
| 0.003051
| 0.142068
| 6,877
| 192
| 103
| 35.817708
| 0.844237
| 0.124618
| 0
| 0.55
| 0
| 0
| 0.012195
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11
| false
| 0
| 0.11
| 0
| 0.33
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4f6e9bf3877ed7267d66a6c1ea2049800044709
| 35
|
py
|
Python
|
electrumsv_sdk/builtin_components/status_monitor/__init__.py
|
electrumsv/electrumsv-sdk
|
2d4b9474b2e2fc5518bba10684c5d5130ffb6328
|
[
"OML"
] | 4
|
2020-07-06T12:13:14.000Z
|
2021-07-29T12:45:27.000Z
|
electrumsv_sdk/builtin_components/status_monitor/__init__.py
|
electrumsv/electrumsv-sdk
|
2d4b9474b2e2fc5518bba10684c5d5130ffb6328
|
[
"OML"
] | 62
|
2020-07-04T04:50:27.000Z
|
2021-08-19T21:06:10.000Z
|
electrumsv_sdk/builtin_components/status_monitor/__init__.py
|
electrumsv/electrumsv-sdk
|
2d4b9474b2e2fc5518bba10684c5d5130ffb6328
|
[
"OML"
] | 3
|
2021-01-21T09:22:45.000Z
|
2021-06-12T10:16:03.000Z
|
from .status_monitor import Plugin
| 17.5
| 34
| 0.857143
| 5
| 35
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f21968a813d4ba036357a8bfb4d6998864f551e4
| 6,868
|
py
|
Python
|
sodium/model/model.py
|
Keerthi001/PySodium
|
761598d8a129ce95a42404898b7f16ddcae568d9
|
[
"MIT"
] | 22
|
2020-05-16T08:15:48.000Z
|
2021-12-30T14:38:31.000Z
|
sodium/model/model.py
|
Keerthi001/PySodium
|
761598d8a129ce95a42404898b7f16ddcae568d9
|
[
"MIT"
] | 1
|
2020-09-07T17:10:41.000Z
|
2020-09-09T20:51:31.000Z
|
sodium/model/model.py
|
Keerthi001/PySodium
|
761598d8a129ce95a42404898b7f16ddcae568d9
|
[
"MIT"
] | 43
|
2020-03-07T22:08:41.000Z
|
2022-03-16T21:07:30.000Z
|
import torch.nn as nn
import torch.nn.functional as F
from sodium.utils import setup_logger
from sodium.base import BaseModel
logger = setup_logger(__name__)
class MNISTModel(BaseModel):
    """Convolutional network for MNIST: stacked 3x3 conv blocks, global average
    pooling, then 1x1 convs down to 10 classes with a log-softmax output.

    The ``# output_size`` comments track the spatial size after each block,
    assuming a 28x28 single-channel input.
    """

    def __init__(self, dropout_value=0.08):
        """Build the layer stack.

        Args:
            dropout_value: dropout probability applied after most conv blocks.
        """
        self.dropout_value = dropout_value  # dropout value
        super(MNISTModel, self).__init__()
        # Input Block
        self.convblock1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=14,
                      kernel_size=(3, 3), padding=0, bias=False),
            nn.BatchNorm2d(14),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 26
        # CONVOLUTION BLOCK 1
        self.convblock2 = nn.Sequential(
            nn.Conv2d(in_channels=14, out_channels=30,
                      kernel_size=(3, 3), padding=0, bias=False),
            nn.BatchNorm2d(30),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 24
        # TRANSITION BLOCK 1: 1x1 conv reduces channels before pooling.
        self.convblock3 = nn.Sequential(
            nn.Conv2d(in_channels=30, out_channels=10,
                      kernel_size=(1, 1), padding=0, bias=False),
        )  # output_size = 24
        self.pool1 = nn.MaxPool2d(2, 2)  # output_size = 12
        # CONVOLUTION BLOCK 2
        self.convblock4 = nn.Sequential(
            nn.Conv2d(in_channels=10, out_channels=14,
                      kernel_size=(3, 3), padding=0, bias=False),
            nn.BatchNorm2d(14),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 10
        self.convblock5 = nn.Sequential(
            nn.Conv2d(in_channels=14, out_channels=15,
                      kernel_size=(3, 3), padding=0, bias=False),
            nn.BatchNorm2d(15),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 8
        self.convblock6 = nn.Sequential(
            nn.Conv2d(in_channels=15, out_channels=15,
                      kernel_size=(3, 3), padding=0, bias=False),
            nn.BatchNorm2d(15),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 6
        # OUTPUT BLOCK: global average pooling collapses the spatial dims.
        self.gap = nn.Sequential(
            nn.AvgPool2d(kernel_size=6)
        )  # output_size = 1
        self.convblock7 = nn.Sequential(
            nn.Conv2d(in_channels=15, out_channels=15,
                      kernel_size=(1, 1), padding=0, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(15),
            nn.Dropout(self.dropout_value)
        )
        self.convblock8 = nn.Sequential(
            nn.Conv2d(in_channels=15, out_channels=10,
                      kernel_size=(1, 1), padding=0, bias=False),
        )
        # NOTE(review): self.dropout is defined but never used in forward().
        self.dropout = nn.Dropout(self.dropout_value)

    def forward(self, x):
        """Run the stack; returns log-probabilities of shape (batch, 10)."""
        x = self.convblock1(x)
        x = self.convblock2(x)
        x = self.convblock3(x)
        x = self.pool1(x)
        x = self.convblock4(x)
        x = self.convblock5(x)
        x = self.convblock6(x)
        x = self.gap(x)
        x = self.convblock7(x)
        x = self.convblock8(x)
        x = x.view(-1, 10)
        return F.log_softmax(x, dim=-1)
class CIFAR10Model(BaseModel):
    """Convolutional network for CIFAR-10: conv blocks with a depthwise-separable
    stage and a dilated conv, global average pooling, then 1x1 convs down to 10
    classes with a log-softmax output.

    The ``# output_size`` comments track the spatial size after each block,
    assuming a 32x32 three-channel input.
    """

    def __init__(self, dropout_value=0.25):
        """Build the layer stack.

        Args:
            dropout_value: dropout probability applied after most conv blocks.
        """
        self.dropout_value = dropout_value  # dropout value
        super(CIFAR10Model, self).__init__()
        # Input Block
        self.convblock1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32,
                      kernel_size=(3, 3), padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 32
        # CONVOLUTION BLOCK 1
        self.convblock2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64,
                      kernel_size=(3, 3), padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 32
        # TRANSITION BLOCK 1: 1x1 conv reduces channels before pooling.
        self.convblock3 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=32,
                      kernel_size=(1, 1), padding=0, bias=False),
        )  # output_size = 32
        self.pool1 = nn.MaxPool2d(2, 2)  # output_size = 16
        # CONVOLUTION BLOCK 2
        # DEPTHWISE CONVOLUTION AND POINTWISE CONVOLUTION
        # groups=32 makes this a depthwise conv; convblock4's 1x1 is the pointwise part.
        self.depthwise1 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64,
                      kernel_size=(3, 3), padding=0, groups=32, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 16
        self.convblock4 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128,
                      kernel_size=(1, 1), padding=0, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 16
        # TRANSITION BLOCK 2
        self.pool2 = nn.MaxPool2d(2, 2)  # output_size = 8
        # CONVOLUTION BLOCK 3
        # Dilated 3x3 conv (dilation=2) enlarges the receptive field.
        self.convblock5 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128,
                      kernel_size=(3, 3), padding=4, dilation=2, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 11
        self.convblock6 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128,
                      kernel_size=(3, 3), padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Dropout(self.dropout_value)
        )  # output_size = 11
        # TRANSITION BLOCK 3
        self.pool3 = nn.MaxPool2d(2, 2)  # output_size = 5
        # OUTPUT BLOCK: global average pooling collapses the spatial dims.
        self.gap = nn.Sequential(
            nn.AvgPool2d(kernel_size=5)
        )  # output_size = 1
        self.convblock7 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128,
                      kernel_size=(1, 1), padding=0, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(128),
            nn.Dropout(self.dropout_value)
        )
        self.convblock8 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=10,
                      kernel_size=(1, 1), padding=0, bias=False),
        )
        # NOTE(review): self.dropout is defined but never used in forward().
        self.dropout = nn.Dropout(self.dropout_value)

    def forward(self, x):
        """Run the stack; returns log-probabilities of shape (batch, 10)."""
        x = self.convblock1(x)
        x = self.convblock2(x)
        x = self.convblock3(x)
        x = self.pool1(x)
        x = self.depthwise1(x)
        x = self.convblock4(x)
        x = self.pool2(x)
        x = self.convblock5(x)
        x = self.convblock6(x)
        x = self.pool3(x)
        x = self.gap(x)
        x = self.convblock7(x)
        x = self.convblock8(x)
        x = x.view(-1, 10)
        return F.log_softmax(x, dim=-1)
| 32.704762
| 77
| 0.541206
| 819
| 6,868
| 4.395604
| 0.108669
| 0.015
| 0.038333
| 0.094444
| 0.884444
| 0.880278
| 0.867222
| 0.84
| 0.765556
| 0.727778
| 0
| 0.067658
| 0.343623
| 6,868
| 209
| 78
| 32.861244
| 0.730923
| 0.089983
| 0
| 0.677019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024845
| false
| 0
| 0.024845
| 0
| 0.074534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1ef9c75b473c06d95ce1421d1f9c9a0d6760be20
| 39
|
py
|
Python
|
exchanges/twse/handlers/__init__.py
|
shwang-bk/fin4crawl
|
3c86add7c30817b1d739e510c321f631a43b9c71
|
[
"MIT"
] | 1
|
2020-03-26T14:46:55.000Z
|
2020-03-26T14:46:55.000Z
|
exchanges/twse/handlers/__init__.py
|
shwang-bk/fin4crawl
|
3c86add7c30817b1d739e510c321f631a43b9c71
|
[
"MIT"
] | null | null | null |
exchanges/twse/handlers/__init__.py
|
shwang-bk/fin4crawl
|
3c86add7c30817b1d739e510c321f631a43b9c71
|
[
"MIT"
] | 1
|
2021-04-10T00:53:14.000Z
|
2021-04-10T00:53:14.000Z
|
from .branch import StockBranchHandler
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
480dac83956f4619481adb62eb504c55d6e3023d
| 249
|
py
|
Python
|
tests/test_service/__init__.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 240
|
2015-07-17T16:27:54.000Z
|
2022-03-29T13:53:06.000Z
|
tests/test_service/__init__.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 400
|
2015-07-23T05:37:52.000Z
|
2022-03-29T12:32:30.000Z
|
tests/test_service/__init__.py
|
amih90/bacpypes
|
27ab4f18aa252ceb6ffdc32d53af2995a2e92647
|
[
"MIT"
] | 143
|
2015-07-17T18:22:27.000Z
|
2022-03-22T01:21:24.000Z
|
#!/usr/bin/python
"""
Test Services
"""
from . import test_cov
from . import test_cov_av
from . import test_cov_bv
from . import test_cov_pc
from . import test_device
from . import test_device_2
from . import test_file
from . import test_object
| 14.647059
| 27
| 0.759036
| 41
| 249
| 4.317073
| 0.365854
| 0.451977
| 0.632768
| 0.384181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004808
| 0.164659
| 249
| 16
| 28
| 15.5625
| 0.846154
| 0.120482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4883314efc2c59871df1db49f568219542a48870
| 2,471
|
py
|
Python
|
tests/unit/butterfree/dataframe_service/test_repartition.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | 208
|
2020-07-17T18:46:10.000Z
|
2022-03-21T12:44:12.000Z
|
tests/unit/butterfree/dataframe_service/test_repartition.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | 124
|
2020-07-17T19:42:47.000Z
|
2021-07-21T00:38:05.000Z
|
tests/unit/butterfree/dataframe_service/test_repartition.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | 30
|
2020-07-17T20:24:09.000Z
|
2022-03-17T00:50:37.000Z
|
from pyspark.sql.functions import spark_partition_id
from butterfree.dataframe_service import repartition_df, repartition_sort_df
class TestRepartition:
    """Unit tests for repartition_df and repartition_sort_df.

    Each test starts from the unpartitioned ``input_df`` fixture and checks the
    number of distinct Spark partition ids after repartitioning. The repeated
    count expression is factored into ``_num_partitions``.
    """

    @staticmethod
    def _num_partitions(df):
        """Return the number of distinct Spark partition ids present in *df*."""
        return df.select(spark_partition_id()).distinct().count()

    def test_repartition_df(self, input_df):
        result_df = repartition_df(dataframe=input_df, partition_by=["timestamp"])
        # Only one partition id, meaning data is not partitioned
        assert self._num_partitions(input_df) == 1
        # Desired number of partitions
        assert self._num_partitions(result_df) == 200

    def test_repartition_df_partitions(self, input_df):
        result_df = repartition_df(
            dataframe=input_df, partition_by=["timestamp"], num_partitions=50
        )
        # Only one partition id, meaning data is not partitioned
        assert self._num_partitions(input_df) == 1
        # Desired number of partitions
        assert self._num_partitions(result_df) == 50

    def test_repartition_sort_df(self, input_df):
        result_df = repartition_sort_df(
            dataframe=input_df, partition_by=["timestamp"], order_by=["timestamp"]
        )
        # Only one partition id, meaning data is not partitioned
        assert self._num_partitions(input_df) == 1
        # Desired number of partitions
        assert self._num_partitions(result_df) == 200

    def test_repartition_sort_df_processors(self, input_df):
        result_df = repartition_sort_df(
            dataframe=input_df,
            partition_by=["timestamp"],
            order_by=["timestamp"],
            num_processors=3,
        )
        # Only one partition id, meaning data is not partitioned
        assert self._num_partitions(input_df) == 1
        # Desired number of partitions
        assert self._num_partitions(result_df) == 12

    def test_repartition_sort_df_processors_partitions(self, input_df):
        result_df = repartition_sort_df(
            dataframe=input_df,
            partition_by=["timestamp"],
            order_by=["timestamp"],
            num_partitions=50,
        )
        # Only one partition id, meaning data is not partitioned
        assert self._num_partitions(input_df) == 1
        # Desired number of partitions
        assert self._num_partitions(result_df) == 50
| 41.183333
| 82
| 0.674221
| 298
| 2,471
| 5.298658
| 0.154362
| 0.111463
| 0.111463
| 0.139329
| 0.896137
| 0.882204
| 0.850538
| 0.835339
| 0.835339
| 0.835339
| 0
| 0.011506
| 0.226224
| 2,471
| 59
| 83
| 41.881356
| 0.814331
| 0.169567
| 0
| 0.486486
| 0
| 0
| 0.035277
| 0
| 0
| 0
| 0
| 0
| 0.27027
| 1
| 0.135135
| false
| 0
| 0.054054
| 0
| 0.216216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f89a6d91f2bcdda2debdef85d393053dde424e6
| 4,034
|
py
|
Python
|
pojson/test_convert.py
|
cederigo/pojson
|
c3c64cac712f8c70fc12f50463ac4d6f1499db93
|
[
"BSD-3-Clause"
] | null | null | null |
pojson/test_convert.py
|
cederigo/pojson
|
c3c64cac712f8c70fc12f50463ac4d6f1499db93
|
[
"BSD-3-Clause"
] | null | null | null |
pojson/test_convert.py
|
cederigo/pojson
|
c3c64cac712f8c70fc12f50463ac4d6f1499db93
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from pojson import convert, po2dict
import polib
def test_po2dict():
    """A single entry converts to a dict keyed by msgid, with empty metadata under ''."""
    catalog = polib.POFile()
    catalog.metadata = {}
    catalog.append(polib.POEntry(msgid=u'Hello world', msgstr=u'Hallo wereld'))
    assert po2dict(catalog) == {'': {}, u'Hello world': [None, u'Hallo wereld']}
def test_po2dict_with_metadata():
    """PO metadata is carried through under the '' key of the result."""
    catalog = polib.POFile()
    catalog.metadata = {'Project-Id-Version': '1.0'}
    catalog.append(polib.POEntry(msgid=u'Hello world', msgstr=u'Hallo wereld'))
    expected = {'': {'Project-Id-Version': '1.0'},
                'Hello world': [None, u'Hallo wereld']}
    assert po2dict(catalog) == expected
def test_convert(tmpdir):
    """convert() renders a saved catalog as a JSON string under the given domain key."""
    catalog = polib.POFile()
    catalog.metadata = {}
    catalog.append(polib.POEntry(msgid=u'Hello world', msgstr=u'Hallo wereld'))
    po_path = tmpdir.join('test.po').strpath
    catalog.save(po_path)
    # XXX dependent on default key sorting of simplejson
    assert convert('foo', po_path) == '{"foo": {"": {}, "Hello world": [null, "Hallo wereld"]}}'
def test_convert_detect_encoding(tmpdir):
    """convert() detects the catalog's encoding when none is given explicitly."""
    catalog = polib.POFile()
    catalog.metadata = {}
    catalog.append(polib.POEntry(msgid=u'One', msgstr=u'Eén'))
    po_path = tmpdir.join('test.po').strpath
    catalog.save(po_path)
    # XXX dependent on default key sorting of simplejson
    assert convert('foo', po_path) == u'{"foo": {"": {}, "One": [null, "Eén"]}}'
def test_convert_explicit_encoding(tmpdir):
    """Passing encoding='utf-8' explicitly yields the same result as detection."""
    catalog = polib.POFile()
    catalog.metadata = {}
    catalog.append(polib.POEntry(msgid=u'One', msgstr=u'Eén'))
    po_path = tmpdir.join('test.po').strpath
    catalog.save(po_path)
    # XXX dependent on default key sorting of simplejson
    assert convert('foo', po_path, encoding='utf-8') == u'{"foo": {"": {}, "One": [null, "Eén"]}}'
def test_convert_pretty_print(tmpdir):
    """pretty_print=True emits indented JSON instead of a single line."""
    po = polib.POFile()
    po.metadata = {}
    entry = polib.POEntry(
        msgid=u'One',
        msgstr=u'Een')
    po.append(entry)
    path = tmpdir.join('test.po').strpath
    po.save(path)
    result = convert('foo', path, pretty_print=True)
    # XXX apparently different versions of simplejson use different
    # pretty printing algorithms, so this may break
    # NOTE(review): indentation inside the expected literal below may have been
    # lost in transit -- verify against real simplejson pretty-printed output.
    assert result == u'''\
{
"foo": {
"": {},
"One": [
null,
"Een"
]
}
}'''
def test_convert_javascript(tmpdir):
    """js=True wraps the JSON in a `var json_locale_data = ...;` statement."""
    catalog = polib.POFile()
    catalog.metadata = {}
    catalog.append(polib.POEntry(msgid=u'One', msgstr=u'Een'))
    po_path = tmpdir.join('test.po').strpath
    catalog.save(po_path)
    result = convert('foo', po_path, encoding='utf-8',
                     js=True)
    assert result == u'var json_locale_data = {"foo": {"": {}, "One": [null, "Een"]}};'
def test_convert_javascript_prettyprint(tmpdir):
    """js=True together with pretty_print=True emits an indented JS assignment."""
    po = polib.POFile()
    po.metadata = {}
    entry = polib.POEntry(
        msgid=u'One',
        msgstr=u'Een')
    po.append(entry)
    path = tmpdir.join('test.po').strpath
    po.save(path)
    result = convert('foo', path, encoding='utf-8',
                     js=True, pretty_print=True)
    # XXX apparently different versions of simplejson use different
    # pretty printing algorithms, so this may break
    # NOTE(review): indentation inside the expected literal below may have been
    # lost in transit -- verify against real simplejson pretty-printed output.
    assert result == u'''\
var json_locale_data = {
"foo": {
"": {},
"One": [
null,
"Een"
]
}
};'''
def pytest_funcarg__nl_po(request):
    """Provide the testdata/nl.po catalog (next to this module) as the `nl_po` funcarg.

    NOTE(review): the ``pytest_funcarg__`` prefix is the legacy funcarg API,
    removed in pytest 4 -- migrate to ``@pytest.fixture`` when upgrading.
    """
    p = os.path.join(
        os.path.dirname(request.module.__file__), 'testdata', 'nl.po')
    return polib.pofile(p)
def test_po2dict_with_plural(nl_po):
    """Plural entries map msgid to [msgid_plural, singular translation, plural translation]."""
    converted = po2dict(nl_po)
    assert converted["1 field did not validate"] == [
        u'%1 fields did not validate',
        u'1 veld kon niet gevalideerd worden',
        u'%1 velden konden niet gevalideerd worden',
    ]
| 24.597561
| 87
| 0.571145
| 493
| 4,034
| 4.596349
| 0.200811
| 0.027802
| 0.045896
| 0.052957
| 0.815975
| 0.775816
| 0.775816
| 0.775816
| 0.721094
| 0.721094
| 0
| 0.006509
| 0.276401
| 4,034
| 163
| 88
| 24.748466
| 0.769784
| 0.096678
| 0
| 0.669492
| 0
| 0
| 0.227235
| 0
| 0
| 0
| 0
| 0
| 0.076271
| 1
| 0.084746
| false
| 0
| 0.025424
| 0
| 0.118644
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6faceab6d8e0c488980f8e5d6e78feb126d2c2c0
| 7,202
|
py
|
Python
|
sshportal_api/resources/userroles.py
|
Whyrl35/sshportal-api
|
1aa437e746f01ba1df48de904466f91a103a11ad
|
[
"MIT"
] | 1
|
2021-07-06T21:11:06.000Z
|
2021-07-06T21:11:06.000Z
|
sshportal_api/resources/userroles.py
|
Whyrl35/sshportal-api
|
1aa437e746f01ba1df48de904466f91a103a11ad
|
[
"MIT"
] | null | null | null |
sshportal_api/resources/userroles.py
|
Whyrl35/sshportal-api
|
1aa437e746f01ba1df48de904466f91a103a11ad
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource
from flask_jwt_extended import jwt_required
from sshportal_api import api
from sshportal_api.models import UserModel, UserRolesModel, UserUserRolesModel
from flask_restful_swagger_3 import swagger
class UserRoles(Resource):
    """REST resource that lists every user role together with its member users."""

    @swagger.doc({
        'security': [{'bearerAuth': []}],
        'tags': ['userroles'],
        'description': "Return list of userroles",
        'responses': {
            '200': {
                'description': "A list of userroles",
                'content': {
                    'application/json': {
                        'examples': {
                            'application/json': [
                                {
                                    "id": 1,
                                    "created_at": "2019-11-13T16:08:45.263598",
                                    "updated_at": "2019-11-13T16:31:34.940968",
                                    "deleted_at": None,
                                    "name": "admin",
                                    "users": [
                                        {
                                            "id": 1,
                                            "name": "admin",
                                            "password": None,
                                            "created_at": "2019-11-13T16:08:45.490830",
                                            "updated_at": "2019-11-13T16:39:21.114928",
                                            "deleted_at": None,
                                            "is_admin": None,
                                            "email": "admin@localhost",
                                            "comment": "created by sshportal",
                                            "invite_token": "711pl82xwUJ4CCFw"
                                        }
                                    ]
                                }
                            ]
                        }
                    }
                }
            }
        }
    })
    # NOTE(review): bare @jwt_required is valid for flask_jwt_extended < 4;
    # version 4+ requires @jwt_required() -- confirm the pinned version.
    @jwt_required
    def get(self):
        """Return all user roles as JSON, each with the JSON of its member users."""
        userroles_json = []
        userroles = UserRolesModel.return_all()
        for userrole in userroles:
            userroles_part = UserRolesModel.to_json(userrole)
            # Join-table rows linking users to this role.
            useruserroles = UserUserRolesModel.by_user_role_id(userrole.id)
            userroles_part['users'] = []
            for uur in useruserroles:
                user = UserModel.by_id(uur.user_id)
                userroles_part['users'].append(UserModel.to_json(user))
            userroles_json.append(userroles_part)
        return userroles_json
class UserRoleId(Resource):
    """REST resource returning a single userrole selected by numeric id."""
    @swagger.doc({
        'security': [{'bearerAuth': []}],
        'tags': ['userrole'],
        'description': "Return a userrole that match the given ID",
        'parameters': [
            {
                'name': 'id',
                'description': 'the id of a userrole',
                'in': 'path',
                'schema': {'type': 'integer'}
            }
        ],
        'responses': {
            '200': {
                'description': "The userrole that match the ID",
                'content': {
                    'application/json': {
                        # Example payload keyed by media type, mirroring the
                        # structure documented on the UserRoles resource.
                        'examples': {
                            'application/json': {
                                "id": 1,
                                "created_at": "2019-11-13T16:08:45.263598",
                                "updated_at": "2019-11-13T16:31:34.940968",
                                "deleted_at": None,
                                "name": "admin",
                                "users": [
                                    {
                                        "id": 1,
                                        "name": "admin",
                                        "password": None,
                                        "created_at": "2019-11-13T16:08:45.490830",
                                        "updated_at": "2019-11-13T16:39:21.114928",
                                        "deleted_at": None,
                                        "is_admin": None,
                                        "email": "admin@localhost",
                                        "comment": "created by sshportal",
                                        "invite_token": "711pl82xwUJ4CCFw"
                                    }
                                ]
                            }
                        }
                    }
                }
            }
        }
    })
    @jwt_required
    def get(self, id):
        """Return the userrole matching ``id`` with its member users attached.

        Args:
            id: primary key of the userrole (path parameter).

        Returns:
            dict: the serialized userrole, extended with a ``users`` list of
            the serialized users linked through UserUserRolesModel.
        """
        userrole = UserRolesModel.by_id(id)
        userroles_part = UserRolesModel.to_json(userrole)
        useruserroles = UserUserRolesModel.by_user_role_id(userrole.id)
        userroles_part['users'] = []
        for uur in useruserroles:
            user = UserModel.by_id(uur.user_id)
            userroles_part['users'].append(UserModel.to_json(user))
        return userroles_part
class UserRoleName(Resource):
    """REST resource returning a single userrole selected by its name."""
    @swagger.doc({
        'security': [{'bearerAuth': []}],
        'tags': ['userrole'],
        'description': "Return a userrole that match the given name",
        'parameters': [
            {
                'name': 'name',
                'description': 'the name of a userrole',
                'in': 'path',
                # The route registers this parameter as <string:name>, so the
                # documented type is string (was incorrectly 'integer').
                'schema': {'type': 'string'}
            }
        ],
        'responses': {
            '200': {
                'description': "The userrole that match the name",
                'content': {
                    'application/json': {
                        # Example payload keyed by media type, mirroring the
                        # structure documented on the UserRoles resource.
                        'examples': {
                            'application/json': {
                                "id": 1,
                                "created_at": "2019-11-13T16:08:45.263598",
                                "updated_at": "2019-11-13T16:31:34.940968",
                                "deleted_at": None,
                                "name": "admin",
                                "users": [
                                    {
                                        "id": 1,
                                        "name": "admin",
                                        "password": None,
                                        "created_at": "2019-11-13T16:08:45.490830",
                                        "updated_at": "2019-11-13T16:39:21.114928",
                                        "deleted_at": None,
                                        "is_admin": None,
                                        "email": "admin@localhost",
                                        "comment": "created by sshportal",
                                        "invite_token": "711pl82xwUJ4CCFw"
                                    }
                                ]
                            }
                        }
                    }
                }
            }
        }
    })
    @jwt_required
    def get(self, name):
        """Return the userrole matching ``name`` with its member users attached.

        Args:
            name: unique name of the userrole (path parameter).

        Returns:
            dict: the serialized userrole, extended with a ``users`` list of
            the serialized users linked through UserUserRolesModel.
        """
        userrole = UserRolesModel.by_name(name)
        userroles_part = UserRolesModel.to_json(userrole)
        useruserroles = UserUserRolesModel.by_user_role_id(userrole.id)
        userroles_part['users'] = []
        for uur in useruserroles:
            user = UserModel.by_id(uur.user_id)
            userroles_part['users'].append(UserModel.to_json(user))
        return userroles_part
# Register the userrole endpoints on the shared Flask-RESTful API object.
# The <int:id> / <string:name> converters keep the two single-role routes
# from shadowing each other under the same /v1/userrole/ prefix.
api.add_resource(UserRoles, '/v1/userroles')
api.add_resource(UserRoleId, '/v1/userrole/<int:id>')
api.add_resource(UserRoleName, '/v1/userrole/<string:name>')
| 38.308511
| 87
| 0.386837
| 516
| 7,202
| 5.248062
| 0.182171
| 0.026588
| 0.035451
| 0.057607
| 0.722304
| 0.722304
| 0.707533
| 0.707533
| 0.707533
| 0.707533
| 0
| 0.07805
| 0.50722
| 7,202
| 187
| 88
| 38.513369
| 0.684982
| 0
| 0
| 0.621302
| 0
| 0
| 0.218134
| 0.049847
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017751
| false
| 0.017751
| 0.029586
| 0
| 0.08284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b513c9e9933b1d5d06d26fc69bd5f00070c530af
| 10,642
|
py
|
Python
|
core/tests/test_tables.py
|
lttga/test
|
9f0125b3e0daf1dfbec4d7083b331206e85b02fc
|
[
"BSD-3-Clause"
] | null | null | null |
core/tests/test_tables.py
|
lttga/test
|
9f0125b3e0daf1dfbec4d7083b331206e85b02fc
|
[
"BSD-3-Clause"
] | 2
|
2021-03-18T08:07:54.000Z
|
2021-04-09T11:17:33.000Z
|
core/tests/test_tables.py
|
lttga/test
|
9f0125b3e0daf1dfbec4d7083b331206e85b02fc
|
[
"BSD-3-Clause"
] | null | null | null |
import errno
import pytest
from django.conf import settings
from rest_framework import status
from core.tests.utils import create_data_selection
@pytest.mark.django_db
class TestTableViews:
    """API tests for listing, patching and previewing a selection's tables."""
    # All endpoints live under the (optionally API_PREFIX-ed) uploads root.
    url_prefix = "/uploads/" if not settings.API_PREFIX else f"/{settings.API_PREFIX}uploads/"
    @pytest.fixture(autouse=True)
    def setUp(self, client, dataset, cleanup_upload_task, validation_task, upload_obj, upload_obj_validated, mocker):
        """Stash the shared fixtures on the instance for every test method."""
        self.client = client
        self.dataset = dataset
        self.task_cleanup = cleanup_upload_task
        self.task_validation = validation_task
        self.datasource = upload_obj
        self.validated_datasource = upload_obj_validated
        self.mocker = mocker
    def test_delete_table(self):
        """PATCHing include=False excludes a table and the change persists."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        response = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        )
        assert len(response.json()) == 2
        table_data = response.json()[0]
        assert table_data["include"]
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{table_data['id']}/",
            content_type="application/json",
            data={"include": False},
        )
        assert response.status_code == status.HTTP_200_OK
        assert not response.json()["include"]
        response = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{table_data['id']}/"
        )
        assert not response.json()["include"]
    def test_list_tables(self):
        """A fresh selection exposes two tables."""
        selection = create_data_selection(self.client, self.datasource, self.url_prefix)
        response = self.client.get(f"{self.url_prefix}{self.datasource.id}/selections/{selection['id']}/tables/")
        assert len(response.json()) == 2
    def test_table_preview(self):
        """Default preview returns one entry with the basic key set."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        response = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/preview/"
        )
        assert len(response.json()) == 1
        data = response.json()[0]
        assert set(data.keys()) == {"id", "name", "preview", "heading"}
    def test_table_r_friendly_preview(self):
        """Switching headings_type to es_r_friendly adds column_headings."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/",
            data={"headings_type": "es_r_friendly"},
            content_type="application/json",
        )
        response = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/preview/"
        )
        assert len(response.json()) == 1
        data = response.json()[0]
        assert set(data.keys()) == {"id", "name", "preview", "column_headings", "heading"}
    def test_table_split_preview(self):
        """Splitting a table grows the preview to 4 entries."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/",
            data={"split": True},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/",
            data={"headings_type": "es_r_friendly"},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/preview/"
        )
        assert len(response.json()) == 4
        data = response.json()[0]
        assert set(data.keys()) == {"id", "name", "preview", "heading", "column_headings"}
    def test_table_split_include_preview(self):
        """Excluding one split-off array table shrinks the preview to 3."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/",
            data={"split": True},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        array_tables = response.json()["array_tables"]
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/",
            data={"headings_type": "es_r_friendly"},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{array_tables[0]['id']}/",
            data={"include": False},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/preview/"
        )
        assert len(response.json()) == 3
        data = response.json()[0]
        assert set(data.keys()) == {"id", "name", "preview", "heading", "column_headings"}
    def test_table_split_no_left_space(self):
        """ENOSPC while splitting maps to HTTP 413 with a friendly message."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        mocked_split = self.mocker.patch("core.views.store_preview_csv")
        mocked_split.side_effect = OSError(errno.ENOSPC, "No left space.")
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/",
            data={"split": True},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_413_REQUEST_ENTITY_TOO_LARGE
        assert response.json() == {"detail": "Currently, the space limit was reached. Please try again later."}
    def test_table_split_preview_no_left_space(self):
        """ENOSPC while building the preview also maps to HTTP 413."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/",
            data={"split": True},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        array_tables = response.json()["array_tables"]
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/",
            data={"headings_type": "es_r_friendly"},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{array_tables[0]['id']}/",
            data={"include": False},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_200_OK
        mocked_split = self.mocker.MagicMock()
        mocked_split.side_effect = OSError(errno.ENOSPC, "No left space.")
        # NOTE(review): mocker.patch applies the patch immediately and returns
        # the new object; using that return value as a context manager relies
        # on MagicMock's __enter__ rather than patch scoping - confirm the
        # intended patch lifetime here.
        with self.mocker.patch("core.views.store_preview_csv", mocked_split):
            response = self.client.get(
                f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/preview/"
            )
            assert response.status_code == status.HTTP_413_REQUEST_ENTITY_TOO_LARGE
            assert response.json() == {"detail": "Currently, the space limit was reached. Please try again later."}
    def test_table_split_file_not_found(self):
        """A vanished datasource file during split maps to HTTP 404."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        mocked_open = self.mocker.patch("core.views.DataPreprocessor.restore")
        mocked_open.side_effect = FileNotFoundError(errno.ENOENT, "File not found.")
        response = self.client.patch(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/",
            data={"split": True},
            content_type="application/json",
        )
        assert response.status_code == status.HTTP_404_NOT_FOUND
        assert response.json() == {"detail": "Datasource expired."}
    def test_table_preview_file_not_found(self):
        """A vanished datasource file during preview maps to HTTP 404."""
        selection = create_data_selection(self.client, self.validated_datasource, self.url_prefix)
        tables = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/"
        ).json()
        mocked_open = self.mocker.patch("core.views.open")
        mocked_open.return_value.__enter__.side_effect = FileNotFoundError(errno.ENOENT, "File not found.")
        response = self.client.get(
            f"{self.url_prefix}{self.validated_datasource.id}/selections/{selection['id']}/tables/{tables[0]['id']}/preview/"
        )
        assert response.status_code == status.HTTP_404_NOT_FOUND
        assert response.json() == {"detail": "Datasource expired."}
| 46.675439
| 129
| 0.646213
| 1,248
| 10,642
| 5.300481
| 0.098558
| 0.06198
| 0.076644
| 0.061376
| 0.879063
| 0.863492
| 0.863492
| 0.863492
| 0.850642
| 0.850642
| 0
| 0.007467
| 0.207198
| 10,642
| 227
| 130
| 46.881057
| 0.776579
| 0
| 0
| 0.666667
| 0
| 0.078125
| 0.341383
| 0.267901
| 0
| 0
| 0
| 0
| 0.15625
| 1
| 0.057292
| false
| 0
| 0.026042
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b5157a4c95839b7b9d10010e5042730d3bec1af9
| 260
|
py
|
Python
|
json_ref_dict/exceptions.py
|
RangelReale/json-ref-dict
|
1ed1c96707359b5d648bafd3062a4446b469b682
|
[
"MIT"
] | 9
|
2020-06-25T13:05:22.000Z
|
2022-02-21T15:32:47.000Z
|
json_ref_dict/exceptions.py
|
RangelReale/json-ref-dict
|
1ed1c96707359b5d648bafd3062a4446b469b682
|
[
"MIT"
] | 7
|
2020-03-20T16:36:58.000Z
|
2021-09-10T09:55:28.000Z
|
json_ref_dict/exceptions.py
|
RangelReale/json-ref-dict
|
1ed1c96707359b5d648bafd3062a4446b469b682
|
[
"MIT"
] | 8
|
2020-10-30T13:58:21.000Z
|
2021-09-06T09:05:04.000Z
|
class JSONRefParseError(Exception):
    """Base exception for failures when parsing.

    Catch this type to handle both document and reference parse errors.
    """
class DocumentParseError(JSONRefParseError):
    """Failed to parse a document."""
class ReferenceParseError(JSONRefParseError):
    """Failed to parse a reference."""
| 23.636364
| 51
| 0.738462
| 25
| 260
| 7.68
| 0.64
| 0.239583
| 0.260417
| 0.3125
| 0.322917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146154
| 260
| 10
| 52
| 26
| 0.864865
| 0.376923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
d20183b3fde2aa257945634822e51f9bcf8b4bb1
| 27,532
|
py
|
Python
|
tests/test_cfg_cls.py
|
KonnexionsGmbH/dcr
|
3b58be5df66831e5389558599cf1d234da605aeb
|
[
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2022-02-24T15:13:23.000Z
|
2022-03-28T00:45:31.000Z
|
tests/test_cfg_cls.py
|
KonnexionsGmbH/dcr
|
3b58be5df66831e5389558599cf1d234da605aeb
|
[
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
tests/test_cfg_cls.py
|
KonnexionsGmbH/dcr
|
3b58be5df66831e5389558599cf1d234da605aeb
|
[
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# pylint: disable=unused-argument
"""Testing Module cfg.cls_..."""
import os
import cfg.cls_setup
import cfg.glob
import pytest
# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=W0212
# @pytest.mark.issue
# -----------------------------------------------------------------------------
# Check parameter DELETE_AUXILIARY_FILES - True.
# -----------------------------------------------------------------------------
def check_param_delete_auxiliary_files():
    """Check parameter DELETE_AUXILIARY_FILES - True."""
    # A placeholder value (anything that is not "false") keeps the flag True.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_DELETE_AUXILIARY_FILES, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_delete_auxiliary_files, "DCR_CFG_DELETE_AUXILIARY_FILES: true (not false)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # A case-insensitive "false" disables the flag.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_DELETE_AUXILIARY_FILES, "fALSE")],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_delete_auxiliary_files, "DCR_CFG_DELETE_AUXILIARY_FILES: false"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Check parameter IGNORE_DUPLICATES - False.
# -----------------------------------------------------------------------------
def check_param_ignore_duplicates():
    """Check parameter IGNORE_DUPLICATES - False."""
    # NOTE(review): this assignment is replaced by the fresh Setup() one line
    # later - presumably kept for attribute coverage; confirm before removing.
    cfg.glob.setup.is_ignore_duplicates = False
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert len(cfg.glob.setup._config) == cfg.glob.setup._CONFIG_PARAM_NO, "cfg:: complete"
    # A placeholder value (anything that is not "true") leaves the flag False.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_ignore_duplicates, "DCR_CFG_IGNORE_DUPLICATES: false (any not true)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # A case-insensitive "true" enables the flag.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES, "TruE")],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_ignore_duplicates, "DCR_CFG_IGNORE_DUPLICATES: true"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Check parameter TETML_PAGE - False.
# -----------------------------------------------------------------------------
def check_param_tetml_page():
    """Check parameter TETML_PAGE - False."""
    # A placeholder value (anything that is not "true") leaves the flag False.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_TETML_PAGE, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_tetml_page, "DCR_CFG_TETML_PAGE: false (not true)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # A case-insensitive "true" enables the flag.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_TETML_PAGE, "tRUE")],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_tetml_page, "DCR_CFG_TETML_PAGE: true"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Check parameter TETML_WORD - False.
# -----------------------------------------------------------------------------
def check_param_tetml_word():
    """Check parameter TETML_WORD - False."""
    # A placeholder value (anything that is not "true") leaves the flag False.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_TETML_WORD, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_tetml_word, "DCR_CFG_TETML_WORD: false (not true)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # A case-insensitive "true" enables the flag.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_TETML_WORD, "tRUE")],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_tetml_word, "DCR_CFG_TETML_WORD: true"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Check parameter TOKENIZE_2_DATABASE - True.
# -----------------------------------------------------------------------------
def check_param_tokenize_2_database():
    """Check parameter TOKENIZE_2_DATABASE - True."""
    # A placeholder value (anything that is not "false") keeps the flag True.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_TOKENIZE_2_DATABASE, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_tokenize_2_database, "DCR_CFG_TOKENIZE_2_DATABASE: true (not false)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # Disabling the database target requires the JSON-file target to be on.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [
            (cfg.cls_setup.Setup._DCR_CFG_TOKENIZE_2_DATABASE, "fALSE"),
            (cfg.cls_setup.Setup._DCR_CFG_TOKENIZE_2_JSONFILE, "tRUE"),
        ],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_tokenize_2_database, "DCR_CFG_TOKENIZE_2_DATABASE: false"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Check parameter TOKENIZE_2_JSONFILE - False.
# -----------------------------------------------------------------------------
def check_param_tokenize_2_jsonfile():
    """Check parameter TOKENIZE_2_JSONFILE - False."""
    # A placeholder value (anything that is not "true") leaves the flag False.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_TOKENIZE_2_JSONFILE, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_tokenize_2_jsonfile, "DCR_CFG_TOKENIZE_2_JSONFILE: false (not true)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # A case-insensitive "true" enables the flag.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_TOKENIZE_2_JSONFILE, "tRUE")],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_tokenize_2_jsonfile, "DCR_CFG_TOKENIZE_2_JSONFILE: true"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # Disabling both tokenize targets is invalid and must abort the setup.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [
            (cfg.cls_setup.Setup._DCR_CFG_TOKENIZE_2_DATABASE, "fALSE"),
            (cfg.cls_setup.Setup._DCR_CFG_TOKENIZE_2_JSONFILE, "fALSE"),
        ],
    )
    with pytest.raises(SystemExit) as expt:
        cfg.glob.setup = cfg.cls_setup.Setup()
    assert expt.type == SystemExit, "both DCR_CFG_TOKENIZE_2_DATABASE and DCR_CFG_TOKENIZE_2_JSONFILE false"
    assert expt.value.code == 1, "both DCR_CFG_TOKENIZE_2_DATABASE and DCR_CFG_TOKENIZE_2_JSONFILE false"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Check parameter VERBOSE - True.
# -----------------------------------------------------------------------------
def check_param_verbose():
    """Check parameter VERBOSE - True."""
    # A placeholder value (anything that is not "false") keeps the flag True.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_VERBOSE, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_verbose, "DCR_CFG_VERBOSE: true (not false)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # A case-insensitive "false" disables the flag.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_VERBOSE, "fALSE")],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_verbose, "DCR_CFG_VERBOSE: false"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Check parameter VERBOSE_LINE_TYPE - False.
# -----------------------------------------------------------------------------
def check_param_verbose_line_type():
    """Check parameter VERBOSE_LINE_TYPE - False."""
    # A placeholder value (anything that is not "true") leaves the flag False.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_VERBOSE_LINE_TYPE, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert not cfg.glob.setup.is_verbose_line_type, "DCR_CFG_VERBOSE_LINE_TYPE: false (not true)"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    # A case-insensitive "true" enables the flag.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_VERBOSE_LINE_TYPE, "tRUE")],
    )
    cfg.glob.setup = cfg.cls_setup.Setup()
    assert cfg.glob.setup.is_verbose_line_type, "DCR_CFG_VERBOSE_LINE_TYPE: true"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
# -----------------------------------------------------------------------------
# Test Function - get_config().
# -----------------------------------------------------------------------------
def test_get_config(fxtr_setup_logger_environment):
    """Test: get_config()."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)
    # An invalid PDF2IMAGE_TYPE must abort the setup with exit code 1.
    saved = pytest.helpers.backup_config_params(
        cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
        [(cfg.cls_setup.Setup._DCR_CFG_PDF2IMAGE_TYPE, cfg.glob.INFORMATION_NOT_YET_AVAILABLE)],
    )
    with pytest.raises(SystemExit) as expt:
        cfg.glob.setup = cfg.cls_setup.Setup()
    assert expt.type == SystemExit, "DCR_CFG_PDF2IMAGE_TYPE: invalid"
    assert expt.value.code == 1, "DCR_CFG_PDF2IMAGE_TYPE: invalid"
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, saved)
    cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_config() - coverage - false.
# -----------------------------------------------------------------------------
def test_get_config_coverage_false(fxtr_setup_logger_environment):
    """Test: test_get_config_coverage_false()."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)
    # Exercise the spaCy configuration branch with every flag set to "false".
    saved = pytest.helpers.set_complete_cfg_spacy("false")
    cfg.glob.setup = cfg.cls_setup.Setup()
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_SPACY, saved)
    cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_config() - coverage - true.
# -----------------------------------------------------------------------------
def test_get_config_coverage_true(fxtr_setup_logger_environment):
    """Test: test_get_config_coverage_true()."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)
    # Exercise the spaCy configuration branch with every flag set to "true".
    saved = pytest.helpers.set_complete_cfg_spacy("true")
    cfg.glob.setup = cfg.cls_setup.Setup()
    pytest.helpers.restore_config_params(cfg.cls_setup.Setup._DCR_CFG_SECTION_SPACY, saved)
    cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_config().
# -----------------------------------------------------------------------------
def test_get_config_logical_false(fxtr_setup_logger_environment):
    """Test: test_get_config_tetml()."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)
    # Boolean parameters whose default is False, checked in order.
    for check in (
        check_param_ignore_duplicates,
        check_param_tetml_page,
        check_param_tetml_word,
        check_param_tokenize_2_jsonfile,
        check_param_verbose_line_type,
    ):
        check()
    cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_config().
# -----------------------------------------------------------------------------
def test_get_config_logical_true(fxtr_setup_logger_environment):
    """Test: test_get_config_tetml()."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)
    # Boolean parameters whose default is True, checked in order.
    for check in (
        check_param_delete_auxiliary_files,
        check_param_tokenize_2_database,
        check_param_verbose,
    ):
        check()
    cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_config() - missing.
# -----------------------------------------------------------------------------
def test_get_config_missing(fxtr_setup_logger_environment): # pylint: disable=R0915
"""Test: get_config() - missing."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
cfg.glob.setup = cfg.cls_setup.Setup()
assert len(cfg.glob.setup._config) == cfg.glob.setup._CONFIG_PARAM_NO, "cfg:: complete"
# -------------------------------------------------------------------------
values_original = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION, cfg.cls_setup.Setup._DCR_CFG_DIRECTORY_INBOX
)
values_original_test = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, cfg.cls_setup.Setup._DCR_CFG_DIRECTORY_INBOX
)
with pytest.raises(SystemExit) as expt:
cfg.glob.setup = cfg.cls_setup.Setup()
assert expt.type == SystemExit, "DCR_CFG_DIRECTORY_INBOX: missing"
assert expt.value.code == 1, "DCR_CFG_DIRECTORY_INBOX: missing"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original_test,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION, cfg.cls_setup.Setup._DCR_CFG_DIRECTORY_INBOX_ACCEPTED
)
values_original_test = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, cfg.cls_setup.Setup._DCR_CFG_DIRECTORY_INBOX_ACCEPTED
)
with pytest.raises(SystemExit) as expt:
cfg.glob.setup = cfg.cls_setup.Setup()
assert expt.type == SystemExit, "DCR_CFG_DIRECTORY_INBOX_ACCEPTED: missing"
assert expt.value.code == 1, "DCR_CFG_DIRECTORY_INBOX_ACCEPTED: missing"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original_test,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION, cfg.cls_setup.Setup._DCR_CFG_DIRECTORY_INBOX_REJECTED
)
values_original_test = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, cfg.cls_setup.Setup._DCR_CFG_DIRECTORY_INBOX_REJECTED
)
with pytest.raises(SystemExit) as expt:
cfg.glob.setup = cfg.cls_setup.Setup()
assert expt.type == SystemExit, "DCR_CFG_DIRECTORY_INBOX_REJECTED: missing"
assert expt.value.code == 1, "DCR_CFG_DIRECTORY_INBOX_REJECTED: missing"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original_test,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, cfg.cls_setup.Setup._DCR_CFG_IGNORE_DUPLICATES
)
cfg.glob.setup.is_ignore_duplicates = False
cfg.glob.setup = cfg.cls_setup.Setup()
assert not cfg.glob.setup.is_ignore_duplicates, "DCR_CFG_IGNORE_DUPLICATES: false (missing)"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, cfg.cls_setup.Setup._DCR_CFG_PDF2IMAGE_TYPE
)
cfg.glob.setup.pdf2image_type = cfg.glob.setup.PDF2IMAGE_TYPE_JPEG
cfg.glob.setup = cfg.cls_setup.Setup()
assert cfg.glob.setup.pdf2image_type == cfg.glob.setup.PDF2IMAGE_TYPE_JPEG, (
"DCR_CFG_PDF2IMAGE_TYPE: default should not be '" + cfg.glob.setup.pdf2image_type + "'"
)
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION, cfg.cls_setup.Setup._DCR_CFG_VERBOSE
)
values_original_test = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, cfg.cls_setup.Setup._DCR_CFG_VERBOSE
)
cfg.glob.setup.is_verbose = True
cfg.glob.setup = cfg.cls_setup.Setup()
assert cfg.glob.setup.is_verbose, "DCR_CFG_VERBOSE: true (missing)"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION,
values_original,
)
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original_test,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION, cfg.cls_setup.Setup._DCR_CFG_VERBOSE_PARSER
)
values_original_test = pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST, cfg.cls_setup.Setup._DCR_CFG_VERBOSE_PARSER
)
cfg.glob.setup = cfg.cls_setup.Setup()
assert cfg.glob.setup.verbose_parser == "none", "DCR_CFG_VERBOSE_PARSER: none (missing)"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original_test,
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_config() - unknown.
# -----------------------------------------------------------------------------
def test_get_config_unknown(fxtr_setup_logger_environment):
"""Test: get_config() - unknown."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
cfg.glob.setup = cfg.cls_setup.Setup()
assert len(cfg.glob.setup._config) == cfg.glob.setup._CONFIG_PARAM_NO, "cfg:: complete"
# -------------------------------------------------------------------------
pytest.helpers.insert_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
"UNKNOWN",
"n/a",
)
with pytest.raises(SystemExit) as expt:
cfg.glob.setup = cfg.cls_setup.Setup()
assert expt.type == SystemExit, "UNKNOWN: unknown"
assert expt.value.code == 1, "UNKNOWN: unknown"
pytest.helpers.delete_config_param(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
"UNKNOWN",
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_config().
# -----------------------------------------------------------------------------
def test_get_config_verbose_parser(fxtr_setup_logger_environment):
"""Test: get_config_verbose_parser()."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_VERBOSE_PARSER, "aLL"),
],
)
cfg.glob.setup = cfg.cls_setup.Setup()
assert cfg.glob.setup.verbose_parser == "all", "DCR_CFG_VERBOSE_PARSER: all"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_VERBOSE_PARSER, cfg.glob.INFORMATION_NOT_YET_AVAILABLE),
],
)
cfg.glob.setup = cfg.cls_setup.Setup()
assert cfg.glob.setup.verbose_parser == "none", "DCR_CFG_VERBOSE_PARSER: none (not all or text)"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
values_original = pytest.helpers.backup_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
[
(cfg.cls_setup.Setup._DCR_CFG_VERBOSE_PARSER, "tEXT"),
],
)
cfg.glob.setup = cfg.cls_setup.Setup()
assert cfg.glob.setup.verbose_parser == "text", "DCR_CFG_VERBOSE_PARSER: all"
pytest.helpers.restore_config_params(
cfg.cls_setup.Setup._DCR_CFG_SECTION_ENV_TEST,
values_original,
)
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
# -----------------------------------------------------------------------------
# Test Function - get_environment().
# -----------------------------------------------------------------------------
def test_get_environment(fxtr_setup_logger):
"""Test: get_environment()."""
cfg.glob.logger.debug(cfg.glob.LOGGER_START)
# -------------------------------------------------------------------------
cfg.glob.setup = cfg.cls_setup.Setup()
os.environ[cfg.glob.setup._DCR_ENVIRONMENT_TYPE] = cfg.glob.INFORMATION_NOT_YET_AVAILABLE
with pytest.raises(SystemExit) as expt:
cfg.glob.setup._get_environment_variant()
os.environ[cfg.glob.setup._DCR_ENVIRONMENT_TYPE] = cfg.glob.setup.ENVIRONMENT_TYPE_TEST
assert expt.type == SystemExit, "_DCR_ENVIRONMENT_TYPE: invalid"
assert expt.value.code == 1, "_DCR_ENVIRONMENT_TYPE: invalid"
# -------------------------------------------------------------------------
os.environ.pop(cfg.glob.setup._DCR_ENVIRONMENT_TYPE)
with pytest.raises(SystemExit) as expt:
cfg.glob.setup._get_environment_variant()
os.environ[cfg.glob.setup._DCR_ENVIRONMENT_TYPE] = cfg.glob.setup.ENVIRONMENT_TYPE_TEST
assert expt.type == SystemExit, "_DCR_ENVIRONMENT_TYPE: missing"
assert expt.value.code == 1, "_DCR_ENVIRONMENT_TYPE: missing"
# -------------------------------------------------------------------------
cfg.glob.setup._get_environment_variant()
assert cfg.glob.setup.environment_variant == cfg.glob.setup.ENVIRONMENT_TYPE_TEST, "_DCR_ENVIRONMENT_TYPE: ok"
# -------------------------------------------------------------------------
cfg.glob.logger.debug(cfg.glob.LOGGER_END)
| 36.955705
| 114
| 0.570064
| 2,947
| 27,532
| 4.885307
| 0.036308
| 0.059179
| 0.107731
| 0.155588
| 0.958116
| 0.92561
| 0.889421
| 0.8806
| 0.865389
| 0.854831
| 0
| 0.002273
| 0.153095
| 27,532
| 744
| 115
| 37.005376
| 0.615173
| 0.249891
| 0
| 0.559653
| 0
| 0
| 0.076901
| 0.042163
| 0
| 0
| 0
| 0
| 0.093275
| 1
| 0.036876
| false
| 0
| 0.008677
| 0
| 0.045553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d20808e25d95837d0f9f3f594f2630f73b403812
| 317
|
py
|
Python
|
source/window/__init__.py
|
whitegreyblack/PyWin
|
78f3637b4c03c11d7f6ef15b20a1acf699d4be24
|
[
"MIT"
] | null | null | null |
source/window/__init__.py
|
whitegreyblack/PyWin
|
78f3637b4c03c11d7f6ef15b20a1acf699d4be24
|
[
"MIT"
] | null | null | null |
source/window/__init__.py
|
whitegreyblack/PyWin
|
78f3637b4c03c11d7f6ef15b20a1acf699d4be24
|
[
"MIT"
] | null | null | null |
from source.window.base import Window
from source.window.display import DisplayWindow
from source.window.help import HelpWindow
from source.window.scrollable import ScrollableWindow, keypress_up, keypress_down, keypress_a
from source.window.property import WindowProperty
from source.window.prompt import PromptWindow
| 52.833333
| 93
| 0.870662
| 42
| 317
| 6.5
| 0.452381
| 0.21978
| 0.351648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082019
| 317
| 6
| 94
| 52.833333
| 0.938144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d275ca3cfb4b132e112eaee9ad51ad67b84b073a
| 94
|
py
|
Python
|
ansible_var_checker/jinja/visitors/__init__.py
|
KlutzyBubbles/jinja2schema
|
6e04e8b2b1cdef6dbecab80f0d28129873d3f82b
|
[
"BSD-3-Clause"
] | null | null | null |
ansible_var_checker/jinja/visitors/__init__.py
|
KlutzyBubbles/jinja2schema
|
6e04e8b2b1cdef6dbecab80f0d28129873d3f82b
|
[
"BSD-3-Clause"
] | null | null | null |
ansible_var_checker/jinja/visitors/__init__.py
|
KlutzyBubbles/jinja2schema
|
6e04e8b2b1cdef6dbecab80f0d28129873d3f82b
|
[
"BSD-3-Clause"
] | null | null | null |
from .util import visit, visit_many
from .expr import visit_expr
from .stmt import visit_stmt
| 23.5
| 35
| 0.819149
| 16
| 94
| 4.625
| 0.4375
| 0.445946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138298
| 94
| 3
| 36
| 31.333333
| 0.91358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d283ba2d7482ed610ea1ddf6cbe8b70775d3efeb
| 47
|
py
|
Python
|
timm/models/vit/__init__.py
|
Hanson0910/Pytorch-RIADD
|
6dfacf729ffcdfb88631f24ae1b88db20b418e5b
|
[
"Apache-2.0"
] | 22
|
2021-02-28T21:51:35.000Z
|
2022-03-25T08:44:47.000Z
|
timm/models/vit/__init__.py
|
Ravimk07/Pytorch-RIADD
|
6dfacf729ffcdfb88631f24ae1b88db20b418e5b
|
[
"Apache-2.0"
] | 2
|
2021-04-25T03:23:33.000Z
|
2022-03-17T06:07:30.000Z
|
timm/models/vit/__init__.py
|
Ravimk07/Pytorch-RIADD
|
6dfacf729ffcdfb88631f24ae1b88db20b418e5b
|
[
"Apache-2.0"
] | 2
|
2021-09-12T14:12:26.000Z
|
2022-03-25T08:44:49.000Z
|
from .modeling import VisionTransformer,CONFIGS
| 47
| 47
| 0.893617
| 5
| 47
| 8.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
962dfe23b0c666eaf50a680fa470a59d6502ed22
| 182
|
py
|
Python
|
bin/freebox_show_status.py
|
ngraziano/freeboxv5-status
|
77f58dae1d55ae081ea18f1d30ca9c73cbb92a7c
|
[
"MIT"
] | null | null | null |
bin/freebox_show_status.py
|
ngraziano/freeboxv5-status
|
77f58dae1d55ae081ea18f1d30ca9c73cbb92a7c
|
[
"MIT"
] | null | null | null |
bin/freebox_show_status.py
|
ngraziano/freeboxv5-status
|
77f58dae1d55ae081ea18f1d30ca9c73cbb92a7c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import freebox_v5_status.freeboxstatus
import pprint
fbx = freebox_v5_status.freeboxstatus.FreeboxStatus()
pprint.pprint( fbx.status )
| 20.222222
| 53
| 0.769231
| 23
| 182
| 5.913043
| 0.565217
| 0.132353
| 0.220588
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018405
| 0.104396
| 182
| 8
| 54
| 22.75
| 0.815951
| 0.225275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
9661efbd57c0d3724b91578ae1802b46d72f41dc
| 27
|
py
|
Python
|
strategy/__init__.py
|
hermian/startetf
|
70c2fb1ead50db470141712b73b4f9f8579faa08
|
[
"MIT"
] | 6
|
2022-01-27T13:24:13.000Z
|
2022-03-27T07:39:26.000Z
|
strategy/__init__.py
|
hermian/startetf
|
70c2fb1ead50db470141712b73b4f9f8579faa08
|
[
"MIT"
] | null | null | null |
strategy/__init__.py
|
hermian/startetf
|
70c2fb1ead50db470141712b73b4f9f8579faa08
|
[
"MIT"
] | 4
|
2022-01-24T08:44:34.000Z
|
2022-03-27T07:39:28.000Z
|
from .long_only_ew import *
| 27
| 27
| 0.814815
| 5
| 27
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
969ad901af56a94fb9eefcda418e075ea062fc12
| 51
|
py
|
Python
|
figuregen/util/units.py
|
Mira-13/figure-gen
|
fd1b8814423dd34973a3fafe68ff5c0f95c08590
|
[
"MIT"
] | 75
|
2020-09-17T17:17:17.000Z
|
2022-01-21T14:28:14.000Z
|
figuregen/util/units.py
|
neshume/figure-gen
|
e4cb1d9ea1841980b4dc90953325e1a8c6dfb510
|
[
"MIT"
] | 3
|
2020-09-29T11:51:35.000Z
|
2020-10-22T15:27:40.000Z
|
figuregen/util/units.py
|
neshume/figure-gen
|
e4cb1d9ea1841980b4dc90953325e1a8c6dfb510
|
[
"MIT"
] | 2
|
2020-10-24T05:57:49.000Z
|
2022-03-14T17:06:02.000Z
|
def mm_to_inches(mm):
return mm * 0.03937007874
| 25.5
| 29
| 0.72549
| 9
| 51
| 3.888889
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0.176471
| 51
| 2
| 29
| 25.5
| 0.547619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
96a0236b085ebe21fddc39bea97188f039639cf3
| 27,935
|
py
|
Python
|
google/cloud/securitycenter_v1/proto/securitycenter_service_pb2_grpc.py
|
tdh911/python-securitycenter
|
54263c36adc26549933e0e9b1b8055b69eb81489
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/securitycenter_v1/proto/securitycenter_service_pb2_grpc.py
|
tdh911/python-securitycenter
|
54263c36adc26549933e0e9b1b8055b69eb81489
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/securitycenter_v1/proto/securitycenter_service_pb2_grpc.py
|
tdh911/python-securitycenter
|
54263c36adc26549933e0e9b1b8055b69eb81489
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.securitycenter_v1.proto import (
finding_pb2 as google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_finding__pb2,
)
from google.cloud.securitycenter_v1.proto import (
notification_config_pb2 as google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_notification__config__pb2,
)
from google.cloud.securitycenter_v1.proto import (
organization_settings_pb2 as google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_organization__settings__pb2,
)
from google.cloud.securitycenter_v1.proto import (
security_marks_pb2 as google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_security__marks__pb2,
)
from google.cloud.securitycenter_v1.proto import (
securitycenter_service_pb2 as google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2,
)
from google.cloud.securitycenter_v1.proto import (
source_pb2 as google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_source__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class SecurityCenterStub(object):
"""V1 APIs for Security Center service.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateSource = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/CreateSource",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.CreateSourceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_source__pb2.Source.FromString,
)
self.CreateFinding = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/CreateFinding",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.CreateFindingRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_finding__pb2.Finding.FromString,
)
self.CreateNotificationConfig = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/CreateNotificationConfig",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.CreateNotificationConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_notification__config__pb2.NotificationConfig.FromString,
)
self.DeleteNotificationConfig = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/DeleteNotificationConfig",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.DeleteNotificationConfigRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetIamPolicy = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/GetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.GetNotificationConfig = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/GetNotificationConfig",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.GetNotificationConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_notification__config__pb2.NotificationConfig.FromString,
)
self.GetOrganizationSettings = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/GetOrganizationSettings",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.GetOrganizationSettingsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_organization__settings__pb2.OrganizationSettings.FromString,
)
self.GetSource = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/GetSource",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.GetSourceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_source__pb2.Source.FromString,
)
self.GroupAssets = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/GroupAssets",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.GroupAssetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.GroupAssetsResponse.FromString,
)
self.GroupFindings = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/GroupFindings",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.GroupFindingsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.GroupFindingsResponse.FromString,
)
self.ListAssets = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/ListAssets",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListAssetsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListAssetsResponse.FromString,
)
self.ListFindings = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/ListFindings",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListFindingsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListFindingsResponse.FromString,
)
self.ListNotificationConfigs = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/ListNotificationConfigs",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListNotificationConfigsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListNotificationConfigsResponse.FromString,
)
self.ListSources = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/ListSources",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListSourcesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.ListSourcesResponse.FromString,
)
self.RunAssetDiscovery = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/RunAssetDiscovery",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.RunAssetDiscoveryRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.SetFindingState = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/SetFindingState",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.SetFindingStateRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_finding__pb2.Finding.FromString,
)
self.SetIamPolicy = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/SetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.TestIamPermissions = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/TestIamPermissions",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
)
self.UpdateFinding = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/UpdateFinding",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.UpdateFindingRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_finding__pb2.Finding.FromString,
)
self.UpdateNotificationConfig = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/UpdateNotificationConfig",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.UpdateNotificationConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_notification__config__pb2.NotificationConfig.FromString,
)
self.UpdateOrganizationSettings = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/UpdateOrganizationSettings",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.UpdateOrganizationSettingsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_organization__settings__pb2.OrganizationSettings.FromString,
)
self.UpdateSource = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/UpdateSource",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.UpdateSourceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_source__pb2.Source.FromString,
)
self.UpdateSecurityMarks = channel.unary_unary(
"/google.cloud.securitycenter.v1.SecurityCenter/UpdateSecurityMarks",
request_serializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2.UpdateSecurityMarksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_security__marks__pb2.SecurityMarks.FromString,
)
class SecurityCenterServicer(object):
"""V1 APIs for Security Center service.
"""
def CreateSource(self, request, context):
"""Creates a source.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateFinding(self, request, context):
"""Creates a finding. The corresponding source must exist for finding creation
to succeed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateNotificationConfig(self, request, context):
"""Creates a notification config.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteNotificationConfig(self, request, context):
"""Deletes a notification config.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetIamPolicy(self, request, context):
"""Gets the access control policy on the specified Source.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetNotificationConfig(self, request, context):
"""Gets a notification config.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetOrganizationSettings(self, request, context):
"""Gets the settings for an organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetSource(self, request, context):
"""Gets a source.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GroupAssets(self, request, context):
"""Filters an organization's assets and groups them by their specified
properties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GroupFindings(self, request, context):
"""Filters an organization or source's findings and groups them by their
specified properties.
To group across all sources provide a `-` as the source id.
Example: /v1/organizations/{organization_id}/sources/-/findings
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListAssets(self, request, context):
"""Lists an organization's assets.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListFindings(self, request, context):
"""Lists an organization or source's findings.
To list across all sources provide a `-` as the source id.
Example: /v1/organizations/{organization_id}/sources/-/findings
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListNotificationConfigs(self, request, context):
"""Lists notification configs.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListSources(self, request, context):
"""Lists all sources belonging to an organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RunAssetDiscovery(self, request, context):
"""Runs asset discovery. The discovery is tracked with a long-running
operation.
This API can only be called with limited frequency for an organization. If
it is called too frequently the caller will receive a TOO_MANY_REQUESTS
error.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetFindingState(self, request, context):
"""Updates the state of a finding.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetIamPolicy(self, request, context):
    """Sets the access control policy on the specified Source. (Stub.)"""
    detail = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(detail)
    raise NotImplementedError(detail)
def TestIamPermissions(self, request, context):
    """Returns the permissions that a caller has on the specified source. (Stub.)"""
    detail = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(detail)
    raise NotImplementedError(detail)
def UpdateFinding(self, request, context):
    """Creates or updates a finding.

    The corresponding source must exist for a finding creation to succeed.
    (Stub: not implemented on the server.)
    """
    detail = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(detail)
    raise NotImplementedError(detail)
def UpdateNotificationConfig(self, request, context):
    """Updates a notification config. (Stub: not implemented on the server.)"""
    detail = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(detail)
    raise NotImplementedError(detail)
def UpdateOrganizationSettings(self, request, context):
    """Updates an organization's settings. (Stub: not implemented on the server.)"""
    detail = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(detail)
    raise NotImplementedError(detail)
def UpdateSource(self, request, context):
    """Updates a source. (Stub: not implemented on the server.)"""
    detail = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(detail)
    raise NotImplementedError(detail)
def UpdateSecurityMarks(self, request, context):
    """Updates security marks. (Stub: not implemented on the server.)"""
    detail = "Method not implemented!"
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details(detail)
    raise NotImplementedError(detail)
def add_SecurityCenterServicer_to_server(servicer, server):
    """Register every SecurityCenter RPC handler from *servicer* on *server*.

    All methods of this service are unary-unary, so the handler table is
    built from a declarative (rpc name, request message, response message)
    spec instead of 23 hand-written entries.
    """
    # Short aliases for the generated protobuf modules.
    _svc = google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_securitycenter__service__pb2
    _source = google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_source__pb2
    _finding = google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_finding__pb2
    _notif = google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_notification__config__pb2
    _org = google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_organization__settings__pb2
    _marks = google_dot_cloud_dot_securitycenter__v1_dot_proto_dot_security__marks__pb2
    _iam = google_dot_iam_dot_v1_dot_iam__policy__pb2
    _policy = google_dot_iam_dot_v1_dot_policy__pb2
    _empty = google_dot_protobuf_dot_empty__pb2
    _ops = google_dot_longrunning_dot_operations__pb2
    # Order matches the original literal table; dicts preserve insertion order.
    _specs = [
        ("CreateSource", _svc.CreateSourceRequest, _source.Source),
        ("CreateFinding", _svc.CreateFindingRequest, _finding.Finding),
        ("CreateNotificationConfig", _svc.CreateNotificationConfigRequest, _notif.NotificationConfig),
        ("DeleteNotificationConfig", _svc.DeleteNotificationConfigRequest, _empty.Empty),
        ("GetIamPolicy", _iam.GetIamPolicyRequest, _policy.Policy),
        ("GetNotificationConfig", _svc.GetNotificationConfigRequest, _notif.NotificationConfig),
        ("GetOrganizationSettings", _svc.GetOrganizationSettingsRequest, _org.OrganizationSettings),
        ("GetSource", _svc.GetSourceRequest, _source.Source),
        ("GroupAssets", _svc.GroupAssetsRequest, _svc.GroupAssetsResponse),
        ("GroupFindings", _svc.GroupFindingsRequest, _svc.GroupFindingsResponse),
        ("ListAssets", _svc.ListAssetsRequest, _svc.ListAssetsResponse),
        ("ListFindings", _svc.ListFindingsRequest, _svc.ListFindingsResponse),
        ("ListNotificationConfigs", _svc.ListNotificationConfigsRequest, _svc.ListNotificationConfigsResponse),
        ("ListSources", _svc.ListSourcesRequest, _svc.ListSourcesResponse),
        ("RunAssetDiscovery", _svc.RunAssetDiscoveryRequest, _ops.Operation),
        ("SetFindingState", _svc.SetFindingStateRequest, _finding.Finding),
        ("SetIamPolicy", _iam.SetIamPolicyRequest, _policy.Policy),
        ("TestIamPermissions", _iam.TestIamPermissionsRequest, _iam.TestIamPermissionsResponse),
        ("UpdateFinding", _svc.UpdateFindingRequest, _finding.Finding),
        ("UpdateNotificationConfig", _svc.UpdateNotificationConfigRequest, _notif.NotificationConfig),
        ("UpdateOrganizationSettings", _svc.UpdateOrganizationSettingsRequest, _org.OrganizationSettings),
        ("UpdateSource", _svc.UpdateSourceRequest, _source.Source),
        ("UpdateSecurityMarks", _svc.UpdateSecurityMarksRequest, _marks.SecurityMarks),
    ]
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, request_cls, response_cls in _specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.cloud.securitycenter.v1.SecurityCenter", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| 60.596529
| 166
| 0.78092
| 2,855
| 27,935
| 7.086515
| 0.070403
| 0.113434
| 0.056742
| 0.068901
| 0.838325
| 0.819197
| 0.804765
| 0.747924
| 0.663157
| 0.657374
| 0
| 0.010382
| 0.15869
| 27,935
| 460
| 167
| 60.728261
| 0.850481
| 0.062789
| 0
| 0.347339
| 1
| 0
| 0.112991
| 0.064164
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070028
| false
| 0.02521
| 0.030812
| 0
| 0.106443
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96b8abd938923b406142627d88a67ec597ee7ef1
| 98
|
py
|
Python
|
tests/test_version.py
|
chrisburr/hist
|
d10132ab8d03f41152f0b934a18291ce699453b2
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_version.py
|
chrisburr/hist
|
d10132ab8d03f41152f0b934a18291ce699453b2
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_version.py
|
chrisburr/hist
|
d10132ab8d03f41152f0b934a18291ce699453b2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import hist
def test_version():
    """The hist package must expose a non-None version string."""
    version = hist.__version__
    assert version is not None
| 14
| 39
| 0.663265
| 14
| 98
| 4.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012821
| 0.204082
| 98
| 6
| 40
| 16.333333
| 0.75641
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7376f1b489f6ba8ffc11f50949e1f02e0088a9bc
| 174
|
py
|
Python
|
utils.py
|
pedrofracassi/insper-dessoft-ep2
|
510571e77d0d9d406a110b80b04d7e819a01cb2d
|
[
"MIT"
] | null | null | null |
utils.py
|
pedrofracassi/insper-dessoft-ep2
|
510571e77d0d9d406a110b80b04d7e819a01cb2d
|
[
"MIT"
] | null | null | null |
utils.py
|
pedrofracassi/insper-dessoft-ep2
|
510571e77d0d9d406a110b80b04d7e819a01cb2d
|
[
"MIT"
] | null | null | null |
import os
# Screen-clearing approach taken from poke's StackOverflow answer:
# https://stackoverflow.com/a/2084628
def limpa_tela():
    """Clear the terminal screen (``cls`` on Windows, ``clear`` elsewhere)."""
    comando = 'cls' if os.name == 'nt' else 'clear'
    os.system(comando)
| 24.857143
| 55
| 0.718391
| 27
| 174
| 4.592593
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04698
| 0.143678
| 174
| 6
| 56
| 29
| 0.785235
| 0.511494
| 0
| 0
| 0
| 0
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
73a1ad79f96208e06000c13f53b01ac445f70d36
| 3,656
|
py
|
Python
|
corehq/apps/data_dictionary/tests/test_util.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/apps/data_dictionary/tests/test_util.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/apps/data_dictionary/tests/test_util.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
import uuid
from django.test import TestCase
from mock import patch
from corehq.apps.data_dictionary.models import CaseProperty, CaseType
from corehq.apps.data_dictionary.util import generate_data_dictionary
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
class GenerateDictionaryTest(TestCase):
    """Tests for generate_data_dictionary() with case-property discovery mocked.

    Each test pins the number of queries issued and the resulting counts of
    CaseType / CaseProperty rows for this test's unique domain.
    """

    domain = uuid.uuid4().hex

    def tearDown(self):
        # Deleting CaseTypes cascades to their CaseProperties.
        CaseType.objects.filter(domain=self.domain).delete()

    def _counts(self):
        # (CaseType count, CaseProperty count) for this domain.
        type_count = CaseType.objects.filter(domain=self.domain).count()
        prop_count = CaseProperty.objects.filter(case_type__domain=self.domain).count()
        return type_count, prop_count

    def _generate(self, mock, properties, expected_queries):
        # Run the dictionary generator against mocked discovery output.
        mock.return_value = properties
        with self.assertNumQueries(expected_queries):
            generate_data_dictionary(self.domain)

    def test_no_types(self, mock):
        self._generate(mock, {}, 1)
        self.assertEqual(self._counts(), (0, 0))

    def test_empty_type(self, mock):
        # A blank case-type name is ignored entirely.
        self._generate(mock, {'': ['prop']}, 2)
        self.assertEqual(self._counts(), (0, 0))

    def test_no_properties(self, mock):
        self._generate(mock, {'type': []}, 3)
        self.assertEqual(self._counts(), (1, 0))

    def test_one_type(self, mock):
        self._generate(mock, {'type': ['property']}, 4)
        self.assertEqual(self._counts(), (1, 1))

    def test_two_types(self, mock):
        self._generate(mock, {'type': ['property'], 'type2': ['property']}, 5)
        self.assertEqual(self._counts(), (2, 2))

    def test_two_properties(self, mock):
        self._generate(mock, {'type': ['property', 'property2']}, 4)
        self.assertEqual(self._counts(), (1, 2))

    def test_already_existing_property(self, mock):
        # Pre-create the type and property; generation must not duplicate them.
        case_type = CaseType(domain=self.domain, name='type')
        case_type.save()
        CaseProperty(case_type=case_type, name='property').save()
        self.assertEqual(self._counts(), (1, 1))
        self._generate(mock, {'type': ['property']}, 3)
        self.assertEqual(self._counts(), (1, 1))

    def test_parent_property(self, mock):
        # 'parent/property' refers to a parent case, so only one property is stored.
        self._generate(mock, {'type': ['property', 'parent/property']}, 4)
        self.assertEqual(self._counts(), (1, 1))
| 41.545455
| 95
| 0.701039
| 436
| 3,656
| 5.692661
| 0.135321
| 0.112812
| 0.128928
| 0.152297
| 0.812651
| 0.776793
| 0.7361
| 0.678082
| 0.643433
| 0.643433
| 0
| 0.009546
| 0.169037
| 3,656
| 87
| 96
| 42.022989
| 0.807439
| 0
| 0
| 0.507937
| 1
| 0
| 0.047593
| 0.015591
| 0
| 0
| 0
| 0
| 0.412698
| 1
| 0.142857
| false
| 0
| 0.079365
| 0
| 0.253968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
73c51dd60e06da955872ea447174fbd8d1071941
| 685
|
py
|
Python
|
quantlib/pricingengines/api.py
|
yuyingfeng/pyql
|
ceb838581ad4db73a0208bc51bde2771bb534e5f
|
[
"BSD-3-Clause"
] | null | null | null |
quantlib/pricingengines/api.py
|
yuyingfeng/pyql
|
ceb838581ad4db73a0208bc51bde2771bb534e5f
|
[
"BSD-3-Clause"
] | null | null | null |
quantlib/pricingengines/api.py
|
yuyingfeng/pyql
|
ceb838581ad4db73a0208bc51bde2771bb534e5f
|
[
"BSD-3-Clause"
] | 2
|
2016-08-24T20:56:14.000Z
|
2022-01-03T05:58:42.000Z
|
from .vanilla.vanilla import VanillaOptionEngine, AnalyticEuropeanEngine
from .vanilla.vanilla import AnalyticHestonEngine
from .vanilla.vanilla import AnalyticBSMHullWhiteEngine
from .vanilla.vanilla import AnalyticHestonHullWhiteEngine
from .vanilla.vanilla import BaroneAdesiWhaleyApproximationEngine
from .vanilla.vanilla import BatesEngine, BatesDetJumpEngine
from .vanilla.vanilla import BatesDoubleExpEngine, BatesDoubleExpDetJumpEngine
from .vanilla.vanilla import AnalyticDividendEuropeanEngine
from .vanilla.vanilla import FDDividendAmericanEngine, FDAmericanEngine
from .vanilla.vanilla import FdHestonHullWhiteVanillaEngine
from .swaption import JamshidianSwaptionEngine
| 48.928571
| 78
| 0.890511
| 58
| 685
| 10.517241
| 0.327586
| 0.180328
| 0.295082
| 0.393443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072993
| 685
| 13
| 79
| 52.692308
| 0.96063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb7ecd1f310747b09d0e8db1451eb712e036f0b4
| 279
|
py
|
Python
|
SimCalorimetry/HGCalSimProducers/python/hgcHitAssociation_cfi.py
|
yihui-lai/cmssw
|
ec61da59bdebc84a58d07d7e6d993b74bb7709ee
|
[
"Apache-2.0"
] | 1
|
2020-06-09T10:12:27.000Z
|
2020-06-09T10:12:27.000Z
|
SimCalorimetry/HGCalSimProducers/python/hgcHitAssociation_cfi.py
|
yihui-lai/cmssw
|
ec61da59bdebc84a58d07d7e6d993b74bb7709ee
|
[
"Apache-2.0"
] | null | null | null |
SimCalorimetry/HGCalSimProducers/python/hgcHitAssociation_cfi.py
|
yihui-lai/cmssw
|
ec61da59bdebc84a58d07d7e6d993b74bb7709ee
|
[
"Apache-2.0"
] | 2
|
2020-03-20T18:46:13.000Z
|
2021-03-12T09:23:07.000Z
|
from SimCalorimetry.HGCalAssociatorProducers.layerClusterAssociatorByEnergyScore_cfi import layerClusterAssociatorByEnergyScore as lcAssocByEnergyScoreProducer
from RecoLocalCalo.HGCalRecProducers.hgcalRecHitMapProducer_cfi import hgcalRecHitMapProducer as hgcRecHitMapProducer
| 69.75
| 159
| 0.939068
| 18
| 279
| 14.444444
| 0.666667
| 0.069231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046595
| 279
| 3
| 160
| 93
| 0.977444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb848fe8a13a551de237382bb5c8c47106ed32c2
| 40
|
py
|
Python
|
tensorstock/feature/ma/__init__.py
|
Hourout/tensorstock
|
7c7fa3a47bfd4b8eb505368d018a2a493cb734b6
|
[
"Apache-2.0"
] | null | null | null |
tensorstock/feature/ma/__init__.py
|
Hourout/tensorstock
|
7c7fa3a47bfd4b8eb505368d018a2a493cb734b6
|
[
"Apache-2.0"
] | null | null | null |
tensorstock/feature/ma/__init__.py
|
Hourout/tensorstock
|
7c7fa3a47bfd4b8eb505368d018a2a493cb734b6
|
[
"Apache-2.0"
] | null | null | null |
from tensorstock.feature.ma._ma import *
| 40
| 40
| 0.825
| 6
| 40
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fba3fa6b15b2a5a02c9b5423b7bf90a9b561cfec
| 97
|
py
|
Python
|
netbuffer/abm/__init__.py
|
bstabler/netbuffer
|
25fb44804f160a92c8bee80f9f6b44b8f97b2b16
|
[
"BSD-3-Clause"
] | null | null | null |
netbuffer/abm/__init__.py
|
bstabler/netbuffer
|
25fb44804f160a92c8bee80f9f6b44b8f97b2b16
|
[
"BSD-3-Clause"
] | 15
|
2018-03-08T19:06:01.000Z
|
2020-05-07T23:44:48.000Z
|
netbuffer/abm/__init__.py
|
bstabler/netbuffer
|
25fb44804f160a92c8bee80f9f6b44b8f97b2b16
|
[
"BSD-3-Clause"
] | 3
|
2018-03-19T19:32:52.000Z
|
2019-10-31T17:47:12.000Z
|
from netbuffer.core import network
from . import misc
from . import tables
from . import models
| 16.166667
| 34
| 0.783505
| 14
| 97
| 5.428571
| 0.571429
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175258
| 97
| 5
| 35
| 19.4
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fbc93e55c27fce299e046a634a4a1ac9e2f0d605
| 404
|
py
|
Python
|
tests/test_bookops_nypl_platform.py
|
BookOps-CAT/bookops-nypl-platform
|
a5db62f4ca13c5775283ccc2cec24eba970030c4
|
[
"MIT"
] | null | null | null |
tests/test_bookops_nypl_platform.py
|
BookOps-CAT/bookops-nypl-platform
|
a5db62f4ca13c5775283ccc2cec24eba970030c4
|
[
"MIT"
] | 4
|
2020-10-14T03:35:48.000Z
|
2022-02-07T04:46:16.000Z
|
tests/test_bookops_nypl_platform.py
|
BookOps-CAT/bookops-nypl-platform
|
a5db62f4ca13c5775283ccc2cec24eba970030c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from bookops_nypl_platform import __version__, __title__
def test_version():
    """Package version should be pinned at the released value."""
    expected = "0.2.1"
    assert __version__ == expected
def test_title():
    """Distribution title should match the project name."""
    expected = "bookops-nypl-platform"
    assert __title__ == expected
def test_PlatfromToken_top_level_import():
    # Smoke test: PlatformToken must be importable from the package top level.
    # NOTE(review): "Platfrom" in the test name looks like a typo of "Platform";
    # renaming would change the collected test id, so it is left unchanged here.
    from bookops_nypl_platform import PlatformToken
def test_PlatformSession_top_level_import():
    # Smoke test: PlatformSession must be importable from the package top level.
    from bookops_nypl_platform import PlatformSession
| 20.2
| 56
| 0.769802
| 50
| 404
| 5.58
| 0.42
| 0.157706
| 0.272401
| 0.247312
| 0.412186
| 0.308244
| 0.308244
| 0.308244
| 0
| 0
| 0
| 0.011594
| 0.14604
| 404
| 19
| 57
| 21.263158
| 0.797101
| 0.05198
| 0
| 0
| 0
| 0
| 0.068241
| 0.055118
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.444444
| true
| 0
| 0.555556
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
838ff9fdb9f7fa86b3215dc88031e6de0f7aef9b
| 32,459
|
py
|
Python
|
fhir/resources/testreport.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/testreport.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/testreport.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/TestReport
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class TestReport(domainresource.DomainResource):
    """Disclaimer: Any field name ends with ``__ext`` doesn't part of
    Resource StructureDefinition, instead used to enable Extensibility feature
    for FHIR Primitive Data Types.
    Describes the results of a TestScript execution.
    A summary of information based on the results of executing a TestScript.
    """

    # Fixed FHIR resource-type discriminator; const=True means it cannot be overridden.
    resource_type = Field("TestReport", const=True)

    identifier: fhirtypes.IdentifierType = Field(
        None,
        alias="identifier",
        title="External identifier",
        description=(
            "Identifier for the TestScript assigned for external purposes outside "
            "the context of FHIR."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    issued: fhirtypes.DateTime = Field(
        None,
        alias="issued",
        title="When the TestScript was executed and this TestReport was generated",
        description=None,
        # if property is element of this resource.
        element_property=True,
    )
    # Companion extension field for the ``issued`` primitive (FHIR extensibility).
    issued__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_issued", title="Extension field for ``issued``."
    )

    name: fhirtypes.String = Field(
        None,
        alias="name",
        title="Informal name of the executed TestScript",
        description="A free text natural language name identifying the executed TestScript.",
        # if property is element of this resource.
        element_property=True,
    )
    name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_name", title="Extension field for ``name``."
    )

    participant: typing.List[fhirtypes.TestReportParticipantType] = Field(
        None,
        alias="participant",
        title=(
            "A participant in the test execution, either the execution engine, a "
            "client, or a server"
        ),
        description=None,
        # if property is element of this resource.
        element_property=True,
    )

    # Required primitive (cardinality 1..1); enforced by the root validator below
    # rather than by pydantic, so an extension-only representation is still valid.
    result: fhirtypes.Code = Field(
        None,
        alias="result",
        title="pass | fail | pending",
        description="The overall result from the execution of the TestScript.",
        # if property is element of this resource.
        element_property=True,
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["pass", "fail", "pending"],
    )
    result__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_result", title="Extension field for ``result``."
    )

    score: fhirtypes.Decimal = Field(
        None,
        alias="score",
        title=(
            "The final score (percentage of tests passed) resulting from the "
            "execution of the TestScript"
        ),
        description=None,
        # if property is element of this resource.
        element_property=True,
    )
    score__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_score", title="Extension field for ``score``."
    )

    setup: fhirtypes.TestReportSetupType = Field(
        None,
        alias="setup",
        title=(
            "The results of the series of required setup operations before the "
            "tests were executed"
        ),
        description=None,
        # if property is element of this resource.
        element_property=True,
    )

    # Required primitive (cardinality 1..1); see root validator below.
    status: fhirtypes.Code = Field(
        None,
        alias="status",
        title="completed | in-progress | waiting | stopped | entered-in-error",
        description="The current state of this test report.",
        # if property is element of this resource.
        element_property=True,
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=[
            "completed",
            "in-progress",
            "waiting",
            "stopped",
            "entered-in-error",
        ],
    )
    status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_status", title="Extension field for ``status``."
    )

    teardown: fhirtypes.TestReportTeardownType = Field(
        None,
        alias="teardown",
        title="The results of running the series of required clean up steps",
        description=(
            "The results of the series of operations required to clean up after all"
            " the tests were executed (successfully or otherwise)."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    test: typing.List[fhirtypes.TestReportTestType] = Field(
        None,
        alias="test",
        title="A test executed from the test script",
        description=None,
        # if property is element of this resource.
        element_property=True,
    )

    # Mandatory reference (``...`` default): the only field pydantic itself requires.
    testScript: fhirtypes.ReferenceType = Field(
        ...,
        alias="testScript",
        title=(
            "Reference to the version-specific TestScript that was executed to "
            "produce this TestReport"
        ),
        description=(
            "Ideally this is an absolute URL that is used to identify the version-"
            "specific TestScript that was executed, matching the `TestScript.url`."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["TestScript"],
    )

    tester: fhirtypes.String = Field(
        None,
        alias="tester",
        title="Name of the tester producing this report (Organization or individual)",
        description=None,
        # if property is element of this resource.
        element_property=True,
    )
    tester__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_tester", title="Extension field for ``tester``."
    )

    @classmethod
    def elements_sequence(cls):
        """returning all elements names from
        ``TestReport`` according specification,
        with preserving original sequence order.
        """
        return [
            "id",
            "meta",
            "implicitRules",
            "language",
            "text",
            "contained",
            "extension",
            "modifierExtension",
            "identifier",
            "name",
            "status",
            "testScript",
            "result",
            "score",
            "tester",
            "issued",
            "participant",
            "setup",
            "test",
            "teardown",
        ]

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_1252(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        # (field name, companion extension field) pairs: at least one of each
        # pair must carry data for the resource to be valid.
        required_fields = [("result", "result__ext"), ("status", "status__ext")]
        # Sentinel distinguishing "key absent" from an explicit None value.
        _missing = object()

        def _fallback():
            return ""

        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                continue
            # Primitive value is absent; check whether the extension justifies it.
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # Unrecognized shape: run the field's own validators to coerce
                    # it before inspecting its extension list.
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
class TestReportParticipant(backboneelement.BackboneElement):
    """A participant in the test execution, either the execution engine, a client,
    or a server.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportParticipant", const=True)

    display: fhirtypes.String = Field(
        None,
        alias="display",
        title="The display name of the participant",
        description=None,
        # if property is element of this resource.
        element_property=True,
    )
    display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_display", title="Extension field for ``display``."
    )

    type: fhirtypes.Code = Field(
        None,
        alias="type",
        title="test-engine | client | server",
        description="The type of participant.",
        # if property is element of this resource.
        element_property=True,
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["test-engine", "client", "server"],
    )
    type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_type", title="Extension field for ``type``."
    )

    uri: fhirtypes.Uri = Field(
        None,
        alias="uri",
        title="The uri of the participant. An absolute URL is preferred",
        description=None,
        # if property is element of this resource.
        element_property=True,
        element_required=True,
    )
    uri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_uri", title="Extension field for ``uri``."
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportParticipant`` according to
        the specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "type", "uri", "display"]

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_2403(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        # ``type`` and ``uri`` have cardinality 1..1 (element_required=True above);
        # each may instead be represented by a non-empty extension counterpart.
        required_fields = [("type", "type__ext"), ("uri", "uri__ext")]
        _missing = object()  # sentinel: distinguishes "key absent" from explicit None

        def _fallback():
            # Stand-in when ext_value's class has no ``get_resource_type``.
            return ""

        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # Unknown type: coerce through the extension field's validators.
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
class TestReportSetup(backboneelement.BackboneElement):
    """The results of the series of required setup operations before the tests
    were executed.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportSetup", const=True)

    # ``...`` makes this a mandatory list (cardinality 1..*).
    action: typing.List[fhirtypes.TestReportSetupActionType] = Field(
        ...,
        alias="action",
        title="A setup operation or assert that was executed",
        description="Action would contain either an operation or an assertion.",
        # if property is element of this resource.
        element_property=True,
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportSetup`` according to the
        specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "action"]
class TestReportSetupAction(backboneelement.BackboneElement):
    """A setup operation or assert that was executed.
    Action would contain either an operation or an assertion.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportSetupAction", const=True)

    # Named ``assert_fhir`` because ``assert`` is a Python keyword; the FHIR
    # wire name is restored via the alias.
    assert_fhir: fhirtypes.TestReportSetupActionAssertType = Field(
        None,
        alias="assert",
        title="The assertion to perform",
        description="The results of the assertion performed on the previous operations.",
        # if property is element of this resource.
        element_property=True,
    )

    operation: fhirtypes.TestReportSetupActionOperationType = Field(
        None,
        alias="operation",
        title="The operation to perform",
        description="The operation performed.",
        # if property is element of this resource.
        element_property=True,
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportSetupAction`` according to
        the specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "operation", "assert"]
class TestReportSetupActionAssert(backboneelement.BackboneElement):
    """The assertion to perform.
    The results of the assertion performed on the previous operations.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportSetupActionAssert", const=True)

    detail: fhirtypes.String = Field(
        None,
        alias="detail",
        title="A link to further details on the result",
        description=None,
        # if property is element of this resource.
        element_property=True,
    )
    detail__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_detail", title="Extension field for ``detail``."
    )

    message: fhirtypes.Markdown = Field(
        None,
        alias="message",
        title="A message associated with the result",
        description="An explanatory message associated with the result.",
        # if property is element of this resource.
        element_property=True,
    )
    message__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_message", title="Extension field for ``message``."
    )

    result: fhirtypes.Code = Field(
        None,
        alias="result",
        title="pass | skip | fail | warning | error",
        description="The result of this assertion.",
        # if property is element of this resource.
        element_property=True,
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["pass", "skip", "fail", "warning", "error"],
    )
    result__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_result", title="Extension field for ``result``."
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportSetupActionAssert``
        according to the specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "result", "message", "detail"]

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_3013(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        # ``result`` has cardinality 1..1; a non-empty ``_result`` extension
        # may stand in for the primitive value.
        required_fields = [("result", "result__ext")]
        _missing = object()  # sentinel: distinguishes "key absent" from explicit None

        def _fallback():
            # Stand-in when ext_value's class has no ``get_resource_type``.
            return ""

        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # Unknown type: coerce through the extension field's validators.
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
class TestReportSetupActionOperation(backboneelement.BackboneElement):
    """The operation to perform.
    The operation performed.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportSetupActionOperation", const=True)

    # Unlike the assert variant, ``detail`` here is a Uri, not a String.
    detail: fhirtypes.Uri = Field(
        None,
        alias="detail",
        title="A link to further details on the result",
        description=None,
        # if property is element of this resource.
        element_property=True,
    )
    detail__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_detail", title="Extension field for ``detail``."
    )

    message: fhirtypes.Markdown = Field(
        None,
        alias="message",
        title="A message associated with the result",
        description="An explanatory message associated with the result.",
        # if property is element of this resource.
        element_property=True,
    )
    message__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_message", title="Extension field for ``message``."
    )

    result: fhirtypes.Code = Field(
        None,
        alias="result",
        title="pass | skip | fail | warning | error",
        description="The result of this operation.",
        # if property is element of this resource.
        element_property=True,
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["pass", "skip", "fail", "warning", "error"],
    )
    result__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_result", title="Extension field for ``result``."
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportSetupActionOperation``
        according to the specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "result", "message", "detail"]

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_3326(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        # ``result`` has cardinality 1..1; a non-empty ``_result`` extension
        # may stand in for the primitive value.
        required_fields = [("result", "result__ext")]
        _missing = object()  # sentinel: distinguishes "key absent" from explicit None

        def _fallback():
            # Stand-in when ext_value's class has no ``get_resource_type``.
            return ""

        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # Unknown type: coerce through the extension field's validators.
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
class TestReportTeardown(backboneelement.BackboneElement):
    """The results of running the series of required clean up steps.
    The results of the series of operations required to clean up after all the
    tests were executed (successfully or otherwise).

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportTeardown", const=True)

    # ``...`` makes this a mandatory list (cardinality 1..*).
    action: typing.List[fhirtypes.TestReportTeardownActionType] = Field(
        ...,
        alias="action",
        title="One or more teardown operations performed",
        description="The teardown action will only contain an operation.",
        # if property is element of this resource.
        element_property=True,
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportTeardown`` according to the
        specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "action"]
class TestReportTeardownAction(backboneelement.BackboneElement):
    """One or more teardown operations performed.
    The teardown action will only contain an operation.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportTeardownAction", const=True)

    # Mandatory (``...``); reuses the setup-action operation type.
    operation: fhirtypes.TestReportSetupActionOperationType = Field(
        ...,
        alias="operation",
        title="The teardown operation performed",
        description="An operation would involve a REST request to a server.",
        # if property is element of this resource.
        element_property=True,
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportTeardownAction`` according
        to the specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "operation"]
class TestReportTest(backboneelement.BackboneElement):
    """A test executed from the test script.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportTest", const=True)

    # ``...`` makes this a mandatory list (cardinality 1..*).
    action: typing.List[fhirtypes.TestReportTestActionType] = Field(
        ...,
        alias="action",
        title="A test operation or assert that was performed",
        description="Action would contain either an operation or an assertion.",
        # if property is element of this resource.
        element_property=True,
    )

    description: fhirtypes.String = Field(
        None,
        alias="description",
        title="Tracking/reporting short description of the test",
        description=(
            "A short description of the test used by test engines for tracking and "
            "reporting purposes."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_description", title="Extension field for ``description``."
    )

    name: fhirtypes.String = Field(
        None,
        alias="name",
        title="Tracking/logging name of this test",
        description=(
            "The name of this test used for tracking/logging purposes by test "
            "engines."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_name", title="Extension field for ``name``."
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportTest`` according to the
        specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "name", "description", "action"]
class TestReportTestAction(backboneelement.BackboneElement):
    """A test operation or assert that was performed.
    Action would contain either an operation or an assertion.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; instead it is used to enable the
    Extensibility feature for FHIR Primitive Data Types.
    """

    resource_type = Field("TestReportTestAction", const=True)

    # Named ``assert_fhir`` because ``assert`` is a Python keyword; the FHIR
    # wire name is restored via the alias.
    assert_fhir: fhirtypes.TestReportSetupActionAssertType = Field(
        None,
        alias="assert",
        title="The assertion performed",
        description="The results of the assertion performed on the previous operations.",
        # if property is element of this resource.
        element_property=True,
    )

    operation: fhirtypes.TestReportSetupActionOperationType = Field(
        None,
        alias="operation",
        title="The operation performed",
        description="An operation would involve a REST request to a server.",
        # if property is element of this resource.
        element_property=True,
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``TestReportTestAction`` according to
        the specification, preserving the original sequence order.
        """
        return ["id", "extension", "modifierExtension", "operation", "assert"]
| 37.786962
| 93
| 0.612927
| 3,346
| 32,459
| 5.830245
| 0.09743
| 0.019684
| 0.030859
| 0.030193
| 0.825713
| 0.796904
| 0.763994
| 0.756766
| 0.736211
| 0.730418
| 0
| 0.003267
| 0.302197
| 32,459
| 858
| 94
| 37.831002
| 0.858013
| 0.238978
| 0
| 0.63167
| 0
| 0
| 0.198643
| 0.008839
| 0
| 0
| 0
| 0
| 0.02926
| 1
| 0.030981
| false
| 0.032702
| 0.008606
| 0.006885
| 0.187608
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7ad87792c2a0dad382583551ee316c8f12a8cc7
| 71
|
py
|
Python
|
run/metrics_shapenet_lm.py
|
crazy-zxx/3d-lmnet-update
|
7739fbfaedc59828378f233f53f3a919f8f0e328
|
[
"MIT"
] | null | null | null |
run/metrics_shapenet_lm.py
|
crazy-zxx/3d-lmnet-update
|
7739fbfaedc59828378f233f53f3a919f8f0e328
|
[
"MIT"
] | null | null | null |
run/metrics_shapenet_lm.py
|
crazy-zxx/3d-lmnet-update
|
7739fbfaedc59828378f233f53f3a919f8f0e328
|
[
"MIT"
] | null | null | null |
import os  # noqa: F401  (kept: previously used for os.system; callers may rely on it)
import subprocess

# Run the ShapeNet latent-matching metrics shell script from the repository
# root.  The original used os.system('cd .. \n bash ...'), i.e. a shell string
# with two chained commands; subprocess.run with an argument list plus
# cwd=".." does the same without invoking a shell-string parser.
# check=False preserves the original behaviour of ignoring the exit status.
subprocess.run(["bash", "./scripts/metrics_shapenet_lm.sh"], cwd="..", check=False)
| 17.75
| 59
| 0.71831
| 12
| 71
| 4.083333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 71
| 3
| 60
| 23.666667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.647887
| 0.450704
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f7c6e6d86193663603c1b2019f1925caf5252cf8
| 2,595
|
py
|
Python
|
_sample_suites/many_tests/test_1.py
|
yaelmi3/backslash
|
edf39caf97af2c926da01c340a83648f4874e97e
|
[
"BSD-3-Clause"
] | 17
|
2015-11-25T13:02:38.000Z
|
2021-12-14T20:18:36.000Z
|
_sample_suites/many_tests/test_1.py
|
yaelmi3/backslash
|
edf39caf97af2c926da01c340a83648f4874e97e
|
[
"BSD-3-Clause"
] | 533
|
2015-11-24T12:47:13.000Z
|
2022-02-12T07:59:08.000Z
|
_sample_suites/many_tests/test_1.py
|
parallelsystems/backslash
|
577cdd18d5f665a8b493c4b2e2a605b7e0f6e11b
|
[
"BSD-3-Clause"
] | 15
|
2015-11-22T13:25:54.000Z
|
2022-02-16T19:23:11.000Z
|
# Sample slash test suite: 100 trivial tests used to populate a test run.
# All tests pass; test_5 and test_97 additionally emit sample warnings via
# the slash logger.
import slash


def test_0():
    pass


def test_1():
    pass


def test_2():
    pass


def test_3():
    pass


def test_4():
    pass


def test_5():
    # Deliberately warns so the run contains a warning entry.
    slash.logger.warning("this is a sample warning")


def test_6():
    pass


def test_7():
    pass


def test_8():
    pass


def test_9():
    pass


def test_10():
    pass


def test_11():
    pass


def test_12():
    pass


def test_13():
    pass


def test_14():
    pass


def test_15():
    pass


def test_16():
    pass


def test_17():
    pass


def test_18():
    pass


def test_19():
    pass


def test_20():
    pass


def test_21():
    pass


def test_22():
    pass


def test_23():
    pass


def test_24():
    pass


def test_25():
    pass


def test_26():
    pass


def test_27():
    pass


def test_28():
    pass


def test_29():
    pass


def test_30():
    pass


def test_31():
    pass


def test_32():
    pass


def test_33():
    pass


def test_34():
    pass


def test_35():
    pass


def test_36():
    pass


def test_37():
    pass


def test_38():
    pass


def test_39():
    pass


def test_40():
    pass


def test_41():
    pass


def test_42():
    pass


def test_43():
    pass


def test_44():
    pass


def test_45():
    pass


def test_46():
    pass


def test_47():
    pass


def test_48():
    pass


def test_49():
    pass


def test_50():
    pass


def test_51():
    pass


def test_52():
    pass


def test_53():
    pass


def test_54():
    pass


def test_55():
    pass


def test_56():
    pass


def test_57():
    pass


def test_58():
    pass


def test_59():
    pass


def test_60():
    pass


def test_61():
    pass


def test_62():
    pass


def test_63():
    pass


def test_64():
    pass


def test_65():
    pass


def test_66():
    pass


def test_67():
    pass


def test_68():
    pass


def test_69():
    pass


def test_70():
    pass


def test_71():
    pass


def test_72():
    pass


def test_73():
    pass


def test_74():
    pass


def test_75():
    pass


def test_76():
    pass


def test_77():
    pass


def test_78():
    pass


def test_79():
    pass


def test_80():
    pass


def test_81():
    pass


def test_82():
    pass


def test_83():
    pass


def test_84():
    pass


def test_85():
    pass


def test_86():
    pass


def test_87():
    pass


def test_88():
    pass


def test_89():
    pass


def test_90():
    pass


def test_91():
    pass


def test_92():
    pass


def test_93():
    pass


def test_94():
    pass


def test_95():
    pass


def test_96():
    pass


def test_97():
    # Second deliberate warning, distinguishable from test_5's.
    slash.logger.warning("this is a sample warning #2")


def test_98():
    pass


def test_99():
    pass
| 8.564356
| 55
| 0.566474
| 417
| 2,595
| 3.285372
| 0.266187
| 0.510949
| 0.778832
| 0.032117
| 0.055474
| 0.055474
| 0.055474
| 0.055474
| 0
| 0
| 0
| 0.107123
| 0.312909
| 2,595
| 302
| 56
| 8.592715
| 0.661245
| 0
| 0
| 0.487562
| 0
| 0
| 0.019661
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.497512
| true
| 0.487562
| 0.004975
| 0
| 0.502488
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f71327a3a8d91361d562cf3cb209adc3828acd01
| 122
|
py
|
Python
|
sub_call/file_call.py
|
jyhh1992/test_algorithm
|
1e6ec7dbdd6881f0c60fbf7c6d7d6f8f4a5b2168
|
[
"Apache-2.0"
] | null | null | null |
sub_call/file_call.py
|
jyhh1992/test_algorithm
|
1e6ec7dbdd6881f0c60fbf7c6d7d6f8f4a5b2168
|
[
"Apache-2.0"
] | null | null | null |
sub_call/file_call.py
|
jyhh1992/test_algorithm
|
1e6ec7dbdd6881f0c60fbf7c6d7d6f8f4a5b2168
|
[
"Apache-2.0"
] | null | null | null |
class response():
    """Minimal sample object: holds the number 5 and hands it back on request."""

    def __init__(self) -> None:
        """Initialise the stored value."""
        # Fixed value exposed through call_result().
        self.num = 5

    def call_result(self):
        """Return the stored number."""
        return self.num
| 24.4
| 31
| 0.581967
| 16
| 122
| 4.125
| 0.6875
| 0.212121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.286885
| 122
| 5
| 32
| 24.4
| 0.747126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f725afde370dd3d08133a4a847cf11682d4bc65e
| 30
|
py
|
Python
|
py_yahoo/__init__.py
|
satheshrgs/py_yahoo
|
c18d680a9c75bb28364b95ada8b4dead0302d6ed
|
[
"MIT"
] | null | null | null |
py_yahoo/__init__.py
|
satheshrgs/py_yahoo
|
c18d680a9c75bb28364b95ada8b4dead0302d6ed
|
[
"MIT"
] | null | null | null |
py_yahoo/__init__.py
|
satheshrgs/py_yahoo
|
c18d680a9c75bb28364b95ada8b4dead0302d6ed
|
[
"MIT"
] | null | null | null |
from .py_yahoo import YWeather
| 30
| 30
| 0.866667
| 5
| 30
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f76322719d8f35345f67905088e40b6faf89a356
| 179
|
py
|
Python
|
vega/networks/pytorch/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/networks/pytorch/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/networks/pytorch/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
from . import backbones
from . import heads
from . import blocks
from . import customs
from . import detectors
from . import necks
from . import losses
from . import cyclesrbodys
| 19.888889
| 26
| 0.776536
| 24
| 179
| 5.791667
| 0.416667
| 0.57554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178771
| 179
| 8
| 27
| 22.375
| 0.945578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f7639b27eccebca8b7b8777b43cef3efbac0cb11
| 150
|
py
|
Python
|
rllib/util/losses/__init__.py
|
sebimarkgraf/rllib
|
a00052ae735f4f7160055ebb42c83f69486df948
|
[
"MIT"
] | null | null | null |
rllib/util/losses/__init__.py
|
sebimarkgraf/rllib
|
a00052ae735f4f7160055ebb42c83f69486df948
|
[
"MIT"
] | null | null | null |
rllib/util/losses/__init__.py
|
sebimarkgraf/rllib
|
a00052ae735f4f7160055ebb42c83f69486df948
|
[
"MIT"
] | null | null | null |
"""Losses utilities used in algorithms."""
from .entropy_loss import EntropyLoss
from .kl_loss import KLLoss
from .pathwise_loss import PathwiseLoss
| 25
| 42
| 0.813333
| 20
| 150
| 5.95
| 0.7
| 0.252101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 150
| 5
| 43
| 30
| 0.901515
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f766dcb78eac26578e373d2ffa860880ae375600
| 2,279
|
py
|
Python
|
datagokr/scripts.py
|
uujei/datagokr
|
308f5151f819010f2c4e174a6ef84d83d3bea922
|
[
"MIT"
] | null | null | null |
datagokr/scripts.py
|
uujei/datagokr
|
308f5151f819010f2c4e174a6ef84d83d3bea922
|
[
"MIT"
] | null | null | null |
datagokr/scripts.py
|
uujei/datagokr
|
308f5151f819010f2c4e174a6ef84d83d3bea922
|
[
"MIT"
] | null | null | null |
import json
import click

from .airkorea import MsrstnAcctoRltmMesureDnsty
from .kma import UltraSrtFcst, UltraSrtNcst, VilageFcst, WthrDataList


def _out(records):
    """Print each record's dict representation, one per line."""
    for record in records:
        print(record.dict())


def _fetch_and_print(api_cls, service_key, base_date, base_time, nx, ny):
    """Instantiate *api_cls* with the non-None CLI parameters and print its records.

    Shared body of the three forecast commands below; omitting None values
    lets the API class fall back to its own defaults.
    """
    params = {
        "serviceKey": service_key,
        "base_date": base_date,
        "base_time": base_time,
        "nx": nx,
        "ny": ny,
    }
    api = api_cls(**{k: v for k, v in params.items() if v is not None})
    records = api.get_records()
    _out(records)


def _forecast_options(fn):
    """Decorator attaching the five CLI options shared by every forecast command.

    Options are applied in reverse so the resulting declaration order (and
    the --help listing) matches the original stacked @click.option decorators.
    """
    options = [
        click.option("-s", "--service-key", default=None, help="API Key"),
        click.option("-d", "--base-date", default=None, help="baseDate"),
        click.option("-t", "--base-time", default=None, help="baseTime"),
        click.option("-x", "--nx", default=None, help="nx"),
        click.option("-y", "--ny", default=None, help="ny"),
    ]
    for option in reversed(options):
        fn = option(fn)
    return fn


@click.group()
def datagokr():
    # Root command group for the data.go.kr CLI (no docstring on purpose:
    # click would surface it as help text, changing the CLI output).
    pass


@datagokr.command()
@_forecast_options
def ultra_srt_ncst(service_key, base_date, base_time, nx, ny):
    # Ultra-short-term "now-cast" observations.
    _fetch_and_print(UltraSrtNcst, service_key, base_date, base_time, nx, ny)


@datagokr.command()
@_forecast_options
def ultra_srt_fcst(service_key, base_date, base_time, nx, ny):
    # Ultra-short-term forecast.
    _fetch_and_print(UltraSrtFcst, service_key, base_date, base_time, nx, ny)


@datagokr.command()
@_forecast_options
def vilage_fcst(service_key, base_date, base_time, nx, ny):
    # Short-term ("village") forecast.
    _fetch_and_print(VilageFcst, service_key, base_date, base_time, nx, ny)
| 30.797297
| 76
| 0.618254
| 314
| 2,279
| 4.372611
| 0.178344
| 0.120175
| 0.163875
| 0.07866
| 0.825929
| 0.825929
| 0.825929
| 0.825929
| 0.825929
| 0.825929
| 0
| 0
| 0.181659
| 2,279
| 73
| 77
| 31.219178
| 0.736193
| 0
| 0
| 0.688525
| 0
| 0
| 0.147433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0.016393
| 0.065574
| 0
| 0.147541
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f769552d4f2831c615647c2b60050c8abcc4f65b
| 27
|
py
|
Python
|
src/euler_python_package/euler_python/medium/p263.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p263.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p263.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
def problem263():
    """Stub for Project Euler problem 263; intentionally returns nothing yet."""
    return None
| 9
| 17
| 0.62963
| 3
| 27
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.259259
| 27
| 2
| 18
| 13.5
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e3adcb3156f64db461d5da8ab56f594dc55f3387
| 10,169
|
py
|
Python
|
xbasin/tests/test_remapping.py
|
BenniSchmiedel/xbasin
|
63781db7e61914d998f7f1dfa4d3a7cd179ccc91
|
[
"MIT"
] | null | null | null |
xbasin/tests/test_remapping.py
|
BenniSchmiedel/xbasin
|
63781db7e61914d998f7f1dfa4d3a7cd179ccc91
|
[
"MIT"
] | null | null | null |
xbasin/tests/test_remapping.py
|
BenniSchmiedel/xbasin
|
63781db7e61914d998f7f1dfa4d3a7cd179ccc91
|
[
"MIT"
] | null | null | null |
import xarray as xr
import xgcm
import numpy as np
import warnings
import pytest
from xbasin._remapping import remap_vertical
# Absolute tolerance used by the assertion helpers below when exact
# equality fails.
_error = 1e-10

# Scale-factor variable names per grid axis, in xgcm "metrics" form.
# NOTE(review): not consumed in the visible part of this module — presumably
# passed to xgcm.Grid(..., metrics=_metrics) elsewhere; confirm.
_metrics = {
    ("X",): ["e1t", "e1u", "e1v", "e1f"],  # X distances
    ("Y",): ["e2t", "e2u", "e2v", "e2f"],  # Y distances
    ("Z",): ["e3t_0", "e3u_0", "e3v_0", "e3f_0", "e3w_0"],  # Z distances
}
def warning0(fr, to, error):
    """Emit a single warning summarising how *to* deviates from *fr*.

    Reports the count of unequal points, the maximum absolute difference,
    and the count of equal points, prefixed with a note about *error*.
    """
    n_diff = (to != fr).sum().data
    n_same = (to == fr).sum().data
    max_abs = np.abs(to - fr).max().data
    lines = [
        f"v_to not perfectly equal to v_fr, testing with an authorized error of {error}",
        f"Number of points that are not equal: {n_diff}, maximum value of the absolute difference: {max_abs}",
        f"Number of points that are equal: {n_same}",
    ]
    warnings.warn("\n".join(lines))
def _assert_same_domcfg(v_fr, v_to, error=_error):
    """Assert that two fields are equal, tolerating differences below *error*.

    Exact equality is tried first; on mismatch a summary warning is emitted
    (via ``warning0``) and the tolerance-based comparison is enforced instead.
    """
    try:
        assert (v_to == v_fr).all()
    except AssertionError:
        # Not bitwise-identical: report the discrepancy, then fall back to
        # the tolerance check.  NOTE(review): the difference is not abs()'d
        # here, unlike warning0 — confirm that is intentional.
        warning0(v_fr, v_to, error=error)
        assert (v_to - v_fr < error).all()
def _assert_same_integrated_value(v_fr, v_to, e3_fr, e3_to, error=_error):
    """Assert that the vertical integrals of *v_fr* and *v_to* agree.

    Both fields are integrated along "z_c" with their respective scale
    factors (e3_fr / e3_to); exact equality is tried first, then an
    absolute tolerance of *error* after emitting a warning.
    """
    int_fr = (v_fr * e3_fr).sum(dim="z_c")
    int_to = (v_to * e3_to).sum(dim="z_c")
    try:
        assert (int_to == int_fr).all()
    except AssertionError:
        warning0(int_fr, int_to, error=error)
        # Absolute difference: the previous one-sided check (int_to -
        # int_fr < error) accepted any large negative deviation. Debug
        # prints of the integrals were removed as well.
        assert (np.abs(int_to - int_fr) < error).all()
def open_domcfg_to():
    """Open and return the target-grid domain configuration dataset."""
    return xr.open_dataset("data/xnemogcm.domcfg_to.nc")
def open_domcfg_fr():
    """Open the source-grid domain configuration and patch its edges.

    The stored domcfg has wrong values on the domain boundary for the
    depth and vertical scale-factor fields; the first/last interior row
    and column are copied outward to correct them.
    """
    domcfg_fr = xr.open_dataset("data/xnemogcm.domcfg_fr.nc")
    domcfg_fr.load()
    # correct error in domcfg: the same boundary patch applies to each
    # affected variable (deduplicated from three copy-pasted stanzas).
    for name, zdim in (("gdept_0", "z_c"), ("gdepw_0", "z_f"), ("e3w_0", "z_f")):
        field = domcfg_fr[name].transpose("x_c", "y_c", zdim)
        field[0] = field[1]
        field[-1] = field[-2]
        field[:, 0] = field[:, 1]
        field[:, -1] = field[:, -2]
        domcfg_fr[name] = field
    return domcfg_fr
def test_T_0_same_fr_and_to():
    """Remapping a zero T-point field onto its own grid is the identity."""
    cfg = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    grid_src = xgcm.Grid(cfg, periodic=False)
    grid_dst = xgcm.Grid(cfg, periodic=False)
    field = ds["thetao"] * 0 * cfg.tmask
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg.e3t_0,
        scale_factor_to=cfg.e3t_0,
    )
    _assert_same_domcfg(field, remapped)
def test_W_0_same_fr_and_to():
    """Remapping a zero W-point field onto its own grid is the identity.

    W-point remapping may not be implemented; in that case the test ends
    early instead of failing.
    """
    domcfg_fr = open_domcfg_fr()
    nemo_ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    nemo_ds.load()
    domcfg_to = domcfg_fr
    grid_fr = xgcm.Grid(domcfg_fr, periodic=False)
    grid_to = xgcm.Grid(domcfg_to, periodic=False)
    v_fr = nemo_ds["woce"] * 0
    try:
        v_to = remap_vertical(
            v_fr,
            grid_fr,
            grid_to,
            axis="Z",
            scale_factor_fr=domcfg_fr.e3t_0,
            scale_factor_to=domcfg_to.e3t_0,
        )
    except NotImplementedError:
        # Was `return 0`: returning a non-None value from a pytest test
        # triggers PytestReturnNotNoneWarning; a bare return is the
        # correct way to end the test early.
        return
    _assert_same_domcfg(v_fr, v_to)
def test_T_0():
    """A zero T-point field keeps a zero vertical integral after remapping."""
    cfg_src = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    cfg_dst = open_domcfg_to()
    grid_src = xgcm.Grid(cfg_src, periodic=False)
    grid_dst = xgcm.Grid(cfg_dst, periodic=False)
    field = ds["thetao"] * 0 * cfg_src.tmask
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg_src.e3t_0,
        scale_factor_to=cfg_dst.e3t_0,
    )
    _assert_same_integrated_value(
        field, remapped, e3_fr=cfg_src.e3t_0, e3_to=cfg_dst.e3t_0
    )
def create_depth_from_e3(domcfg, grid, point="T"):
    """Build depth coordinates by vertically integrating scale factors.

    W-type depths are cumulative sums of the cell-centre scale factors;
    the T-point depth is the cumulative sum of e3w_0 shifted back by half
    of the surface cell.

    NOTE(review): the previous version ended in a bare undefined name
    (``sdfg``) — a leftover debugging abort that raised NameError — and
    discarded every computed depth. It now returns them instead. The
    *point* parameter is kept for backward compatibility but is currently
    unused — TODO confirm intended use.

    @param domcfg: domain-configuration dataset holding e3{t,u,v,f,w}_0
    @param grid: xgcm Grid used for the cumulative sums along "Z"
    @return: dict mapping depth names to the computed DataArrays
    """
    depthw = grid.cumsum(domcfg.e3t_0, axis="Z", boundary="fill", fill_value=0)
    depthuw = grid.cumsum(domcfg.e3u_0, axis="Z", boundary="fill", fill_value=0)
    depthvw = grid.cumsum(domcfg.e3v_0, axis="Z", boundary="fill", fill_value=0)
    depthfw = grid.cumsum(domcfg.e3f_0, axis="Z", boundary="fill", fill_value=0)
    deptht = grid.cumsum(
        domcfg.e3w_0, axis="Z", boundary="fill", fill_value=0
    ) - 0.5 * domcfg.e3w_0.isel({"z_f": 0})
    return {
        "deptht": deptht,
        "depthw": depthw,
        "depthuw": depthuw,
        "depthvw": depthvw,
        "depthfw": depthfw,
    }
def test_U_0():
    """A zero U-point field keeps a zero vertical integral after remapping."""
    cfg_src = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    cfg_dst = open_domcfg_to()
    grid_src = xgcm.Grid(cfg_src, periodic=False)
    grid_dst = xgcm.Grid(cfg_dst, periodic=False)
    field = ds["uo"] * 0
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg_src.e3u_0,
        scale_factor_to=cfg_dst.e3u_0,
        z_fr=grid_src.interp(cfg_src.gdepw_0, "X", boundary="extend"),
        z_to=cfg_dst.gdepw_0.isel({"x_c": 1, "y_c": 1}).drop_vars(["x_c", "y_c"]),
    )
    _assert_same_integrated_value(
        field, remapped, e3_fr=cfg_src.e3u_0, e3_to=cfg_dst.e3u_0
    )
def test_U_1():
    """The vertical integral of an unchanged U field (x1) is conserved."""
    cfg_src = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    cfg_dst = open_domcfg_to()
    grid_src = xgcm.Grid(cfg_src, periodic=False)
    grid_dst = xgcm.Grid(cfg_dst, periodic=False)
    field = ds["uo"] * 1
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg_src.e3u_0,
        scale_factor_to=cfg_dst.e3u_0,
        z_fr=grid_src.interp(cfg_src.gdepw_0, "X", boundary="extend"),
        z_to=cfg_dst.gdepw_0.isel({"x_c": 1, "y_c": 1}).drop_vars(["x_c", "y_c"]),
    )
    _assert_same_integrated_value(
        field, remapped, e3_fr=cfg_src.e3u_0, e3_to=cfg_dst.e3u_0
    )
def test_U():
    """The vertical integral of the raw U velocity field is conserved."""
    cfg_src = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    cfg_dst = open_domcfg_to()
    grid_src = xgcm.Grid(cfg_src, periodic=False)
    grid_dst = xgcm.Grid(cfg_dst, periodic=False)
    field = ds["uo"]
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg_src.e3u_0,
        scale_factor_to=cfg_dst.e3u_0,
        z_fr=grid_src.interp(cfg_src.gdepw_0, "X", boundary="extend"),
        z_to=cfg_dst.gdepw_0.isel({"x_c": 1, "y_c": 1}).drop_vars(["x_c", "y_c"]),
    )
    _assert_same_integrated_value(
        field, remapped, e3_fr=cfg_src.e3u_0, e3_to=cfg_dst.e3u_0
    )
def test_T_1_auto_get_scale_factor_to():
    """scale_factor_to is derived from the grid metrics when omitted."""
    cfg_src = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    cfg_dst = open_domcfg_to()
    grid_src = xgcm.Grid(cfg_src, periodic=False)
    # The destination grid carries the metrics so remap_vertical can look
    # up the scale factor itself.
    grid_dst = xgcm.Grid(cfg_dst, periodic=False, metrics=_metrics)
    field = (ds["thetao"] * 0 + 1) * cfg_src.tmask
    remapped = remap_vertical(
        field, grid_src, grid_dst, axis="Z", scale_factor_fr=cfg_src.e3t_0
    )
    _assert_same_integrated_value(
        field, remapped, e3_fr=cfg_src.e3t_0, e3_to=cfg_dst.e3t_0
    )
def test_T_1():
    """The vertical integral of a constant-one T field is conserved."""
    cfg_src = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    cfg_dst = open_domcfg_to()
    grid_src = xgcm.Grid(cfg_src, periodic=False)
    grid_dst = xgcm.Grid(cfg_dst, periodic=False)
    field = (ds["thetao"] * 0 + 1) * cfg_src.tmask
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg_src.e3t_0,
        scale_factor_to=cfg_dst.e3t_0,
    )
    _assert_same_integrated_value(
        field, remapped, e3_fr=cfg_src.e3t_0, e3_to=cfg_dst.e3t_0
    )
def test_T_theta():
    """The vertical integral of the real theta field is conserved."""
    cfg_src = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    cfg_dst = open_domcfg_to()
    grid_src = xgcm.Grid(cfg_src, periodic=False)
    grid_dst = xgcm.Grid(cfg_dst, periodic=False)
    field = ds["thetao"] * cfg_src.tmask
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg_src.e3t_0,
        scale_factor_to=cfg_dst.e3t_0,
    )
    _assert_same_integrated_value(
        field, remapped, e3_fr=cfg_src.e3t_0, e3_to=cfg_dst.e3t_0
    )
def test_T_1_same_fr_and_to():
    """Remapping a constant-one field onto its own grid is the identity."""
    cfg = open_domcfg_fr()
    ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    ds.load()
    grid_src = xgcm.Grid(cfg, periodic=False)
    grid_dst = xgcm.Grid(cfg, periodic=False)
    field = (ds["thetao"] * 0 + 1) * cfg.tmask
    remapped = remap_vertical(
        field,
        grid_src,
        grid_dst,
        axis="Z",
        scale_factor_fr=cfg.e3t_0,
        scale_factor_to=cfg.e3t_0,
    )
    _assert_same_domcfg(field, remapped)
def test_T_theta_same_fr_and_to():
    """Remapping the real theta field onto its own grid is the identity.

    The field is round-tripped through float16 to coarsen the values and
    then promoted to extended precision so the remapping arithmetic is
    exact enough for the comparison.
    """
    domcfg_fr = open_domcfg_fr()
    nemo_ds = xr.open_dataset("data/xnemogcm.nemo.nc")
    nemo_ds.load()
    domcfg_to = domcfg_fr
    grid_fr = xgcm.Grid(domcfg_fr, periodic=False)
    grid_to = xgcm.Grid(domcfg_to, periodic=False)
    v_fr = nemo_ds["thetao"] * domcfg_fr.tmask
    v_fr.data = v_fr.data.astype(np.float16)
    # np.longdouble is the portable spelling of extended precision:
    # np.float128 does not exist on platforms where C long double is
    # 64-bit (e.g. Windows) and raises AttributeError there.
    v_fr.data = v_fr.data.astype(np.longdouble)
    v_to = remap_vertical(
        v_fr,
        grid_fr,
        grid_to,
        axis="Z",
        scale_factor_fr=domcfg_fr.e3t_0,
        scale_factor_to=domcfg_to.e3t_0,
    )
    _assert_same_domcfg(v_fr, v_to)
if __name__ == "__main__":
    # Intentionally empty: these tests are meant to be run through pytest.
    pass
| 29.054286
| 100
| 0.633199
| 1,670
| 10,169
| 3.466467
| 0.088623
| 0.138193
| 0.030402
| 0.025911
| 0.81724
| 0.789946
| 0.765417
| 0.759026
| 0.727414
| 0.727414
| 0
| 0.031531
| 0.223424
| 10,169
| 349
| 101
| 29.137536
| 0.701532
| 0.012292
| 0
| 0.614286
| 0
| 0.003571
| 0.092177
| 0.028201
| 0
| 0
| 0
| 0
| 0.067857
| 1
| 0.060714
| false
| 0.003571
| 0.021429
| 0
| 0.092857
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e3c7a2273db3950f3293ec4e81fa4ec5f39550dd
| 124
|
py
|
Python
|
prototype/back-end/adminapi/admin.py
|
nWo-deHack/CNAP
|
f4ece37bdf148627d686cda5134f6a0991ae4cfe
|
[
"Unlicense"
] | null | null | null |
prototype/back-end/adminapi/admin.py
|
nWo-deHack/CNAP
|
f4ece37bdf148627d686cda5134f6a0991ae4cfe
|
[
"Unlicense"
] | null | null | null |
prototype/back-end/adminapi/admin.py
|
nWo-deHack/CNAP
|
f4ece37bdf148627d686cda5134f6a0991ae4cfe
|
[
"Unlicense"
] | null | null | null |
"""Django admin registration for the adminapi application."""
from django.contrib import admin
# Register your models here.
from adminapi.models import Admin
# Expose the Admin model in the Django admin interface.
admin.site.register(Admin)
| 20.666667
| 33
| 0.814516
| 18
| 124
| 5.611111
| 0.611111
| 0.217822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 124
| 6
| 34
| 20.666667
| 0.926606
| 0.209677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
542a037bf49d401ef744bdde7202d9a50fd044b1
| 101
|
py
|
Python
|
terrascript/cobbler/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/cobbler/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/cobbler/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/cobbler/__init__.py
import terrascript
class cobbler(terrascript.Provider):
    """Terrascript declaration of the Terraform "cobbler" provider.

    Empty subclass: the terrascript.Provider base supplies all behavior;
    the class name maps to the provider's Terraform name.
    """
    pass
| 14.428571
| 36
| 0.792079
| 11
| 101
| 6.909091
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 6
| 37
| 16.833333
| 0.863636
| 0.306931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
581089c6cc8651311d9e51c9f3648af48b975b8e
| 46
|
py
|
Python
|
Luna/test/__init__.py
|
Mikfr83/Luna
|
b72f381f32d26eee9625f928acb05064aa84f8b5
|
[
"MIT"
] | null | null | null |
Luna/test/__init__.py
|
Mikfr83/Luna
|
b72f381f32d26eee9625f928acb05064aa84f8b5
|
[
"MIT"
] | null | null | null |
Luna/test/__init__.py
|
Mikfr83/Luna
|
b72f381f32d26eee9625f928acb05064aa84f8b5
|
[
"MIT"
] | null | null | null |
from Luna.test.maya_unit_test import TestCase
| 23
| 45
| 0.869565
| 8
| 46
| 4.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
58876400a9a65243a188aa407e847165f7e1c859
| 127
|
py
|
Python
|
electronic_station/acceptable_password_2.py
|
NigrumAquila/py_checkio
|
df437c2c3ad325d84714665000e3299a70e91f32
|
[
"MIT"
] | null | null | null |
electronic_station/acceptable_password_2.py
|
NigrumAquila/py_checkio
|
df437c2c3ad325d84714665000e3299a70e91f32
|
[
"MIT"
] | null | null | null |
electronic_station/acceptable_password_2.py
|
NigrumAquila/py_checkio
|
df437c2c3ad325d84714665000e3299a70e91f32
|
[
"MIT"
] | null | null | null |
def is_acceptable_password(password: str) -> bool:
    """Return True when *password* is longer than 6 chars and contains a digit."""
    contains_digit = any(ch.isdigit() for ch in password)
    return len(password) > 6 and contains_digit
| 63.5
| 76
| 0.708661
| 22
| 127
| 4
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 0.15748
| 127
| 2
| 76
| 63.5
| 0.803738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 1
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
545569ce5a604b99b7c200f05bb0b26bd36f56bd
| 123
|
py
|
Python
|
0x05-python-exceptions/5-raise_exception.py
|
oluwaseun-ebenezer/holbertonschool-higher_level_programming
|
e830f969d3ca71abf0a2f6d4f7c64a82337eccd7
|
[
"MIT"
] | null | null | null |
0x05-python-exceptions/5-raise_exception.py
|
oluwaseun-ebenezer/holbertonschool-higher_level_programming
|
e830f969d3ca71abf0a2f6d4f7c64a82337eccd7
|
[
"MIT"
] | null | null | null |
0x05-python-exceptions/5-raise_exception.py
|
oluwaseun-ebenezer/holbertonschool-higher_level_programming
|
e830f969d3ca71abf0a2f6d4f7c64a82337eccd7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
""" 5. Raise exception """
def raise_exception():
    """Unconditionally raise a TypeError."""
    raise TypeError()
| 12.3
| 28
| 0.609756
| 13
| 123
| 5.615385
| 0.615385
| 0.575342
| 0.520548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.219512
| 123
| 9
| 29
| 13.666667
| 0.739583
| 0.447154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5466b42855a96978250bf3e13ca21891c05cf8a8
| 168
|
py
|
Python
|
launcher/api/python/status.py
|
davidvoler/ate_meteor
|
d7ac20638a30e941e0ca8740499743bc26dd57be
|
[
"MIT"
] | null | null | null |
launcher/api/python/status.py
|
davidvoler/ate_meteor
|
d7ac20638a30e941e0ca8740499743bc26dd57be
|
[
"MIT"
] | 2
|
2015-08-06T14:08:39.000Z
|
2015-09-29T09:47:26.000Z
|
launcher/api/python/status.py
|
davidvoler/ate_meteor
|
d7ac20638a30e941e0ca8740499743bc26dd57be
|
[
"MIT"
] | null | null | null |
__author__ = 'davidl'
def set_status(execution_id, process_id, uut, test, verdict):
    """Record the verdict of *test* on *uut* for an execution/process pair.

    Placeholder: not implemented yet; does nothing and returns None.
    """
    pass
def set_results(execution_id, process_id, uut, test, results):
    """Store the detailed *results* of *test* on *uut* for an execution/process pair.

    Placeholder: not implemented yet; does nothing and returns None.
    """
    pass
| 18.666667
| 62
| 0.732143
| 24
| 168
| 4.708333
| 0.541667
| 0.106195
| 0.318584
| 0.353982
| 0.477876
| 0.477876
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 168
| 8
| 63
| 21
| 0.807143
| 0
| 0
| 0.4
| 0
| 0
| 0.035714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
54961d193abf04943dd852615a776760729d2852
| 8,001
|
py
|
Python
|
foxlink/me_zrl_evolvers.py
|
lamsoa729/FoXlink
|
3c061b02968cdab1def752d5c145a6df4615504b
|
[
"BSD-3-Clause"
] | null | null | null |
foxlink/me_zrl_evolvers.py
|
lamsoa729/FoXlink
|
3c061b02968cdab1def752d5c145a6df4615504b
|
[
"BSD-3-Clause"
] | null | null | null |
foxlink/me_zrl_evolvers.py
|
lamsoa729/FoXlink
|
3c061b02968cdab1def752d5c145a6df4615504b
|
[
"BSD-3-Clause"
] | 2
|
2019-06-18T16:48:03.000Z
|
2019-06-20T23:50:02.000Z
|
#!/usr/bin/env python
"""@package docstring
File: me_zrl_evolvers.py
Author: Adam Lamson
Email: adam.lamson@colorado.edu
Description:
"""
import numpy as np
# from scipy.integrate import dblquad
from .me_helpers import rod_geom_derivs, convert_sol_to_geom
from .me_zrl_odes import (rod_geom_derivs_zrl, calc_moment_derivs_zrl,
calc_boundary_derivs_zrl)
from .me_zrl_helpers import (avg_force_zrl, avg_torque_zrl, fast_zrl_src_kl,
prep_zrl_bound_evolver, prep_zrl_evolver,
get_zrl_moments,
get_zrl_moments_and_boundary_terms, get_mu_kl_eff)
from .rod_steric_forces import calc_wca_force_torque
def evolver_zrl(sol, fric_coeff, params):
    """!Calculate all time derivatives necessary to solve the moment expansion
    evolution of the Fokker-Planck equation of zero rest length (zrl) crosslinkers
    bound to moving rods. d<var> is the time derivative of corresponding variable
    @param sol: Solution vector to solve_ivp
    @param fric_coeff: friction coefficients of rod
    @param params: Constant parameters of the simulation
    @return: Time-derivatives of all time varying quantities in a flattened
    array
    """
    # Define useful parameters for functions
    L_i, L_j = (params['L_i'], params['L_j'])
    rod_diam = params['rod_diam']
    beta = params['beta']
    ks = params['ks']
    r_i, r_j, u_i, u_j = convert_sol_to_geom(sol)
    # Separation vector from rod i to rod j
    r_ij = r_j - r_i
    # (scalar_geom, q_arr, Q_arr) = prep_zrl_bound_evolver(sol, params)
    (scalar_geom, q_arr) = prep_zrl_evolver(sol, params)
    (mu_kl, B_terms) = get_zrl_moments_and_boundary_terms(sol)
    # Get average force of crosslinkers on rod_j
    f_ij = avg_force_zrl(r_ij, u_i, u_j, mu_kl[0], mu_kl[1], mu_kl[2], ks)
    tau_i = avg_torque_zrl(r_ij, u_i, u_j, mu_kl[1], mu_kl[3], ks)
    tau_j = avg_torque_zrl(-1. * r_ij, u_j, u_i, mu_kl[2], mu_kl[3], ks)
    # Calculate wca forces and torques if steric forces are set
    # (zero-initialized so both names exist whatever the steric flag)
    torque_i_wca, torque_j_wca = (np.zeros(3), np.zeros(3))
    if params['steric_flag'] == 'wca':
        # Get WCA steric forces and add them to crosslink forces
        eps_scale = 1.
        f_ij_wca, torque_i_wca, torque_j_wca = calc_wca_force_torque(
            r_i, r_j, u_i, u_j, L_i, L_j, rod_diam, eps_scale / beta, fcut=1e10)
        f_ij += f_ij_wca
        tau_i += torque_i_wca
        tau_j += torque_j_wca
    elif params['steric_flag'] == 'constrained':
        u_m = params['constr_vec']  # Get min dist vector of carrier lines
        f_ij -= np.dot(f_ij, u_m) * u_m  # Min dist component from force
        tau_i = np.dot(tau_i, u_m) * u_m  # Torque only around min dist vector
        tau_j = np.dot(tau_j, u_m) * u_m  # Torque only around min dist vector
    # dr_i, dr_j, du_i, du_j = rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j,
    #                                              scalar_geom, mu_kl,
    #                                              fric_coeff, ks)
    dr_i, dr_j, du_i, du_j = rod_geom_derivs(f_ij, tau_i, tau_j,
                                             u_i, u_j, fric_coeff)
    # Add WCA torques if they exist to filament orientations
    # du_i += np.cross(torque_i_wca, u_i) / fric_coeff[2]
    # du_j += np.cross(torque_j_wca, u_j) / fric_coeff[5]
    # Moment evolution
    dmu_kl = calc_moment_derivs_zrl(mu_kl, scalar_geom, q_arr, params)
    # Evolution of boundary condtions
    # dB_terms = calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params)
    # NOTE(review): boundary-term evolution is currently disabled; their
    # derivatives are forced to zero.
    dB_terms = np.zeros(8)
    dsol = np.concatenate((dr_i, dr_j, du_i, du_j, dmu_kl, dB_terms))
    # Check to make sure all values are finite
    if not np.all(np.isfinite(dsol)):
        raise RuntimeError(
            'Infinity or NaN thrown in ODE solver derivatives. '
            'Current derivatives', dsol)
    return dsol
def evolver_zrl_bvg(sol, fric_coeff, params):
    """!Calculate all time derivatives necessary to solve the moment expansion
    evolution of the Fokker-Planck equation of zero rest length (zrl) crosslinkers
    bound to moving rods. d<var> is the time derivative of corresponding variable
    @param sol: Solution vector to solve_ivp
    @param fric_coeff: friction coefficients of rod
    @param params: Constant parameters of the simulation
    @return: Time-derivatives of all time varying quantities in a flattened
    array
    """
    # Define useful parameters for functions
    ks = params['ks']
    r_i, r_j, u_i, u_j = convert_sol_to_geom(sol)
    # Separation vector from rod i to rod j
    r_ij = r_j - r_i
    # (scalar_geom, q_arr, Q_arr) = prep_zrl_bound_evolver(sol, params)
    (scalar_geom, q_arr) = prep_zrl_evolver(sol, params)
    (mu_kl, B_terms) = get_zrl_moments_and_boundary_terms(sol)
    # Get effective mu_kl to calculate forces and torques.
    # This will simulate walking off the end of rods
    mu_kl_eff = get_mu_kl_eff(mu_kl, params)
    # Get average force of crosslinkers on rod_j and both torques.
    f_ij = avg_force_zrl(r_ij, u_i, u_j,
                         mu_kl_eff[0], mu_kl_eff[1], mu_kl_eff[2], ks)
    tau_i = avg_torque_zrl(r_ij, u_i, u_j, mu_kl_eff[1], mu_kl_eff[3], ks)
    tau_j = avg_torque_zrl(-r_ij, u_j, u_i, mu_kl_eff[2], mu_kl_eff[3], ks)
    if params['steric_flag'] == 'wca':
        # Get WCA steric forces and add them to crosslink forces
        eps_scale = 1.
        f_ij_wca, torque_i_wca, torque_j_wca = calc_wca_force_torque(
            r_i, r_j, u_i, u_j,
            params['L_i'], params['L_j'], params['rod_diam'],
            eps_scale / params['beta'], fcut=1e10)
        f_ij += f_ij_wca
        tau_i += torque_i_wca
        tau_j += torque_j_wca
    elif params['steric_flag'] == 'constrained':
        u_m = params['constr_vec']  # Get min dist vector of carrier lines
        f_ij -= np.dot(f_ij, u_m) * u_m  # Min dist component from force
        tau_i = np.dot(tau_i, u_m) * u_m  # Torque only around min dist vector
        tau_j = np.dot(tau_j, u_m) * u_m  # Torque only around min dist vector
    # dr_i, dr_j, du_i, du_j = rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j,
    #                                              scalar_geom, mu_kl_eff,
    #                                              fric_coeff, ks)
    dr_i, dr_j, du_i, du_j = rod_geom_derivs(f_ij, tau_i, tau_j,
                                             u_i, u_j, fric_coeff)
    # # Add WCA torques if they exist to filament orientations
    # du_i += np.cross(torque_i_wca, u_i) / fric_coeff[2]
    # du_j += np.cross(torque_j_wca, u_j) / fric_coeff[5]
    # Moment evolution (uses the raw, not effective, moments)
    dmu_kl = calc_moment_derivs_zrl(mu_kl, scalar_geom, q_arr, params)
    # Evolution of boundary condtions
    # dB_terms = calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params)
    # NOTE(review): boundary-term evolution is currently disabled; their
    # derivatives are forced to zero.
    dB_terms = np.zeros(8)
    dsol = np.concatenate((dr_i, dr_j, du_i, du_j, dmu_kl, dB_terms))
    # Check to make sure all values are finite
    if not np.all(np.isfinite(dsol)):
        raise RuntimeError(
            'Infinity or NaN thrown in ODE solver derivatives. Current derivatives', dsol)
    return dsol
def evolver_zrl_stat(mu_kl, scalar_geom, q_arr, params):
    """!Calculate all time derivatives necessary to solve the moment expansion
    evolution of the Fokker-Planck equation of zero rest length (zrl) crosslinkers
    bound to rods that are held static (all rod geometry derivatives are zero).
    NOTE(review): the previous @param descriptions were copy-paste errors
    from a moments-based signature; corrected to match the arguments
    actually consumed below (same inputs that the moving-rod evolvers pass
    to calc_moment_derivs_zrl).
    @param mu_kl: Moments of the crosslinker distribution
    @param scalar_geom: Scalar geometric quantities of the rod pair
    @param q_arr: Source terms for the moment evolution
    @param params: Constant parameters of the simulation
    @return: Time-derivatives of all time varying quantities in a flattened
    array
    """
    # Rods are static: positions/orientations (12 dof) do not evolve.
    rod_change_arr = np.zeros(12)
    dmu_kl = calc_moment_derivs_zrl(mu_kl, scalar_geom, q_arr, params)
    # Boundary-term evolution is disabled; derivatives forced to zero.
    dB_terms = np.zeros(8)
    dsol = np.concatenate((rod_change_arr, dmu_kl, dB_terms))
    # Check to make sure all values are finite
    if not np.all(np.isfinite(dsol)):
        raise RuntimeError(
            'Infinity or NaN thrown in ODE solver derivatives. Current derivatives', dsol)
    return dsol
| 43.248649
| 90
| 0.663917
| 1,325
| 8,001
| 3.699623
| 0.150189
| 0.022848
| 0.007344
| 0.009792
| 0.819053
| 0.814157
| 0.804162
| 0.781314
| 0.763158
| 0.763158
| 0
| 0.006319
| 0.248344
| 8,001
| 184
| 91
| 43.483696
| 0.80878
| 0.425947
| 0
| 0.632184
| 0
| 0
| 0.076593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.057471
| 0
| 0.126437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5497991ccc14802c28869353d0c9d5de8729c708
| 42
|
py
|
Python
|
mainEmberrite/races/__init__.py
|
evvanErb/Emberrite
|
1e65ef69188619684e093f01febc6f92f8b02716
|
[
"Apache-2.0"
] | null | null | null |
mainEmberrite/races/__init__.py
|
evvanErb/Emberrite
|
1e65ef69188619684e093f01febc6f92f8b02716
|
[
"Apache-2.0"
] | null | null | null |
mainEmberrite/races/__init__.py
|
evvanErb/Emberrite
|
1e65ef69188619684e093f01febc6f92f8b02716
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from raceClass import *
| 14
| 23
| 0.738095
| 6
| 42
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 3
| 23
| 14
| 0.837838
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54bf184e2c8ddba2369cdbf545c0b8068989edb2
| 202
|
py
|
Python
|
skorecard/reporting/__init__.py
|
satya-pattnaik/skorecard
|
ba31821799985052ffb498569b41e969034ea28e
|
[
"MIT"
] | null | null | null |
skorecard/reporting/__init__.py
|
satya-pattnaik/skorecard
|
ba31821799985052ffb498569b41e969034ea28e
|
[
"MIT"
] | null | null | null |
skorecard/reporting/__init__.py
|
satya-pattnaik/skorecard
|
ba31821799985052ffb498569b41e969034ea28e
|
[
"MIT"
] | null | null | null |
"""Import required create_report."""
from .report import build_bucket_table, psi, iv
from .plotting import (
plot_bucket_table,
)
__all__ = ["build_bucket_table", "plot_bucket_table", "psi", "iv"]
| 25.25
| 66
| 0.732673
| 27
| 202
| 5
| 0.481481
| 0.325926
| 0.237037
| 0.237037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 202
| 7
| 67
| 28.857143
| 0.767045
| 0.148515
| 0
| 0
| 0
| 0
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
49b3ee72784b1181f13b5b8d9085035b9bea998c
| 13,125
|
py
|
Python
|
tests/components/homekit/test_type_humidifiers.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
tests/components/homekit/test_type_humidifiers.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/homekit/test_type_humidifiers.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""Test different accessory types: HumidifierDehumidifier."""
from pyhap.const import (
CATEGORY_HUMIDIFIER,
HAP_REPR_AID,
HAP_REPR_CHARS,
HAP_REPR_IID,
HAP_REPR_VALUE,
)
from homeassistant.components.homekit.const import (
ATTR_VALUE,
CONF_LINKED_HUMIDITY_SENSOR,
PROP_MAX_VALUE,
PROP_MIN_STEP,
PROP_MIN_VALUE,
PROP_VALID_VALUES,
)
from homeassistant.components.homekit.type_humidifiers import HumidifierDehumidifier
from homeassistant.components.humidifier.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
DOMAIN,
SERVICE_SET_HUMIDITY,
)
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
PERCENTAGE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from tests.common import async_mock_service
async def test_humidifier(hass, hk_driver, events):
    """Test if humidifier accessory and HA are updated accordingly."""
    entity_id = "humidifier.test"
    hass.states.async_set(entity_id, STATE_OFF)
    await hass.async_block_till_done()
    acc = HumidifierDehumidifier(
        hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
    )
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    # Characteristic defaults right after the accessory is created
    assert acc.aid == 1
    assert acc.category == CATEGORY_HUMIDIFIER
    assert acc.char_current_humidifier_dehumidifier.value == 0
    assert acc.char_target_humidifier_dehumidifier.value == 1
    assert acc.char_current_humidity.value == 0
    assert acc.char_target_humidity.value == 45.0
    assert acc.char_active.value == 0
    assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == DEFAULT_MAX_HUMIDITY
    assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == DEFAULT_MIN_HUMIDITY
    assert acc.char_target_humidity.properties[PROP_MIN_STEP] == 1.0
    assert acc.char_target_humidifier_dehumidifier.properties[PROP_VALID_VALUES] == {
        "Humidifier": 1
    }
    # HA -> HomeKit: turning the entity on updates the characteristics
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {ATTR_HUMIDITY: 47},
    )
    await hass.async_block_till_done()
    assert acc.char_target_humidity.value == 47.0
    assert acc.char_current_humidifier_dehumidifier.value == 2
    assert acc.char_target_humidifier_dehumidifier.value == 1
    assert acc.char_active.value == 1
    # HA -> HomeKit: turning the entity off (with explicit device class)
    hass.states.async_set(
        entity_id,
        STATE_OFF,
        {ATTR_HUMIDITY: 42, ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDIFIER},
    )
    await hass.async_block_till_done()
    assert acc.char_target_humidity.value == 42.0
    assert acc.char_current_humidifier_dehumidifier.value == 0
    assert acc.char_target_humidifier_dehumidifier.value == 1
    assert acc.char_active.value == 0
    # Set from HomeKit
    call_set_humidity = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
    char_target_humidity_iid = acc.char_target_humidity.to_HAP()[HAP_REPR_IID]
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_target_humidity_iid,
                    HAP_REPR_VALUE: 39.0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert len(call_set_humidity) == 1
    assert call_set_humidity[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_humidity[0].data[ATTR_HUMIDITY] == 39.0
    assert acc.char_target_humidity.value == 39.0
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == "RelativeHumidityHumidifierThreshold to 39.0%"
async def test_dehumidifier(hass, hk_driver, events):
    """Test if dehumidifier accessory and HA are updated accordingly."""
    entity_id = "humidifier.test"
    hass.states.async_set(
        entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: DEVICE_CLASS_DEHUMIDIFIER}
    )
    await hass.async_block_till_done()
    acc = HumidifierDehumidifier(
        hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
    )
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    # Characteristic defaults for a dehumidifier device class
    assert acc.aid == 1
    assert acc.category == CATEGORY_HUMIDIFIER
    assert acc.char_current_humidifier_dehumidifier.value == 0
    assert acc.char_target_humidifier_dehumidifier.value == 2
    assert acc.char_current_humidity.value == 0
    assert acc.char_target_humidity.value == 45.0
    assert acc.char_active.value == 0
    assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == DEFAULT_MAX_HUMIDITY
    assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == DEFAULT_MIN_HUMIDITY
    assert acc.char_target_humidity.properties[PROP_MIN_STEP] == 1.0
    assert acc.char_target_humidifier_dehumidifier.properties[PROP_VALID_VALUES] == {
        "Dehumidifier": 2
    }
    # HA -> HomeKit: turning the entity on updates the characteristics
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {ATTR_HUMIDITY: 30},
    )
    await hass.async_block_till_done()
    assert acc.char_target_humidity.value == 30.0
    assert acc.char_current_humidifier_dehumidifier.value == 3
    assert acc.char_target_humidifier_dehumidifier.value == 2
    assert acc.char_active.value == 1
    # HA -> HomeKit: turning the entity off
    hass.states.async_set(
        entity_id,
        STATE_OFF,
        {ATTR_HUMIDITY: 42},
    )
    await hass.async_block_till_done()
    assert acc.char_target_humidity.value == 42.0
    assert acc.char_current_humidifier_dehumidifier.value == 0
    assert acc.char_target_humidifier_dehumidifier.value == 2
    assert acc.char_active.value == 0
    # Set from HomeKit
    call_set_humidity = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
    char_target_humidity_iid = acc.char_target_humidity.to_HAP()[HAP_REPR_IID]
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_target_humidity_iid,
                    HAP_REPR_VALUE: 39.0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert len(call_set_humidity) == 1
    assert call_set_humidity[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_humidity[0].data[ATTR_HUMIDITY] == 39.0
    assert acc.char_target_humidity.value == 39.0
    assert len(events) == 1
    assert (
        events[-1].data[ATTR_VALUE] == "RelativeHumidityDehumidifierThreshold to 39.0%"
    )
async def test_hygrostat_power_state(hass, hk_driver, events):
    """Test that the Active characteristic tracks the entity's on/off state.

    Covers both directions: HA state changes must be mirrored into the
    HomeKit characteristics, and HomeKit writes to Active must invoke the
    humidifier turn_on / turn_off services.
    """
    entity_id = "humidifier.test"
    # Entity starts ON -> accessory reports humidifying (2) and active (1).
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {ATTR_HUMIDITY: 43},
    )
    await hass.async_block_till_done()
    acc = HumidifierDehumidifier(
        hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
    )
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    assert acc.char_current_humidifier_dehumidifier.value == 2
    assert acc.char_target_humidifier_dehumidifier.value == 1
    assert acc.char_active.value == 1
    # Turning the entity OFF must deactivate the accessory; the target state
    # stays "humidifier" (1) since this is a humidifier-only entity.
    hass.states.async_set(
        entity_id,
        STATE_OFF,
        {ATTR_HUMIDITY: 43},
    )
    await hass.async_block_till_done()
    assert acc.char_current_humidifier_dehumidifier.value == 0
    assert acc.char_target_humidifier_dehumidifier.value == 1
    assert acc.char_active.value == 0
    # Set from HomeKit: writing Active=1 must call SERVICE_TURN_ON exactly once.
    call_turn_on = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
    char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert len(call_turn_on) == 1
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_active.value == 1
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == "Active to 1"
    # Writing Active=0 must call SERVICE_TURN_OFF and log a second event.
    call_turn_off = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert len(call_turn_off) == 1
    assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
    assert acc.char_active.value == 0
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] == "Active to 0"
async def test_hygrostat_get_humidity_range(hass, hk_driver):
    """Test if humidity range is evaluated correctly."""
    entity_id = "humidifier.test"
    # The entity advertises a range narrower than the HomeKit defaults.
    hass.states.async_set(
        entity_id, STATE_OFF, {ATTR_MIN_HUMIDITY: 40, ATTR_MAX_HUMIDITY: 45}
    )
    await hass.async_block_till_done()
    acc = HumidifierDehumidifier(
        hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
    )
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    # The characteristic bounds must mirror the entity's min/max attributes.
    assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == 45
    assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == 40
async def test_humidifier_with_linked_humidity_sensor(hass, hk_driver):
    """Test a humidifier with a linked humidity sensor can update."""
    humidity_sensor_entity_id = "sensor.bedroom_humidity"
    # The linked sensor exists before the accessory is created.
    hass.states.async_set(
        humidity_sensor_entity_id,
        "42.0",
        {
            ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
            ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
        },
    )
    await hass.async_block_till_done()
    entity_id = "humidifier.test"
    hass.states.async_set(entity_id, STATE_OFF)
    await hass.async_block_till_done()
    acc = HumidifierDehumidifier(
        hass,
        hk_driver,
        "HumidifierDehumidifier",
        entity_id,
        1,
        {CONF_LINKED_HUMIDITY_SENSOR: humidity_sensor_entity_id},
    )
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    # CurrentRelativeHumidity tracks the linked sensor, not the humidifier.
    assert acc.char_current_humidity.value == 42.0
    hass.states.async_set(
        humidity_sensor_entity_id,
        "43.0",
        {
            ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
            ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
        },
    )
    await hass.async_block_till_done()
    assert acc.char_current_humidity.value == 43.0
    # Sensor going unavailable must NOT reset the characteristic;
    # the last known reading is retained.
    hass.states.async_set(
        humidity_sensor_entity_id,
        STATE_UNAVAILABLE,
        {
            ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
            ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
        },
    )
    await hass.async_block_till_done()
    assert acc.char_current_humidity.value == 43.0
    # Removing the sensor entity entirely also keeps the last reading.
    hass.states.async_remove(humidity_sensor_entity_id)
    await hass.async_block_till_done()
    assert acc.char_current_humidity.value == 43.0
async def test_humidifier_with_a_missing_linked_humidity_sensor(hass, hk_driver):
    """Test a humidifier with a configured linked humidity sensor that is missing."""
    humidity_sensor_entity_id = "sensor.bedroom_humidity"
    entity_id = "humidifier.test"
    # Note: the linked sensor entity is deliberately never created.
    hass.states.async_set(entity_id, STATE_OFF)
    await hass.async_block_till_done()
    acc = HumidifierDehumidifier(
        hass,
        hk_driver,
        "HumidifierDehumidifier",
        entity_id,
        1,
        {CONF_LINKED_HUMIDITY_SENSOR: humidity_sensor_entity_id},
    )
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    # With no sensor data available the characteristic stays at its default.
    assert acc.char_current_humidity.value == 0
async def test_humidifier_as_dehumidifier(hass, hk_driver, events, caplog):
    """Test an invalid char_target_humidifier_dehumidifier from HomeKit."""
    entity_id = "humidifier.test"
    hass.states.async_set(entity_id, STATE_OFF)
    await hass.async_block_till_done()
    acc = HumidifierDehumidifier(
        hass, hk_driver, "HumidifierDehumidifier", entity_id, 1, None
    )
    hk_driver.add_accessory(acc)
    await acc.run()
    await hass.async_block_till_done()
    # Humidifier-only accessory: target state is fixed to humidifying (1).
    assert acc.char_target_humidifier_dehumidifier.value == 1
    # Set from HomeKit: 0 is not a valid target state for this accessory.
    char_target_humidifier_dehumidifier_iid = (
        acc.char_target_humidifier_dehumidifier.to_HAP()[HAP_REPR_IID]
    )
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_target_humidifier_dehumidifier_iid,
                    HAP_REPR_VALUE: 0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    # The invalid write is rejected with a log message and no event emitted.
    assert "TargetHumidifierDehumidifierState is not supported" in caplog.text
    assert len(events) == 0
| 30.523256
| 88
| 0.682971
| 1,645
| 13,125
| 5.073556
| 0.077204
| 0.060388
| 0.080997
| 0.063743
| 0.834771
| 0.794393
| 0.781572
| 0.761443
| 0.742272
| 0.725377
| 0
| 0.014814
| 0.233676
| 13,125
| 429
| 89
| 30.594406
| 0.814973
| 0.009448
| 0
| 0.601156
| 0
| 0
| 0.043235
| 0.02433
| 0
| 0
| 0
| 0
| 0.219653
| 1
| 0
| false
| 0
| 0.020231
| 0
| 0.020231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b71b85145e4f95521f3be31f1ed3f39ab0c160a7
| 982
|
py
|
Python
|
lambda-sns-notify/data.py
|
aws-samples/rekognition-crowded-demo
|
f33cd6453f4d367e7669326da99e8aa4c8890d1d
|
[
"Apache-2.0"
] | 5
|
2020-07-01T02:23:01.000Z
|
2020-08-16T20:38:28.000Z
|
lambda-sns-notify/data.py
|
aws-samples/rekognition-crowded-demo
|
f33cd6453f4d367e7669326da99e8aa4c8890d1d
|
[
"Apache-2.0"
] | null | null | null |
lambda-sns-notify/data.py
|
aws-samples/rekognition-crowded-demo
|
f33cd6453f4d367e7669326da99e8aa4c8890d1d
|
[
"Apache-2.0"
] | 2
|
2020-06-29T18:52:08.000Z
|
2021-03-27T00:25:42.000Z
|
message = {'Records': [{'messageId': 'a7d764c8-0c85-4175-abe1-3bb2e95cc1da', 'receiptHandle': 'AQEBrrfcxbhCLX1aTC9hKVpXMJIjg+YOBM8SKWZe6B+qWI9ycNzffVrP2zSasx0axTc0AfpgCbf3Zmcr3pihgPSDOl/He50aMjPjuYZd/Ciy1KN3br8PHz3ahi1ZxPGdz8QdJP5sZlO2CAZxDeI6AEluXz91S5BzOSDZOPtubvvsHdjWOwJt5f9Bgb55sgmjVjsC2H84ka8iIhRiVmBunQq5iZPRGdeeS1eJa1mG2Zfm1X5bAp+/Am/+orl9dW093KxEI9/gbnYEBWQerJwbhKKH7OW0kC6CGBb0EECaSxc/9flrl5rqAz1A/7ClIZX+9CdtF7Qgh5BzUfAA1ORPZLn8ZU1+z+/ZGyYaGUXI/LYScpO+FA0FKfJkbM8UiayNvqw8irYcg+0HiFsmpaSPtvOtTw==', 'body': "{'Bucket': 'space-enablers-poc', 'Object': 'input/crowd1.jpg'}", 'attributes': {'ApproximateReceiveCount': '1', 'SentTimestamp': '1585862319600', 'SenderId': 'AROAQ6YFEAVLINQRFINAF:image-analyzer-lambda', 'ApproximateFirstReceiveTimestamp': '1585862319601'}, 'messageAttributes': {}, 'md5OfBody': '3ced184fa7f0c9fb34db06ff2d432079', 'eventSource': 'aws:sqs', 'eventSourceARN': 'arn:aws:sqs:us-east-1:066045871446:satellite-sqs-notify', 'awsRegion': 'us-east-1'}]}
| 982
| 982
| 0.824847
| 67
| 982
| 12.089552
| 0.895522
| 0.014815
| 0.017284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148109
| 0.03055
| 982
| 1
| 982
| 982
| 0.702731
| 0
| 0
| 0
| 0
| 1
| 0.876907
| 0.66531
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fa1172da0a0d1e69abb3000e6a0998505e5be11
| 10,442
|
py
|
Python
|
heat/core/relational.py
|
shssf/heat
|
9db0a936c92491fa5aa862f558cb385c9916216b
|
[
"MIT"
] | 105
|
2018-05-18T11:34:03.000Z
|
2022-03-29T06:37:23.000Z
|
heat/core/relational.py
|
shssf/heat
|
9db0a936c92491fa5aa862f558cb385c9916216b
|
[
"MIT"
] | 909
|
2018-05-18T07:50:26.000Z
|
2022-03-31T20:16:30.000Z
|
heat/core/relational.py
|
shssf/heat
|
9db0a936c92491fa5aa862f558cb385c9916216b
|
[
"MIT"
] | 28
|
2018-05-24T14:39:18.000Z
|
2022-03-31T19:18:47.000Z
|
"""
Functions for relational oprations, i.e. equal/no equal...
"""
from __future__ import annotations
import torch
from typing import Union
from .communication import MPI
from .dndarray import DNDarray
from . import _operations
from . import dndarray
from . import types
__all__ = [
"eq",
"equal",
"ge",
"greater",
"greater_equal",
"gt",
"le",
"less",
"less_equal",
"lt",
"ne",
"not_equal",
]
def eq(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise equality comparison of two operands.

    Takes the first and second operand (scalar or
    :class:`~heat.core.dndarray.DNDarray`) and returns a boolean
    :class:`~heat.core.dndarray.DNDarray` holding the element-wise result.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand involved in the comparison
    y: DNDarray or scalar
        The second operand involved in the comparison

    Examples
    --------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.eq(x, 3.0)
    DNDarray([[False, False],
              [ True, False]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.eq(x, y)
    DNDarray([[False, True],
              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
    """
    comparison = _operations.__binary_op(torch.eq, x, y)
    if comparison.dtype == types.bool:
        return comparison
    # The binary op may produce a non-boolean dtype; coerce the local tensor
    # and rebuild the distributed array with the canonical bool datatype.
    return dndarray.DNDarray(
        comparison.larray.type(torch.bool),
        comparison.gshape,
        types.bool,
        comparison.split,
        comparison.device,
        comparison.comm,
        comparison.balanced,
    )


DNDarray.__eq__ = lambda self, other: eq(self, other)
DNDarray.__eq__.__doc__ = eq.__doc__
def equal(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> bool:
    """
    Overall comparison of equality between two :class:`~heat.core.dndarray.DNDarray`. Returns ``True`` if two arrays
    have the same size and elements, and ``False`` otherwise.
    Parameters
    ----------
    x: DNDarray or scalar
        The first operand involved in the comparison
    y: DNDarray or scalar
        The second operand involved in the comparison
    Examples
    ---------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.equal(x, ht.float32([[1, 2],[3, 4]]))
    True
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.equal(x, y)
    False
    >>> ht.equal(x, 3.0)
    False
    """
    # NOTE(review): torch.equal returns a Python bool, not a tensor; this
    # presumably relies on __binary_op wrapping the scalar result — confirm.
    result_tensor = _operations.__binary_op(torch.equal, x, y)
    # A process holding exactly one local element reports that element's
    # verdict; all other processes vote True so they do not veto the global
    # logical-AND reduction below.
    if result_tensor.larray.numel() == 1:
        result_value = result_tensor.larray.item()
    else:
        result_value = True
    # Arrays are equal only if every rank agrees (logical AND across ranks).
    return result_tensor.comm.allreduce(result_value, MPI.LAND)
def ge(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise rich comparison ``x >= y``, not commutative.

    Takes the first and second operand (scalar or
    :class:`~heat.core.dndarray.DNDarray`) and returns a boolean
    :class:`~heat.core.dndarray.DNDarray` with the element-wise result.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand to be compared greater than or equal to second operand
    y: DNDarray or scalar
        The second operand to be compared less than or equal to first operand

    Examples
    --------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.ge(x, 3.0)
    DNDarray([[False, False],
              [ True, True]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.ge(x, y)
    DNDarray([[False, True],
              [ True, True]], dtype=ht.bool, device=cpu:0, split=None)
    """
    out = _operations.__binary_op(torch.ge, x, y)
    if out.dtype == types.bool:
        return out
    # Coerce a non-boolean result back to the canonical bool datatype.
    return dndarray.DNDarray(
        out.larray.type(torch.bool),
        out.gshape,
        types.bool,
        out.split,
        out.device,
        out.comm,
        out.balanced,
    )


DNDarray.__ge__ = lambda self, other: ge(self, other)
DNDarray.__ge__.__doc__ = ge.__doc__

# alias
greater_equal = ge
greater_equal.__doc__ = ge.__doc__
def gt(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise rich comparison ``x > y``, not commutative.

    Takes the first and second operand (scalar or
    :class:`~heat.core.dndarray.DNDarray`) and returns a boolean
    :class:`~heat.core.dndarray.DNDarray` with the element-wise result.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand to be compared greater than second operand
    y: DNDarray or scalar
        The second operand to be compared less than first operand

    Examples
    --------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.gt(x, 3.0)
    DNDarray([[False, False],
              [False, True]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.gt(x, y)
    DNDarray([[False, False],
              [ True, True]], dtype=ht.bool, device=cpu:0, split=None)
    """
    result = _operations.__binary_op(torch.gt, x, y)
    if result.dtype != types.bool:
        # Coerce a non-boolean result back to the canonical bool datatype.
        result = dndarray.DNDarray(
            result.larray.type(torch.bool),
            result.gshape,
            types.bool,
            result.split,
            result.device,
            result.comm,
            result.balanced,
        )
    return result


DNDarray.__gt__ = lambda self, other: gt(self, other)
DNDarray.__gt__.__doc__ = gt.__doc__

# alias
greater = gt
greater.__doc__ = gt.__doc__
def le(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise rich comparison ``x <= y``, not commutative.

    Takes the first and second operand (scalar or
    :class:`~heat.core.dndarray.DNDarray`) and returns a boolean
    :class:`~heat.core.dndarray.DNDarray` with the element-wise result.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand to be compared less than or equal to second operand
    y: DNDarray or scalar
        The second operand to be compared greater than or equal to first operand

    Examples
    --------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.le(x, 3.0)
    DNDarray([[ True, True],
              [ True, False]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.le(x, y)
    DNDarray([[ True, True],
              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
    """
    cmp_result = _operations.__binary_op(torch.le, x, y)
    if cmp_result.dtype == types.bool:
        return cmp_result
    # Coerce a non-boolean result back to the canonical bool datatype.
    return dndarray.DNDarray(
        cmp_result.larray.type(torch.bool),
        cmp_result.gshape,
        types.bool,
        cmp_result.split,
        cmp_result.device,
        cmp_result.comm,
        cmp_result.balanced,
    )


DNDarray.__le__ = lambda self, other: le(self, other)
DNDarray.__le__.__doc__ = le.__doc__

# alias
less_equal = le
less_equal.__doc__ = le.__doc__
def lt(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise rich comparison ``x < y``, not commutative.

    Takes the first and second operand (scalar or
    :class:`~heat.core.dndarray.DNDarray`) and returns a boolean
    :class:`~heat.core.dndarray.DNDarray` with the element-wise result.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand to be compared less than second operand
    y: DNDarray or scalar
        The second operand to be compared greater than first operand

    Examples
    --------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.lt(x, 3.0)
    DNDarray([[ True, True],
              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.lt(x, y)
    DNDarray([[ True, False],
              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
    """
    compared = _operations.__binary_op(torch.lt, x, y)
    if compared.dtype == types.bool:
        return compared
    # Coerce a non-boolean result back to the canonical bool datatype.
    return dndarray.DNDarray(
        compared.larray.type(torch.bool),
        compared.gshape,
        types.bool,
        compared.split,
        compared.device,
        compared.comm,
        compared.balanced,
    )


DNDarray.__lt__ = lambda self, other: lt(self, other)
DNDarray.__lt__.__doc__ = lt.__doc__

# alias
less = lt
less.__doc__ = lt.__doc__
def ne(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) -> DNDarray:
    """
    Element-wise rich comparison of non-equality (``x != y``), commutative.

    Takes the first and second operand (scalar or
    :class:`~heat.core.dndarray.DNDarray`) and returns a boolean
    :class:`~heat.core.dndarray.DNDarray` with the element-wise result.

    Parameters
    ----------
    x: DNDarray or scalar
        The first operand involved in the comparison
    y: DNDarray or scalar
        The second operand involved in the comparison

    Examples
    --------
    >>> import heat as ht
    >>> x = ht.float32([[1, 2],[3, 4]])
    >>> ht.ne(x, 3.0)
    DNDarray([[ True, True],
              [False, True]], dtype=ht.bool, device=cpu:0, split=None)
    >>> y = ht.float32([[2, 2], [2, 2]])
    >>> ht.ne(x, y)
    DNDarray([[ True, False],
              [ True, True]], dtype=ht.bool, device=cpu:0, split=None)
    """
    unequal = _operations.__binary_op(torch.ne, x, y)
    if unequal.dtype == types.bool:
        return unequal
    # Coerce a non-boolean result back to the canonical bool datatype.
    return dndarray.DNDarray(
        unequal.larray.type(torch.bool),
        unequal.gshape,
        types.bool,
        unequal.split,
        unequal.device,
        unequal.comm,
        unequal.balanced,
    )


DNDarray.__ne__ = lambda self, other: ne(self, other)
DNDarray.__ne__.__doc__ = ne.__doc__

# alias
not_equal = ne
not_equal.__doc__ = ne.__doc__
| 29.497175
| 189
| 0.592703
| 1,401
| 10,442
| 4.282655
| 0.087794
| 0.007
| 0.042
| 0.049
| 0.807
| 0.791
| 0.784333
| 0.774333
| 0.771667
| 0.771667
| 0
| 0.015236
| 0.264604
| 10,442
| 353
| 190
| 29.580737
| 0.766115
| 0.552768
| 0
| 0.465116
| 0
| 0
| 0.014907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054264
| false
| 0
| 0.062016
| 0
| 0.170543
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fa82d5e41296f7549d42ad22cc3d7eb72613c7d
| 72,747
|
py
|
Python
|
tests/dbusmock_templates/bluez_scan.py
|
garretthagen21/bluepi
|
042b77ee383c65b185c6a52afd32b0336aa887dd
|
[
"MIT"
] | 296
|
2016-01-07T22:45:13.000Z
|
2022-03-30T13:28:08.000Z
|
tests/dbusmock_templates/bluez_scan.py
|
garretthagen21/bluepi
|
042b77ee383c65b185c6a52afd32b0336aa887dd
|
[
"MIT"
] | 267
|
2016-01-10T21:41:59.000Z
|
2022-03-21T17:19:12.000Z
|
tests/dbusmock_templates/bluez_scan.py
|
garretthagen21/bluepi
|
042b77ee383c65b185c6a52afd32b0336aa887dd
|
[
"MIT"
] | 120
|
2016-01-22T09:47:49.000Z
|
2022-02-22T01:23:38.000Z
|
# -*- coding: utf-8 -*-
'''bluetoothd mock template
This creates the expected methods and properties of the object manager
org.bluez object (/), the manager object (/org/bluez), but no adapters or
devices.
This supports BlueZ 5 only.
'''
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version. See http://www.gnu.org/copyleft/lgpl.html for the full text
# of the license.
__author__ = 'Philip Withnall'
__email__ = 'philip.withnall@collabora.co.uk'
__copyright__ = '(c) 2013 Collabora Ltd.'
__license__ = 'LGPL 3+'
import time
import dbus
from dbusmock import OBJECT_MANAGER_IFACE, mockobject
# D-Bus names and interfaces implemented by this bluetoothd mock template.
BUS_NAME = 'org.bluez'
MAIN_OBJ = '/'
SYSTEM_BUS = True
IS_OBJECT_MANAGER = True
BLUEZ_MOCK_IFACE = 'org.bluez.Mock'
AGENT_MANAGER_IFACE = 'org.bluez.AgentManager1'
PROFILE_MANAGER_IFACE = 'org.bluez.ProfileManager1'
ADAPTER_IFACE = 'org.bluez.Adapter1'
MEDIA_IFACE = 'org.bluez.Media1'
NETWORK_SERVER_IFACE = 'org.bluez.Network1'
# BUG FIX: this was 'org.bluez.GattManager1' (a copy of GATT_MNGR_IFACE),
# so RegisterAdvertisement/UnregisterAdvertisement were registered under the
# wrong interface. BlueZ exposes LE advertising on LEAdvertisingManager1, as
# the interface reference further down in this file also documents.
LEDADVERTISING_MNGR_IFACE = 'org.bluez.LEAdvertisingManager1'
DEVICE_IFACE = 'org.bluez.Device1'
GATT_MNGR_IFACE = 'org.bluez.GattManager1'
GATT_SRVC_IFACE = 'org.bluez.GattService1'
GATT_CHRC_IFACE = 'org.bluez.GattCharacteristic1'
GATT_DSCR_IFACE = 'org.bluez.GattDescriptor1'
# Optional device database (disabled): when enabled, AddDevice reads
# pre-recorded properties for known object paths from device_db.json.
# here = Path(__file__).parent
# device_json = Path.joinpath('device_db.json')
# with device_json.open() as input_db:
#     microbit_data = json.load(input_db)
def load(mock, parameters):
    """Template entry point: set up the /org/bluez manager object.

    Creates the manager with the AgentManager1 methods and then attaches the
    ProfileManager1 methods to the same object. No adapters or devices are
    created here; tests add those via the convenience methods below.
    """
    agent_methods = [
        ('RegisterAgent', 'os', '', ''),
        ('RequestDefaultAgent', 'o', '', ''),
        ('UnregisterAgent', 'o', '', ''),
    ]
    mock.AddObject('/org/bluez', AGENT_MANAGER_IFACE, {}, agent_methods)

    profile_methods = [
        ('RegisterProfile', 'osa{sv}', '', ''),
        ('UnregisterProfile', 'o', '', ''),
    ]
    bluez = mockobject.objects['/org/bluez']
    bluez.AddMethods(PROFILE_MANAGER_IFACE, profile_methods)
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='ss', out_signature='s')
def AddAdapter(self, device_name, system_name):
    '''Convenience method to add a Bluetooth adapter
    You have to specify a device name which must be a valid part of an object
    path, e. g. "hci0", and an arbitrary system name (pretty hostname).
    Returns the new object path.
    '''
    path = '/org/bluez/' + device_name
    adapter_properties = {
        'UUIDs': dbus.Array([
            # Reference:
            # http://git.kernel.org/cgit/bluetooth/bluez.git/tree/lib/uuid.h
            # PNP
            '00001200-0000-1000-8000-00805f9b34fb',
            # Generic Access Profile
            '00001800-0000-1000-8000-00805f9b34fb',
            # Generic Attribute Profile
            '00001801-0000-1000-8000-00805f9b34fb',
            # Audio/Video Remote Control Profile (remote)
            '0000110e-0000-1000-8000-00805f9b34fb',
            # Audio/Video Remote Control Profile (target)
            '0000110c-0000-1000-8000-00805f9b34fb',
        ], variant_level=1),
        'Discoverable': dbus.Boolean(False, variant_level=1),
        'Discovering': dbus.Boolean(True, variant_level=1),
        'Pairable': dbus.Boolean(True, variant_level=1),
        'Powered': dbus.Boolean(True, variant_level=1),
        'Address': dbus.String('00:01:02:03:04:05', variant_level=1),
        'Alias': dbus.String(system_name, variant_level=1),
        'Modalias': dbus.String('usb:v1D6Bp0245d050A', variant_level=1),
        'Name': dbus.String(system_name, variant_level=1),
        # Reference:
        # http://bluetooth-pentest.narod.ru/software/
        # bluetooth_class_of_device-service_generator.html
        'Class': dbus.UInt32(268, variant_level=1),  # Computer, Laptop
        'DiscoverableTimeout': dbus.UInt32(180, variant_level=1),
        'PairableTimeout': dbus.UInt32(180, variant_level=1),
    }
    # A second adapter gets a distinct hardware address so tests can tell
    # the two apart.
    if device_name == 'hci1':
        adapter_properties['Address'] = 'AA:01:02:03:04:05'
    # NOTE(review): DeviceDiscovery is not defined in this part of the file;
    # presumably a code string/handler defined elsewhere in the template —
    # confirm before refactoring.
    self.AddObject(path,
                   ADAPTER_IFACE,
                   # Properties
                   adapter_properties,
                   # Methods
                   [
                       ('RemoveDevice', 'o', '', ''),
                       ('StartDiscovery', '', '', DeviceDiscovery),
                       ('StopDiscovery', '', '', ''),
                       ('SetDiscoveryFilter', 'a{sv}', '', '')
                   ])
    adapter = mockobject.objects[path]
    adapter.AddMethods(MEDIA_IFACE, [
        ('RegisterEndpoint', 'oa{sv}', '', ''),
        ('UnregisterEndpoint', 'o', '', ''),
    ])
    adapter.AddMethods(NETWORK_SERVER_IFACE, [
        ('Register', 'ss', '', ''),
        ('Unregister', 's', '', ''),
    ])
    # NOTE(review): advertisement methods are registered under
    # LEDADVERTISING_MNGR_IFACE — verify that constant actually names the
    # LEAdvertisingManager1 interface (see the reference block below).
    adapter.AddMethods(LEDADVERTISING_MNGR_IFACE, [
        ('RegisterAdvertisement', 'oa{sv}', '', ''),
        ('UnregisterAdvertisement', 'o', '', '')
    ])
    adapter.AddMethods(GATT_MNGR_IFACE, [
        ('RegisterApplication', 'oa{sv}', '', ''),
        ('UnregisterApplication', 'o', '', '')
    ])
    lea_mngr_properties = {
        'ActiveInstances': dbus.Byte(0),
        'SupportedIncludes': dbus.Array(["appearance", "local-name"],
                                        signature='s'),
        'SupportedInstances': dbus.Byte(5),
    }
    adapter.AddProperties(LEDADVERTISING_MNGR_IFACE,
                          dbus.Dictionary(
                              lea_mngr_properties
                          )
                          )
    """
    org.bluez.LEAdvertisingManager1 interface - - -
    .RegisterAdvertisement method oa{sv} - -
    .UnregisterAdvertisement method o - -
    .ActiveInstances property y 1 emits-change
    .SupportedIncludes property as 2 "appearance" "local-name" emits-change
    .SupportedInstances property y 4 emits-change
    .SupportedSecondaryChannels property as - emits-change
    """
    # Announce the new adapter to ObjectManager clients.
    manager = mockobject.objects['/']
    manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',
                       'oa{sa{sv}}', [
                           dbus.ObjectPath(path),
                           {
                               ADAPTER_IFACE: adapter_properties,
                               LEDADVERTISING_MNGR_IFACE: lea_mngr_properties
                           }
                       ])
    return path
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='sss', out_signature='s')
def AddDevice(self, adapter_device_name, device_address, alias):
    '''Convenience method to add a Bluetooth device
    You have to specify a device address which must be a valid Bluetooth
    address (e.g. 'AA:BB:CC:DD:EE:FF'). The alias is the human-readable name
    for the device (e.g. as set on the device itself), and the adapter device
    name is the device_name passed to AddAdapter.
    This will create a new, unpaired and unconnected device.
    Returns the new object path.
    '''
    device_name = 'dev_' + device_address.replace(':', '_').upper()
    adapter_path = '/org/bluez/' + adapter_device_name
    path = adapter_path + '/' + device_name
    if adapter_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Adapter %s does not exist.' % adapter_device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')
    # BUG FIX: 'microbit_data' is only defined when the device_db.json
    # loading code (commented out at module top) is enabled; referencing it
    # unconditionally raised NameError. Fall back to an empty database so
    # the default properties branch is taken when it is absent.
    device_db = globals().get('microbit_data', {})
    if path in device_db:
        # Use pre-recorded properties for known devices from the database.
        properties = device_db.get(path, {}).get(DEVICE_IFACE, {})
    else:
        # Default: a freshly discovered, unpaired, unconnected device.
        properties = {
            'UUIDs': dbus.Array([], signature='s', variant_level=1),
            'Blocked': dbus.Boolean(False, variant_level=1),
            'Connected': dbus.Boolean(False, variant_level=1),
            'LegacyPairing': dbus.Boolean(False, variant_level=1),
            'Paired': dbus.Boolean(False, variant_level=1),
            'Trusted': dbus.Boolean(False, variant_level=1),
            'RSSI': dbus.Int16(-79, variant_level=1),  # arbitrary
            'Adapter': dbus.ObjectPath(adapter_path, variant_level=1),
            'Address': dbus.String(device_address, variant_level=1),
            'Alias': dbus.String(alias, variant_level=1),
            'Name': dbus.String(alias, variant_level=1),
        }
    self.AddObject(path,
                   DEVICE_IFACE,
                   # Properties
                   properties,
                   # Methods
                   [
                       ('CancelPairing', '', '', ''),
                       ('Connect', '', '',
                        'ret = self.ConnectMicroBit("%s", "%s")' % (adapter_device_name, device_address)),
                       ('ConnectProfile', 's', '', ''),
                       ('Disconnect', '', '', ''),
                       ('DisconnectProfile', 's', '', ''),
                       ('Pair', '', '', ''),
                   ])
    # Announce the new device to ObjectManager clients.
    manager = mockobject.objects['/']
    manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',
                       'oa{sa{sv}}', [
                           dbus.ObjectPath(path),
                           {DEVICE_IFACE: properties},
                       ])
    return path
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='ssi', out_signature='')
def PairDevice(self, adapter_device_name, device_address, class_=5898764):
    '''Convenience method to mark an existing device as paired.
    You have to specify a device address which must be a valid Bluetooth
    address (e.g. 'AA:BB:CC:DD:EE:FF'). The adapter device name is the
    device_name passed to AddAdapter.
    This unblocks the device if it was blocked.
    If the specified adapter or device don’t exist, a NoSuchAdapter or
    NoSuchDevice error will be returned on the bus.
    Returns nothing.
    '''
    device_name = 'dev_' + device_address.replace(':', '_').upper()
    adapter_path = '/org/bluez/' + adapter_device_name
    device_path = adapter_path + '/' + device_name
    if adapter_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Adapter %s does not exist.' % adapter_device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')
    if device_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Device %s does not exist.' % device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')
    device = mockobject.objects[device_path]
    # Based off pairing with an Android phone.
    uuids = [
        '00001105-0000-1000-8000-00805f9b34fb',
        '0000110a-0000-1000-8000-00805f9b34fb',
        '0000110c-0000-1000-8000-00805f9b34fb',
        '00001112-0000-1000-8000-00805f9b34fb',
        '00001115-0000-1000-8000-00805f9b34fb',
        '00001116-0000-1000-8000-00805f9b34fb',
        '0000111f-0000-1000-8000-00805f9b34fb',
        '0000112f-0000-1000-8000-00805f9b34fb',
        '00001200-0000-1000-8000-00805f9b34fb',
    ]
    device.props[DEVICE_IFACE]['UUIDs'] = dbus.Array(uuids, variant_level=1)
    device.props[DEVICE_IFACE]['Paired'] = dbus.Boolean(True, variant_level=1)
    device.props[DEVICE_IFACE]['LegacyPairing'] = dbus.Boolean(True,
                                                              variant_level=1)
    device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(False,
                                                         variant_level=1)
    # Add phone-identifying properties only on the first pairing; presence of
    # 'Modalias' is used as the "already added" marker (EAFP).
    try:
        device.props[DEVICE_IFACE]['Modalias']
    except KeyError:
        device.AddProperties(DEVICE_IFACE, {
            'Modalias': dbus.String('bluetooth:v000Fp1200d1436',
                                    variant_level=1),
            'Class': dbus.UInt32(class_, variant_level=1),
            'Icon': dbus.String('phone', variant_level=1),
        })
    # Broadcast all changed properties in a single PropertiesChanged signal.
    device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [
        DEVICE_IFACE,
        {
            'UUIDs': dbus.Array(uuids, variant_level=1),
            'Paired': dbus.Boolean(True, variant_level=1),
            'LegacyPairing': dbus.Boolean(True, variant_level=1),
            'Blocked': dbus.Boolean(False, variant_level=1),
            'Modalias': dbus.String('bluetooth:v000Fp1200d1436',
                                    variant_level=1),
            'Class': dbus.UInt32(class_, variant_level=1),
            'Icon': dbus.String('phone', variant_level=1),
        },
        [],
    ])
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='ss', out_signature='')
def BlockDevice(self, adapter_device_name, device_address):
    '''Convenience method to mark an existing device as blocked.
    You have to specify a device address which must be a valid Bluetooth
    address (e.g. 'AA:BB:CC:DD:EE:FF'). The adapter device name is the
    device_name passed to AddAdapter.
    This disconnects the device if it was connected.
    If the specified adapter or device don’t exist, a NoSuchAdapter or
    NoSuchDevice error will be returned on the bus.
    Returns nothing.
    '''
    device_name = 'dev_' + device_address.replace(':', '_').upper()
    adapter_path = '/org/bluez/' + adapter_device_name
    device_path = adapter_path + '/' + device_name
    if adapter_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Adapter %s does not exist.' % adapter_device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')
    if device_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Device %s does not exist.' % device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')
    device = mockobject.objects[device_path]
    # Blocking implies disconnecting; both properties change together.
    device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(True, variant_level=1)
    device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(False,
                                                           variant_level=1)
    device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [
        DEVICE_IFACE,
        {
            'Blocked': dbus.Boolean(True, variant_level=1),
            'Connected': dbus.Boolean(False, variant_level=1),
        },
        [],
    ])
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='ss', out_signature='')
def ConnectDevice(self, adapter_device_name, device_address):
    '''Convenience method to mark an existing device as connected.
    You have to specify a device address which must be a valid Bluetooth
    address (e.g. 'AA:BB:CC:DD:EE:FF'). The adapter device name is the
    device_name passed to AddAdapter.
    This unblocks the device if it was blocked.
    If the specified adapter or device don’t exist, a NoSuchAdapter or
    NoSuchDevice error will be returned on the bus.
    Returns nothing.
    '''
    device_name = 'dev_' + device_address.replace(':', '_').upper()
    adapter_path = '/org/bluez/' + adapter_device_name
    device_path = adapter_path + '/' + device_name
    if adapter_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Adapter %s does not exist.' % adapter_device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')
    if device_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Device %s does not exist.' % device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')
    device = mockobject.objects[device_path]
    # Connecting implies unblocking; both properties change together.
    device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(False,
                                                         variant_level=1)
    device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(True,
                                                           variant_level=1)
    device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [
        DEVICE_IFACE,
        {
            'Blocked': dbus.Boolean(False, variant_level=1),
            'Connected': dbus.Boolean(True, variant_level=1),
        },
        [],
    ])
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='oa{sv}', out_signature='s')
def AddGattService(self,
                   path,
                   service_props):
    '''Convenience method to add a GATT service object.

    Creates an object at *path* exposing org.bluez.GattService1 with the
    given properties (and no methods), announces it via InterfacesAdded,
    and returns the object path.
    '''
    self.AddObject(path,
                   GATT_SRVC_IFACE,
                   # Properties
                   service_props,
                   # Methods
                   [])
    # NOTE(review): leftover debug print; harmless for a test mock but noisy.
    print('Adding props', service_props)
    manager = mockobject.objects['/']
    manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',
                       'oa{sa{sv}}', [
                           dbus.ObjectPath(path),
                           {GATT_SRVC_IFACE: service_props},
                       ])
    return path
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='sa{sv}', out_signature='s')
def AddGattCharacteristic(self, path, charc_props):
    '''Convenience method to add a GATT characteristic object.

    Creates an object at *path* exposing org.bluez.GattCharacteristic1 with
    the given properties. ReadValue/WriteValue are wired to the template's
    GattReadValue/GattWriteValue handlers via code strings; the remaining
    methods are no-ops. Announces the object via InterfacesAdded and returns
    the object path.
    '''
    # NOTE(review): in_signature uses 's' for path while AddGattService uses
    # 'o' — presumably intentional for callers passing plain strings; verify.
    self.AddObject(path,
                   GATT_CHRC_IFACE,
                   # Properties
                   charc_props,
                   # Methods
                   [
                       ('AcquireNotify', 'a{sv}', 'hq', ''),
                       ('AcquireWrite', 'a{sv}', 'hq', ''),
                       ('ReadValue', 'a{sv}', 'ay',
                        'ret = self.GattReadValue("%s", args[0])' % path),
                       ('StartNotify', '', '', ''),
                       ('StopNotify', '', '', ''),
                       ('WriteValue', 'aya{sv}', '',
                        'ret = self.GattWriteValue("%s", args[0], args[1])' % path),
                   ])
    manager = mockobject.objects['/']
    manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',
                       'oa{sa{sv}}', [
                           dbus.ObjectPath(path),
                           {GATT_CHRC_IFACE: charc_props},
                       ])
    return path
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='ss', out_signature='')
def DisconnectDevice(self, adapter_device_name, device_address):
    '''Convenience method to mark an existing device as disconnected.

    The device address must be a valid Bluetooth address such as
    'AA:BB:CC:DD:EE:FF'; the adapter device name is the device_name that
    was passed to AddAdapter.  The device's blocked status is left alone.

    Raises a NoSuchAdapter or NoSuchDevice D-Bus error if the adapter or
    device does not exist.  Returns nothing.
    '''
    adapter_path = '/org/bluez/' + adapter_device_name
    device_name = 'dev_' + device_address.replace(':', '_').upper()
    device_path = adapter_path + '/' + device_name
    if adapter_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Adapter %s does not exist.' % adapter_device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')
    if device_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Device %s does not exist.' % device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')
    disconnected = dbus.Boolean(False, variant_level=1)
    device = mockobject.objects[device_path]
    device.props[DEVICE_IFACE]['Connected'] = disconnected
    device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged',
                      'sa{sv}as',
                      [DEVICE_IFACE, {'Connected': disconnected}, []])
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='ssqaysay', out_signature='s')
def AddBeacon(self,
              adapter_device_name='hci0',
              device_address='11:01:02:03:04:05',
              manf_id=None,
              manf_data=None,
              service_uuid=None,
              service_data=None,
              ):
    """Convenience method to add a Bluetooth device acting as a beacon.

    You have to specify a device address which must be a valid Bluetooth
    address (e.g. 'AA:BB:CC:DD:EE:FF').  The adapter device name is the
    device_name passed to AddAdapter.  Either a manufacturer id/data pair
    (AltBeacon, iBeacon) or a service UUID/data pair (Eddystone) may be
    supplied; the matching ManufacturerData or ServiceData property is
    then populated.

    This will create a new, unpaired and unconnected device.

    Returns the new object path.
    """
    device_name = 'dev_' + device_address.replace(':', '_').upper()
    adapter_path = '/org/bluez/' + adapter_device_name
    path = adapter_path + '/' + device_name
    if adapter_path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'Adapter %s does not exist.' % adapter_device_name,
            name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')
    properties = {
        'UUIDs': dbus.Array([], signature='s', variant_level=1),
        'Blocked': dbus.Boolean(False, variant_level=1),
        'Connected': dbus.Boolean(False, variant_level=1),
        'LegacyPairing': dbus.Boolean(False, variant_level=1),
        'Paired': dbus.Boolean(False, variant_level=1),
        'Trusted': dbus.Boolean(False, variant_level=1),
        'RSSI': dbus.Int16(-61, variant_level=1),  # arbitrary
        'Adapter': dbus.ObjectPath(adapter_path, variant_level=1),
        'Address': dbus.String(device_address, variant_level=1),
        # variant_level added for consistency with the other properties
        'AddressType': dbus.String("random", variant_level=1),
        'Alias': dbus.String("40-A1-82-A6-BB-3D", variant_level=1),
    }
    if service_uuid:
        properties['UUIDs'].append(service_uuid)
        properties['ServiceData'] = dbus.Dictionary({service_uuid: service_data})
    if manf_id:
        properties['ManufacturerData'] = dbus.Dictionary({manf_id: manf_data})
    self.AddObject(path,
                   DEVICE_IFACE,
                   # Properties
                   properties,
                   # Methods
                   [
                       ('CancelPairing', '', '', ''),
                       ('Connect', '', '', ''),
                       ('ConnectProfile', 's', '', ''),
                       ('Disconnect', '', '', ''),
                       ('DisconnectProfile', 's', '', ''),
                       ('Pair', '', '', ''),
                   ])
    manager = mockobject.objects['/']
    manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',
                       'oa{sa{sv}}', [
                           dbus.ObjectPath(path),
                           {DEVICE_IFACE: properties},
                       ])
    # out_signature is 's': the docstring always promised the object path,
    # but the original implementation never returned it.
    return path
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='', out_signature='')
def DeviceDiscovery(self):
    '''Simulate a discovery run that finds four well-known beacon types.

    Adds one Eddystone URL, one Eddystone UID, one AltBeacon and one
    iBeacon device to adapter hci0 via AddBeacon.
    '''
    eddystone_uuid = '0000feaa-0000-1000-8000-00805f9b34fb'
    beacons = [
        # Eddystone URL Beacon
        ('11:22:33:44:55:66',
         dict(service_uuid=eddystone_uuid,
              service_data=[16, 8, 1, 98, 108, 117, 101, 116, 111,
                            111, 116, 104, 7])),
        # Eddystone UID Beacon
        ('11:22:33:44:55:99',
         dict(service_uuid=eddystone_uuid,
              service_data=[0, 191, 0, 0, 0, 0, 0, 69, 97, 114, 116, 104,
                            0, 0, 0, 0, 0, 11])),
        # AltBeacon
        ('11:22:33:44:55:77',
         dict(manf_id=65535,
              manf_data=[190, 172, 72, 37, 62, 89, 114, 36, 68, 99,
                         185, 184, 3, 63, 250, 181, 202, 254, 97, 99,
                         101, 107, 188, 0])),
        # iBeacon
        ('11:22:33:44:55:88',
         dict(manf_id=76,
              manf_data=[2, 21, 106, 177, 124, 23, 244, 123, 77, 65, 128,
                         54, 82, 106, 238, 210, 47, 115, 1, 22, 3, 104,
                         191])),
    ]
    for address, extra in beacons:
        self.AddBeacon('hci0', address, **extra)
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='ss', out_signature='')
def ConnectMicroBit(self, adapter_name, device_address):
    '''Connect the mock micro:bit device and populate its GATT database.

    :param adapter_name: adapter device name (e.g. 'hci0').
    :param device_address: Bluetooth address of the micro:bit device.

    Marks the device connected, adds every GATT service and characteristic
    described by the module-level microbit_data fixture, then flips
    ServicesResolved to True and emits the matching PropertiesChanged
    signal, mimicking what BlueZ does after service discovery.
    '''
    upper_address = device_address.upper().replace(":", "_")
    dev_path = f'/org/bluez/{adapter_name}/dev_{upper_address}'
    device = mockobject.objects[dev_path]
    self.ConnectDevice(adapter_name, upper_address)
    for path, ifaces in microbit_data.items():
        srvc_props = ifaces.get(GATT_SRVC_IFACE)
        if srvc_props:
            # Drop 'Includes' on a copy: the original `del` mutated the
            # shared microbit_data fixture, so a second call raised KeyError.
            srvc_props = {key: val for key, val in srvc_props.items()
                          if key != 'Includes'}
            self.AddGattService(dbus.ObjectPath(path),
                                dbus.Dictionary(srvc_props,
                                                signature='sv'))
        chrc_props = ifaces.get(GATT_CHRC_IFACE)
        if chrc_props:
            chrc_props = dbus.Dictionary(chrc_props, signature='sv')
            chrc_props['Value'] = dbus.Array(chrc_props['Value'], signature='y')
            self.AddGattCharacteristic(dbus.ObjectPath(path), chrc_props)
    resolved = dbus.Boolean(True, variant_level=1)
    device.props[DEVICE_IFACE]['ServicesResolved'] = resolved
    device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [
        DEVICE_IFACE,
        {'ServicesResolved': resolved},
        [],
    ])
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='oa{sv}', out_signature='ay')
def GattReadValue(self, path, options):
    '''Mock backend for GattCharacteristic1.ReadValue.

    :param path: object path of the characteristic being read.
    :param options: ReadValue options dict (logged, otherwise ignored).

    Records the call in the characteristic's call_log and returns the
    characteristic's current 'Value' property.
    '''
    gatt_chrc = mockobject.objects[path]
    gatt_chrc.call_log.append((int(time.time()), 'ReadValue', [options]))
    return gatt_chrc.Get(GATT_CHRC_IFACE, 'Value')
@dbus.service.method(BLUEZ_MOCK_IFACE,
                     in_signature='oaya{sv}', out_signature='')
def GattWriteValue(self, path, value, options):
    '''Mock backend for GattCharacteristic1.WriteValue.

    :param path: object path of the characteristic being written.
    :param value: byte array to store as the new 'Value' property.
    :param options: WriteValue options dict (logged, otherwise ignored).

    Stores the value, records the call in the characteristic's call_log,
    and — when the write targets the micro:bit UART RX characteristic —
    echoes the value back as a PropertiesChanged notification on the
    paired UART TX characteristic, imitating the device's UART loopback.
    '''
    gatt_chrc = mockobject.objects[path]
    gatt_chrc.Set(GATT_CHRC_IFACE, 'Value', value)
    gatt_chrc.call_log.append((int(time.time()), 'WriteValue', [value, options]))
    uart_rx_path = '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028/char0029'
    uart_tx_path = '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028/char002b'
    if path == uart_rx_path:
        tx_obj = mockobject.objects[uart_tx_path]
        tx_obj.EmitSignal(dbus.PROPERTIES_IFACE,
                          'PropertiesChanged',
                          'sa{sv}as', [
                              GATT_CHRC_IFACE,
                              {
                                  'Value': dbus.Array(value, variant_level=1),
                              },
                              [],
                          ])
microbit_data = {
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.Device1': {'Address': 'E9:06:4D:45:FC:8D',
'AddressType': 'random',
'Name': 'BBC micro:bit [tetog]',
'Alias': 'BBC micro:bit [tetog]', 'Paired': False,
'Trusted': False, 'Blocked': False,
'LegacyPairing': False, 'Connected': False,
'UUIDs': ['00001800-0000-1000-8000-00805f9b34fb',
'00001801-0000-1000-8000-00805f9b34fb',
'0000180a-0000-1000-8000-00805f9b34fb',
'0000fe59-0000-1000-8000-00805f9b34fb',
'e95d0753-251d-470a-a062-fa1922dfa9a8',
'e95d6100-251d-470a-a062-fa1922dfa9a8',
'e95d93af-251d-470a-a062-fa1922dfa9a8',
'e95d9882-251d-470a-a062-fa1922dfa9a8',
'e95dd91d-251d-470a-a062-fa1922dfa9a8',
'e97dd91d-251d-470a-a062-fa1922dfa9a8'],
'Adapter': '/org/bluez/hci0',
'ServicesResolved': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service003c": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': 'e95d6100-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service003c/char0040": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d1b25-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service003c',
'Value': [], 'Flags': ['read', 'write']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service003c/char003d": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d9250-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service003c',
'Value': [27], 'Notifying': False,
'Flags': ['read', 'notify'],
'NotifyAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service003c/char003d/desc003f": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service003c/char003d',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0035": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': 'e95dd91d-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0035/char003a": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d0d2d-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0035',
'Value': [20, 0], 'Flags': ['read', 'write']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0035/char0038": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d93ee-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0035',
'Value': [], 'Flags': ['write']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0035/char0036": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d7b77-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0035',
'Value': [14, 16, 16, 16, 14],
'Flags': ['read', 'write']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': 'e95d9882-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e/char0032": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95dda91-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e',
'Value': [1], 'Notifying': False,
'Flags': ['read', 'notify'],
'NotifyAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e/char0032/desc0034": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e/char0032',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e/char002f": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95dda90-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e',
'Value': [1], 'Notifying': False,
'Flags': ['read', 'notify'],
'NotifyAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e/char002f/desc0031": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service002e/char002f',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0028": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': 'e95d0753-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0028/char002c": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95dfb24-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0028',
'Value': [], 'Flags': ['read', 'write']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0028/char0029": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95dca4b-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0028',
'Value': [140, 1, 140, 3, 144, 255],
'Notifying': False, 'Flags': ['read', 'notify'],
'NotifyAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0028/char0029/desc002b": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0028/char0029',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': 'e95d93af-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char0025": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95db84c-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d',
'Value': [], 'Notifying': False,
'Flags': ['read', 'notify'],
'NotifyAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char0025/desc0027": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char0025',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char0023": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d23c4-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d',
'Value': [], 'Flags': ['write']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char0021": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d5404-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d',
'Value': [],
'Flags': ['write-without-response', 'write'],
'WriteAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char001e": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e95d9775-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d',
'Value': [], 'Notifying': False,
'Flags': ['read', 'notify'],
'NotifyAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char001e/desc0020": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service001d/char001e',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0016": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': '0000180a-0000-1000-8000-00805f9b34fb',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0016/char001b": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': '00002a26-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0016',
'Value': [], 'Flags': ['read']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0016/char0019": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': '00002a25-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0016',
'Value': [], 'Flags': ['read']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0016/char0017": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': '00002a24-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0016',
'Value': [], 'Flags': ['read']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0012": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': 'e97dd91d-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0012/char0013": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': 'e97d3b10-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0012',
'Value': [], 'Notifying': False,
'Flags': ['write-without-response', 'notify'],
'WriteAcquired': False, 'NotifyAcquired': False},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0012/char0013/desc0015": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service0012/char0013',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000e": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': '0000fe59-0000-1000-8000-00805f9b34fb',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000e/char000f": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': '8ec90004-f315-4f60-9fb8-838830daea50',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000e',
'Value': [], 'Notifying': False,
'Flags': ['write', 'indicate']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000e/char000f/desc0011": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000e/char000f',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000a": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattService1': {
'UUID': '00001801-0000-1000-8000-00805f9b34fb',
'Device': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D',
'Primary': True, 'Includes': []},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000a/char000b": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattCharacteristic1': {
'UUID': '00002a05-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000a',
'Value': [], 'Notifying': False,
'Flags': ['indicate']},
'org.freedesktop.DBus.Properties': {}},
"/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000a/char000b/desc000d": {'org.freedesktop.DBus.Introspectable': {},
'org.bluez.GattDescriptor1': {
'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_E9_06_4D_45_FC_8D/service000a/char000b',
'Value': []},
'org.freedesktop.DBus.Properties': {}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02': {
'org.bluez.Device1': {'Address': 'DD:02:02:02:02:02', 'AddressType': 'random',
'Name': 'BBC micro:bit',
'Alias': 'BBC micro:bit', 'Paired': True, 'Trusted': False, 'Blocked': False,
'LegacyPairing': False, 'Connected': False,
'UUIDs': ['00001800-0000-1000-8000-00805f9b34fb', '00001801-0000-1000-8000-00805f9b34fb',
'0000180a-0000-1000-8000-00805f9b34fb', '0000fe59-0000-1000-8000-00805f9b34fb',
'6e400001-b5a3-f393-e0a9-e50e24dcca9e', 'e95d0753-251d-470a-a062-fa1922dfa9a8',
'e95d127b-251d-470a-a062-fa1922dfa9a8', 'e95d6100-251d-470a-a062-fa1922dfa9a8',
'e95d93af-251d-470a-a062-fa1922dfa9a8', 'e95d9882-251d-470a-a062-fa1922dfa9a8',
'e95dd91d-251d-470a-a062-fa1922dfa9a8', 'e97dd91d-251d-470a-a062-fa1922dfa9a8'],
'Adapter': '/org/bluez/hci0', 'ServicesResolved': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service004c': {
'org.bluez.GattService1': {'UUID': 'e95d6100-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service004c/char0050': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d1b25-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service004c',
'Value': [0xe8, 0x03], 'Flags': ['read', 'write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service004c/char004d': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d9250-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service004c', 'Value': [],
'Notifying': False, 'Flags': ['read', 'notify'], 'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service004c/char004d/desc004f': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service004c/char004d',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0045': {
'org.bluez.GattService1': {'UUID': 'e95dd91d-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0045/char004a': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d0d2d-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0045', 'Value': [],
'Flags': ['read', 'write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0045/char0048': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d93ee-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0045', 'Value': [],
'Flags': ['write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0045/char0046': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d7b77-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0045', 'Value': [],
'Flags': ['read', 'write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b': {
'org.bluez.GattService1': {'UUID': 'e95d127b-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b/char0042': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d8d00-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b', 'Value': [],
'Notifying': False, 'Flags': ['read', 'write', 'notify'],
'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b/char0042/desc0044': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b/char0042',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b/char0040': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95dd822-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b', 'Value': [],
'Flags': ['write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b/char003e': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95db9fe-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b', 'Value': [],
'Flags': ['read', 'write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b/char003c': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d5899-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service003b', 'Value': [],
'Flags': ['read', 'write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034': {
'org.bluez.GattService1': {'UUID': 'e95d9882-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034/char0038': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95dda91-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034', 'Value': [],
'Notifying': False, 'Flags': ['read', 'notify'], 'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034/char0038/desc003a': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034/char0038',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034/char0035': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95dda90-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034', 'Value': [],
'Notifying': False, 'Flags': ['read', 'notify'], 'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034/char0035/desc0037': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0034/char0035',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service002e': {
'org.bluez.GattService1': {'UUID': 'e95d0753-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service002e/char0032': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95dfb24-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service002e', 'Value': [],
'Flags': ['read', 'write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service002e/char002f': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95dca4b-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service002e', 'Value': [],
'Notifying': False, 'Flags': ['read', 'notify'], 'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service002e/char002f/desc0031': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service002e/char002f',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028': {
'org.bluez.GattService1': {'UUID': '6e400001-b5a3-f393-e0a9-e50e24dcca9e',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028/char002b': {
'org.bluez.GattCharacteristic1': {'UUID': '6e400002-b5a3-f393-e0a9-e50e24dcca9e',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028', 'Value': [51],
'Notifying': False, 'Flags': ['indicate']},
'org.freedesktop.DBus.Properties': {}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028/char002b/desc002d': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028/char002b',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028/char0029': {
'org.bluez.GattCharacteristic1': {'UUID': '6e400003-b5a3-f393-e0a9-e50e24dcca9e',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0028', 'Value': [],
'Flags': ['write-without-response', 'write'], 'WriteAcquired': False},
'org.freedesktop.DBus.Properties': {}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d': {
'org.bluez.GattService1': {'UUID': 'e95d93af-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char0025': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95db84c-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d', 'Value': [],
'Notifying': False, 'Flags': ['read', 'notify'], 'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char0025/desc0027': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char0025',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char0023': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d23c4-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d', 'Value': [],
'Flags': ['write']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char0021': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d5404-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d', 'Value': [],
'Flags': ['write-without-response', 'write'], 'WriteAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char001e': {
'org.bluez.GattCharacteristic1': {'UUID': 'e95d9775-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d', 'Value': [],
'Notifying': False, 'Flags': ['read', 'notify'], 'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char001e/desc0020': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service001d/char001e',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0016': {
'org.bluez.GattService1': {'UUID': '0000180a-0000-1000-8000-00805f9b34fb',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0016/char001b': {
'org.bluez.GattCharacteristic1': {'UUID': '00002a26-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0016', 'Value': [],
'Flags': ['read']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0016/char0019': {
'org.bluez.GattCharacteristic1': {'UUID': '00002a25-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0016', 'Value': [],
'Flags': ['read']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0016/char0017': {
'org.bluez.GattCharacteristic1': {'UUID': '00002a24-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0016', 'Value': [],
'Flags': ['read']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0012': {
'org.bluez.GattService1': {'UUID': 'e97dd91d-251d-470a-a062-fa1922dfa9a8',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0012/char0013': {
'org.bluez.GattCharacteristic1': {'UUID': 'e97d3b10-251d-470a-a062-fa1922dfa9a8',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0012', 'Value': [],
'Notifying': False, 'Flags': ['write-without-response', 'notify'],
'WriteAcquired': False, 'NotifyAcquired': False}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service0012/char0013/desc0015': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service0012/char0013',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service000e': {
'org.bluez.GattService1': {'UUID': '0000fe59-0000-1000-8000-00805f9b34fb',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service000e/char000f': {
'org.bluez.GattCharacteristic1': {'UUID': '8ec90004-f315-4f60-9fb8-838830daea50',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service000e', 'Value': [],
'Notifying': False, 'Flags': ['write', 'indicate']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service000e/char000f/desc0011': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service000e/char000f',
'Value': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service000a': {
'org.bluez.GattService1': {'UUID': '00001801-0000-1000-8000-00805f9b34fb',
'Device': '/org/bluez/hci0/dev_DD_02_02_02_02_02', 'Primary': True, 'Includes': []}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service000a/char000b': {
'org.bluez.GattCharacteristic1': {'UUID': '00002a05-0000-1000-8000-00805f9b34fb',
'Service': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service000a', 'Value': [],
'Notifying': False, 'Flags': ['indicate']}},
'/org/bluez/hci0/dev_DD_02_02_02_02_02/service000a/char000b/desc000d': {
'org.bluez.GattDescriptor1': {'UUID': '00002902-0000-1000-8000-00805f9b34fb',
'Characteristic': '/org/bluez/hci0/dev_DD_02_02_02_02_02/service000a/char000b',
'Value': []}},
}
| 64.779163
| 159
| 0.475566
| 6,465
| 72,747
| 5.118175
| 0.092498
| 0.047387
| 0.053311
| 0.047387
| 0.817371
| 0.797667
| 0.780018
| 0.765874
| 0.728127
| 0.715767
| 0
| 0.138092
| 0.40403
| 72,747
| 1,122
| 160
| 64.836898
| 0.625118
| 0.055947
| 0
| 0.481808
| 0
| 0
| 0.36002
| 0.278645
| 0
| 0
| 0.000118
| 0
| 0
| 1
| 0.015436
| false
| 0
| 0.003308
| 0
| 0.024256
| 0.00441
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fc5436df505057ccf615b687f2af0bfb9782961
| 288
|
py
|
Python
|
chariot/transformer/__init__.py
|
Y-Kuro-u/chariot
|
032f3eecdd55b30c65351e1e636c939c4b20919e
|
[
"Apache-2.0"
] | 134
|
2018-06-11T01:40:14.000Z
|
2021-11-15T12:34:38.000Z
|
chariot/transformer/__init__.py
|
Y-Kuro-u/chariot
|
032f3eecdd55b30c65351e1e636c939c4b20919e
|
[
"Apache-2.0"
] | 10
|
2018-06-17T10:45:50.000Z
|
2021-04-05T05:51:11.000Z
|
chariot/transformer/__init__.py
|
Y-Kuro-u/chariot
|
032f3eecdd55b30c65351e1e636c939c4b20919e
|
[
"Apache-2.0"
] | 8
|
2019-02-23T06:43:21.000Z
|
2021-02-18T06:05:11.000Z
|
from .tokenizer import Tokenizer
from .vocabulary import Vocabulary
# Dummy imports so these base classes are reachable via the chariot.transformer module.
# NOTE(review): token.base.BasePreprocessor shadows text.base.BasePreprocessor
# (same name imported twice) — confirm whether an alias was intended.
from .formatter.base import BaseFormatter
from .text.base import BasePreprocessor
from .token.base import BasePreprocessor
from .generator.base import BaseGenerator
| 36
| 54
| 0.847222
| 36
| 288
| 6.777778
| 0.5
| 0.163934
| 0.213115
| 0.245902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 288
| 7
| 55
| 41.142857
| 0.953125
| 0.180556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3fd7b49eb5b968aba7ba8ea3f992e1b7b7f87380
| 48
|
py
|
Python
|
cupy_alias/linalg/decomposition.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 142
|
2018-06-07T07:43:10.000Z
|
2021-10-30T21:06:32.000Z
|
cupy_alias/linalg/decomposition.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 282
|
2018-06-07T08:35:03.000Z
|
2021-03-31T03:14:32.000Z
|
cupy_alias/linalg/decomposition.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 19
|
2018-06-19T11:07:53.000Z
|
2021-05-13T20:57:04.000Z
|
from clpy.linalg.decomposition import * # NOQA
| 24
| 47
| 0.770833
| 6
| 48
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 1
| 48
| 48
| 0.902439
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3fdfdca518555f7c381ddf9305c53b666f01ca35
| 6,801
|
py
|
Python
|
controller/prometheus/PerformanceDataPicker.py
|
baiyanquan/k8sTools
|
882a30f2a19dffb0900efd5be61826400080a6ef
|
[
"Apache-2.0"
] | null | null | null |
controller/prometheus/PerformanceDataPicker.py
|
baiyanquan/k8sTools
|
882a30f2a19dffb0900efd5be61826400080a6ef
|
[
"Apache-2.0"
] | 1
|
2019-12-18T12:23:34.000Z
|
2019-12-18T12:23:34.000Z
|
controller/prometheus/PerformanceDataPicker.py
|
baiyanquan/k8sTools
|
882a30f2a19dffb0900efd5be61826400080a6ef
|
[
"Apache-2.0"
] | 3
|
2019-10-09T06:23:39.000Z
|
2019-10-20T02:39:14.000Z
|
# -*- coding: utf-8 -*-
import requests
import logging
import datetime
from utils.SockConfig import SockConfig
class PerformanceDataPicker(object):
    """Collects performance-metric time series from a Prometheus HTTP API.

    NOTE(review): the original class defined ``build_entity_metrics`` and
    ``query_entity_metric_values`` twice each; Python silently kept only the
    later definition of each pair.  The pairs are merged here: the surviving
    signatures are preserved, and the older behaviour (reading the entity
    list from ``query_config``) is restored via an optional parameter.
    """

    def __init__(self):
        pass

    @staticmethod
    def build_entity_metrics(query_config, entity_list=None):
        """Combine entity type, entity names and metric names into
        ``'entity_type/entity/metric'`` identifier strings.

        Parameters
        ----------
        query_config : dict with ``'entity_type'`` and ``'metric_list'`` keys,
            plus ``'entity_list'`` when *entity_list* is not given.
        entity_list : optional explicit list of entity names; defaults to
            ``query_config['entity_list']``.
        """
        if entity_list is None:
            entity_list = query_config['entity_list']
        entity_type = query_config['entity_type']
        entity_metrics = []
        for entity in entity_list:
            for metric in query_config['metric_list']:
                entity_metrics.append(entity_type + '/' + entity + '/' + metric)
        return entity_metrics

    @staticmethod
    def query_entity_metric_values(prometheus, entity_list, query_config,
                                   resolution, end_time, start_time,
                                   null_value='null'):
        """Query Prometheus once per entity/metric combination.

        Returns ``(metricnames, csvset)`` where *csvset* maps each timestamp
        to a column-ordered list of values; *null_value* marks samples that
        are missing for a given metric column.
        """
        metricnames = PerformanceDataPicker.build_entity_metrics(
            query_config=query_config, entity_list=entity_list)
        print("name:")
        print(metricnames)
        query_list = query_config['query_list']
        prometheus_config = prometheus
        csvset = dict()
        for index in range(len(metricnames)):
            parts = metricnames[index].split('/')
            # Queries cycle over the query list; "%s" is the entity placeholder.
            query = query_list[index % len(query_list)].replace("%s", parts[1])
            response = requests.get(
                prometheus_config['url'] + prometheus_config['query_api'],
                params={
                    'query': query, 'start': start_time,
                    'end': end_time, 'step': resolution},
                auth=(prometheus_config['auth_user'],
                      prometheus_config['auth_password']))
            # Parse the payload once instead of calling response.json() twice.
            payload = response.json()
            print(payload)
            results = payload['data']['result']
            if results:
                for value in results[0]['values']:
                    datum = value[1]
                    if datum == 'NaN':
                        datum = null_value
                    if index == 0:
                        csvset[value[0]] = [datum]
                    else:
                        if value[0] in csvset:
                            csvset[value[0]].append(datum)
                        else:
                            # First sighting of this timestamp: back-fill the
                            # earlier columns with nulls, then add the datum.
                            # (Bug fix: the original appended the raw value[1]
                            # here, skipping the NaN -> null conversion.)
                            csvset[value[0]] = []
                            for _ in range(index):
                                csvset[value[0]].append(null_value)
                            csvset[value[0]].append(datum)
                # Pad timestamps that received no sample for this metric.
                for timestamp in csvset.keys():
                    if len(csvset[timestamp]) <= index:
                        csvset[timestamp].append(null_value)
            else:
                for timestamp in csvset.keys():
                    csvset[timestamp].append(null_value)
        # Data is emitted column-wise; null_value means the sample is missing.
        return metricnames, csvset

    @staticmethod
    def query_multi_entity_metric_values(queryconfiglist, resolution,
                                         end_time, start_time):
        """Run ``query_entity_metric_values`` for every query of every
        Prometheus configuration and collect the results."""
        metricnamelist = []
        csvsets = []
        for prometheus_config in queryconfiglist:
            queries = prometheus_config['queries']
            print(queries)
            for query in queries:
                metricnames, csvset = PerformanceDataPicker.query_entity_metric_values(
                    prometheus=prometheus_config['prometheus'],
                    entity_list=query['entity_list'],
                    query_config=query['query_config'],
                    resolution=resolution,
                    end_time=end_time,
                    start_time=start_time)
                metricnamelist.append(metricnames)
                csvsets.append(csvset)
        return metricnamelist, csvsets
| 45.34
| 127
| 0.489193
| 576
| 6,801
| 5.567708
| 0.142361
| 0.06517
| 0.037418
| 0.033676
| 0.843467
| 0.822887
| 0.806673
| 0.768631
| 0.717805
| 0.717805
| 0
| 0.005838
| 0.420673
| 6,801
| 149
| 128
| 45.644295
| 0.808122
| 0.076312
| 0
| 0.697479
| 0
| 0
| 0.045962
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05042
| false
| 0.02521
| 0.033613
| 0
| 0.134454
| 0.042017
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3ff2fd0bb8c886e20a9c79a3ad91c263b53903f4
| 1,334
|
py
|
Python
|
parseXml.py
|
xmh645214784/SceneDatabase
|
eabc00b1564c2c81bb7e3bd3882f12abaa8a3dd5
|
[
"MIT"
] | null | null | null |
parseXml.py
|
xmh645214784/SceneDatabase
|
eabc00b1564c2c81bb7e3bd3882f12abaa8a3dd5
|
[
"MIT"
] | null | null | null |
parseXml.py
|
xmh645214784/SceneDatabase
|
eabc00b1564c2c81bb7e3bd3882f12abaa8a3dd5
|
[
"MIT"
] | null | null | null |
import xml.dom.minidom
import numpy as np
def parseXml_RetPathAndMatrixPair(xmlPath):
    """Parse a scene XML file and return [path, matrix] pairs for every
    shape of type "shapenet".

    Parameters
    ----------
    xmlPath : path of the XML scene file.

    Returns
    -------
    list of [str, np.ndarray] — the shape's model path and its 4x4
    transform matrix.
    """
    domtree = xml.dom.minidom.parse(xmlPath)
    collection = domtree.documentElement
    shapes = collection.getElementsByTagName("shape")
    result = []
    for each in shapes:
        if each.hasAttribute("type") and each.getAttribute("type") == "shapenet":
            path = each.getElementsByTagName("string")[0].getAttribute("value")
            matrix = each.getElementsByTagName("transform")[0].getElementsByTagName("matrix")[0].getAttribute("value")
            # Bug fix: np.array over a bare map object yields a 0-d object
            # array on Python 3, so .reshape would fail.  Materialize floats.
            matrix = np.array([float(v) for v in matrix.split()]).reshape([4, 4])
            result.append([path, matrix])
    return result
def parseXml_RetPathAndMatrixPair_Wall(xmlPath):
    """Parse a scene XML file and return the 4x4 transform matrix of every
    shape of type "cube" (used for walls).

    Parameters
    ----------
    xmlPath : path of the XML scene file.

    Returns
    -------
    list of np.ndarray — one 4x4 transform matrix per cube shape.
    """
    domtree = xml.dom.minidom.parse(xmlPath)
    collection = domtree.documentElement
    shapes = collection.getElementsByTagName("shape")
    result = []
    for each in shapes:
        if each.hasAttribute("type") and each.getAttribute("type") == "cube":
            matrix = each.getElementsByTagName("transform")[0].getElementsByTagName("matrix")[0].getAttribute("value")
            # Bug fix: np.array over a bare map object yields a 0-d object
            # array on Python 3, so .reshape would fail.  Materialize floats.
            matrix = np.array([float(v) for v in matrix.split()]).reshape([4, 4])
            result.append(matrix)
    return result
| 47.642857
| 117
| 0.682909
| 140
| 1,334
| 6.485714
| 0.314286
| 0.105727
| 0.079295
| 0.105727
| 0.830396
| 0.830396
| 0.830396
| 0.830396
| 0.830396
| 0.830396
| 0
| 0.009091
| 0.175412
| 1,334
| 28
| 118
| 47.642857
| 0.816364
| 0.071214
| 0
| 0.64
| 0
| 0
| 0.072006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.08
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3ffef4bb618acda8920bc2fea422cd1736ec9be3
| 17,648
|
py
|
Python
|
skbot/ignition/sdformat/bindings/v16/state.py
|
FirefoxMetzger/ropy
|
c1bcebda223f3af0b6d35e3f4c26d8fd9d26577a
|
[
"Apache-2.0"
] | 6
|
2021-03-24T05:54:45.000Z
|
2021-07-20T21:03:21.000Z
|
skbot/ignition/sdformat/bindings/v16/state.py
|
FirefoxMetzger/scikit-bot
|
ee6f1d3451a3c61a6fa122cc42efc4dd67afc9c9
|
[
"Apache-2.0"
] | 31
|
2021-08-12T08:12:58.000Z
|
2022-03-21T23:16:36.000Z
|
skbot/ignition/sdformat/bindings/v16/state.py
|
FirefoxMetzger/scikit-bot
|
ee6f1d3451a3c61a6fa122cc42efc4dd67afc9c9
|
[
"Apache-2.0"
] | 1
|
2021-07-20T20:13:49.000Z
|
2021-07-20T20:13:49.000Z
|
from dataclasses import dataclass, field
from typing import List, Optional
from .light import Light
from .model import Model as ModelModel
__NAMESPACE__ = "sdformat/v1.6/state.xsd"
# NOTE(review): these look like xsdata-style generated bindings for
# sdformat/v1.6 state.xsd — prefer regenerating over hand-editing.
@dataclass
class Model:
    """
    Model state.

    Parameters
    ----------
    joint: Joint angle
    model: A nested model state element
    scale: Scale for the 3 dimensions of the model.
    frame: A frame of reference to which a pose is relative.
    pose: A position(x,y,z) and orientation(roll, pitch yaw) with
        respect to the specified frame.
    link: Link state
    name: Name of the model
    """

    class Meta:
        name = "model"

    joint: List["Model.Joint"] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    model: List["Model"] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    # "x y z" scale factors; the pattern enforces exactly 3 floats.
    scale: str = field(
        default="1 1 1",
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
            "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
        },
    )
    frame: List["Model.Frame"] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    pose: Optional["Model.Pose"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    link: List["Model.Link"] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        },
    )

    @dataclass
    class Joint:
        """
        Joint angle.

        Parameters
        ----------
        angle: Angle of an axis
        name: Name of the joint
        """

        angle: List["Model.Joint.Angle"] = field(
            default_factory=list,
            metadata={
                "type": "Element",
                "namespace": "",
                "min_occurs": 1,
            },
        )
        name: Optional[str] = field(
            default=None,
            metadata={
                "type": "Attribute",
                "required": True,
            },
        )

        @dataclass
        class Angle:
            """
            Parameters
            ----------
            value:
            axis: Index of the axis.
            """

            value: Optional[float] = field(
                default=None,
                metadata={
                    "required": True,
                },
            )
            axis: Optional[int] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )

    @dataclass
    class Frame:
        """
        A frame of reference to which a pose is relative.

        Parameters
        ----------
        pose: A position(x,y,z) and orientation(roll, pitch yaw) with
            respect to the specified frame.
        name: Name of the frame. This name must not match another frame
            defined inside the parent that this frame is attached to.
        """

        pose: Optional["Model.Frame.Pose"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        name: Optional[str] = field(
            default=None,
            metadata={
                "type": "Attribute",
                "required": True,
            },
        )

        @dataclass
        class Pose:
            """
            Parameters
            ----------
            value:
            frame: Name of frame which the pose is defined relative to.
            """

            # "x y z roll pitch yaw"; the pattern enforces exactly 6 floats.
            value: str = field(
                default="0 0 0 0 0 0",
                metadata={
                    "required": True,
                    "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
                },
            )
            frame: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                },
            )

    @dataclass
    class Pose:
        """
        Parameters
        ----------
        value:
        frame: Name of frame which the pose is defined relative to.
        """

        value: str = field(
            default="0 0 0 0 0 0",
            metadata={
                "required": True,
                "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
            },
        )
        frame: Optional[str] = field(
            default=None,
            metadata={
                "type": "Attribute",
            },
        )

    @dataclass
    class Link:
        """
        Link state.

        Parameters
        ----------
        velocity: Velocity of the link. The x, y, z components of the
            pose correspond to the linear velocity of the link,
            and the roll, pitch, yaw components correspond to the
            angular velocity of the link
        acceleration: Acceleration of the link. The x, y, z components
            of the pose correspond to the linear acceleration of
            the link, and the roll, pitch, yaw components
            correspond to the angular acceleration of the link
        wrench: Force and torque applied to the link. The x, y, z
            components of the pose correspond to the force applied
            to the link, and the roll, pitch, yaw components
            correspond to the torque applied to the link
        collision: Collision state
        frame: A frame of reference to which a pose is relative.
        pose: A position(x,y,z) and orientation(roll, pitch yaw) with
            respect to the specified frame.
        name: Name of the link
        """

        velocity: str = field(
            default="0 0 0 0 0 0",
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
                "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
            },
        )
        acceleration: str = field(
            default="0 0 0 0 0 0",
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
                "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
            },
        )
        wrench: str = field(
            default="0 0 0 0 0 0",
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
                "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
            },
        )
        collision: List[str] = field(
            default_factory=list,
            metadata={
                "type": "Element",
                "namespace": "",
            },
        )
        frame: List["Model.Link.Frame"] = field(
            default_factory=list,
            metadata={
                "type": "Element",
                "namespace": "",
            },
        )
        pose: Optional["Model.Link.Pose"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        name: Optional[str] = field(
            default=None,
            metadata={
                "type": "Attribute",
                "required": True,
            },
        )

        @dataclass
        class Frame:
            """
            A frame of reference to which a pose is relative.

            Parameters
            ----------
            pose: A position(x,y,z) and orientation(roll, pitch yaw)
                with respect to the specified frame.
            name: Name of the frame. This name must not match another
                frame defined inside the parent that this frame is
                attached to.
            """

            pose: Optional["Model.Link.Frame.Pose"] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            name: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )

            @dataclass
            class Pose:
                """
                Parameters
                ----------
                value:
                frame: Name of frame which the pose is defined relative
                    to.
                """

                value: str = field(
                    default="0 0 0 0 0 0",
                    metadata={
                        "required": True,
                        "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
                    },
                )
                frame: Optional[str] = field(
                    default=None,
                    metadata={
                        "type": "Attribute",
                    },
                )

        @dataclass
        class Pose:
            """
            Parameters
            ----------
            value:
            frame: Name of frame which the pose is defined relative to.
            """

            value: str = field(
                default="0 0 0 0 0 0",
                metadata={
                    "required": True,
                    "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
                },
            )
            frame: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                },
            )
# NOTE(review): xsdata-style generated bindings (sdformat/v1.6 state.xsd) —
# prefer regenerating over hand-editing.
@dataclass
class State:
    """
    Parameters
    ----------
    sim_time: Simulation time stamp of the state [seconds nanoseconds]
    wall_time: Wall time stamp of the state [seconds nanoseconds]
    real_time: Real time stamp of the state [seconds nanoseconds]
    iterations: Number of simulation iterations.
    insertions: A list containing the entire description of entities
        inserted.
    deletions: A list of names of deleted entities/
    model: Model state
    light: Light state
    world_name: Name of the world this state applies to
    """

    class Meta:
        name = "state"

    # Time stamps are "seconds nanoseconds" pairs of non-negative integers.
    sim_time: str = field(
        default="0 0",
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
            "white_space": "collapse",
            "pattern": r"\d+ \d+",
        },
    )
    wall_time: str = field(
        default="0 0",
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
            "white_space": "collapse",
            "pattern": r"\d+ \d+",
        },
    )
    real_time: str = field(
        default="0 0",
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
            "white_space": "collapse",
            "pattern": r"\d+ \d+",
        },
    )
    iterations: int = field(
        default=0,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    insertions: Optional["State.Insertions"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    deletions: Optional["State.Deletions"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    model: List[Model] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    light: List["State.Light"] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "",
        },
    )
    world_name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        },
    )

    @dataclass
    class Insertions:
        """
        A list containing the entire description of entities inserted.

        Parameters
        ----------
        model: The model element defines a complete robot or any other
            physical object.
        light: The light element describes a light source.
        """

        model: List[ModelModel] = field(
            default_factory=list,
            metadata={
                "type": "Element",
                "namespace": "",
            },
        )
        light: List[Light] = field(
            default_factory=list,
            metadata={
                "type": "Element",
                "namespace": "",
            },
        )

    @dataclass
    class Deletions:
        """
        A list of names of deleted entities/

        Parameters
        ----------
        name: The name of a deleted entity.
        """

        name: List[str] = field(
            default_factory=list,
            metadata={
                "type": "Element",
                "namespace": "",
                "min_occurs": 1,
            },
        )

    @dataclass
    class Light:
        """
        Light state.

        Parameters
        ----------
        frame: A frame of reference to which a pose is relative.
        pose: A position(x,y,z) and orientation(roll, pitch yaw) with
            respect to the specified frame.
        name: Name of the light
        """

        frame: List["State.Light.Frame"] = field(
            default_factory=list,
            metadata={
                "type": "Element",
                "namespace": "",
            },
        )
        pose: Optional["State.Light.Pose"] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "",
                "required": True,
            },
        )
        name: Optional[str] = field(
            default=None,
            metadata={
                "type": "Attribute",
                "required": True,
            },
        )

        @dataclass
        class Frame:
            """
            A frame of reference to which a pose is relative.

            Parameters
            ----------
            pose: A position(x,y,z) and orientation(roll, pitch yaw)
                with respect to the specified frame.
            name: Name of the frame. This name must not match another
                frame defined inside the parent that this frame is
                attached to.
            """

            pose: Optional["State.Light.Frame.Pose"] = field(
                default=None,
                metadata={
                    "type": "Element",
                    "namespace": "",
                    "required": True,
                },
            )
            name: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                    "required": True,
                },
            )

            @dataclass
            class Pose:
                """
                Parameters
                ----------
                value:
                frame: Name of frame which the pose is defined relative
                    to.
                """

                value: str = field(
                    default="0 0 0 0 0 0",
                    metadata={
                        "required": True,
                        "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
                    },
                )
                frame: Optional[str] = field(
                    default=None,
                    metadata={
                        "type": "Attribute",
                    },
                )

        @dataclass
        class Pose:
            """
            Parameters
            ----------
            value:
            frame: Name of frame which the pose is defined relative to.
            """

            value: str = field(
                default="0 0 0 0 0 0",
                metadata={
                    "required": True,
                    "pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
                },
            )
            frame: Optional[str] = field(
                default=None,
                metadata={
                    "type": "Attribute",
                },
            )
| 28.464516
| 157
| 0.398289
| 1,548
| 17,648
| 4.521318
| 0.087855
| 0.023718
| 0.025718
| 0.02286
| 0.8034
| 0.798114
| 0.798114
| 0.767824
| 0.746392
| 0.73039
| 0
| 0.01213
| 0.44413
| 17,648
| 619
| 158
| 28.510501
| 0.701325
| 0.218098
| 0
| 0.637232
| 0
| 0.023866
| 0.221589
| 0.100072
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009547
| 0
| 0.093079
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b756323a1971a5f4731157fb9fc2ba6ea804c63b
| 1,193
|
py
|
Python
|
summer2020py/__init__.py
|
dllahr/summer2020
|
4c8204f72ecdd8c44d2abb1e3e2669d1e6a478e8
|
[
"MIT"
] | null | null | null |
summer2020py/__init__.py
|
dllahr/summer2020
|
4c8204f72ecdd8c44d2abb1e3e2669d1e6a478e8
|
[
"MIT"
] | null | null | null |
summer2020py/__init__.py
|
dllahr/summer2020
|
4c8204f72ecdd8c44d2abb1e3e2669d1e6a478e8
|
[
"MIT"
] | null | null | null |
import os.path

# Per-user configuration file consulted by the package's scripts.
default_config_filepath = os.path.expanduser("~/.summer2020py.cfg")
# Config-file section read by default.
# NOTE(review): "test" looks like a development default — confirm before
# relying on it in production.
default_config_section = "test"
# Serialized Morpheus heat-map view state (JSON); kept as one opaque string.
# Edit by exporting from Morpheus rather than by hand.
morpheus_heatmap_template = '{"colorScheme":{"valueToColorScheme":{"null":{"fractions":[0,0.5,1],"colors":["#0000ff","#ffffff","#ff0000"],"min":0.6487475633621216,"max":1,"missingColor":"#c0c0c0","scalingMode":0,"stepped":false,"transformValues":0}}},"name":"","showRowNumber":false,"rowShapeModel":{},"columnShapeModel":{},"rowFontModel":{},"columnFontModel":{},"rowColorModel":{},"columnColorModel":{},"rows":[],"columns":[],"rowSortBy":[],"columnSortBy":[],"rowGroupBy":[],"columnGroupBy":[],"rowFilter":{"isAnd":true,"filters":[]},"columnFilter":{"isAnd":true,"filters":[]},"symmetric":false,"rowSize":0.36417212823248485,"columnSize":0.36417212823248485,"drawGrid":true,"gridColor":"#808080","gridThickness":0.1,"drawValues":false,"shape":"square","rowSelection":[],"columnSelection":[],"rowSearchTerm":"","columnSearchTerm":"","columnDendrogram":"","rowDendrogram":"","dataset":{"rows":1305,"columns":1305,"seriesArrays":[],"seriesDataTypes":["Float32"],"seriesNames":["flipped_dendro_sorted_df"],"rowMetadataModel":{"vectors":[]},"columnMetadataModel":{"vectors":[]}}}'
| 198.833333
| 1,076
| 0.710813
| 108
| 1,193
| 7.768519
| 0.768519
| 0.014303
| 0.038141
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078747
| 0.010059
| 1,193
| 5
| 1,077
| 238.6
| 0.631668
| 0
| 0
| 0
| 0
| 0.25
| 0.89606
| 0.876781
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b76b4651479427e7fc836073b6b54b4591a2064d
| 8,000
|
py
|
Python
|
rlkit/examples/dqn_and_double_dqn.py
|
jesbu1/rlkit
|
a651a8dacee3d2b79b29974092af14f4a314ec25
|
[
"MIT"
] | null | null | null |
rlkit/examples/dqn_and_double_dqn.py
|
jesbu1/rlkit
|
a651a8dacee3d2b79b29974092af14f4a314ec25
|
[
"MIT"
] | null | null | null |
rlkit/examples/dqn_and_double_dqn.py
|
jesbu1/rlkit
|
a651a8dacee3d2b79b29974092af14f4a314ec25
|
[
"MIT"
] | null | null | null |
"""
Run DQN on CartPole-v0.
"""
import gym
import gym_minigrid
import sys
import random
from torch import nn as nn
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.policies.argmax import ArgmaxDiscretePolicy
from rlkit.torch.dqn.dqn import DQNTrainer
from rlkit.torch.dqn.double_dqn import DoubleDQNTrainer
from rlkit.torch.networks import Mlp
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
def experiment(variant):
    """Build and train a DQN agent on MiniGrid-FourRoomsSkills-v0.

    variant keys used: 'seed', 'env_kwargs' ({'skills', 'train'}),
    'hidden_size', 'epsilon', 'trainer_kwargs', 'replay_buffer_size',
    'algorithm_kwargs'.
    """
    # Bug fix: seed before constructing envs and networks so initialisation
    # is reproducible; the original called set_seed() only right before
    # training, after all weights had already been drawn.
    set_seed(variant['seed'])
    skills = variant['env_kwargs']['skills']
    train = variant['env_kwargs']['train']
    expl_env = gym.make('MiniGrid-FourRoomsSkills-v0', train=train, skills=skills)
    eval_env = gym.make('MiniGrid-FourRoomsSkills-v0', train=train, skills=skills)
    obs_dim = expl_env.observation_space.low.size
    action_dim = eval_env.action_space.n
    hidden_size = variant['hidden_size']
    # Online Q-network and its slowly-updated target copy share one shape.
    qf = Mlp(
        hidden_sizes=[hidden_size, hidden_size],
        input_size=obs_dim,
        output_size=action_dim,
    )
    target_qf = Mlp(
        hidden_sizes=[hidden_size, hidden_size],
        input_size=obs_dim,
        output_size=action_dim,
    )
    qf_criterion = nn.MSELoss()
    # Greedy policy for evaluation; epsilon-greedy wrapper for exploration.
    eval_policy = ArgmaxDiscretePolicy(qf)
    expl_policy = PolicyWrappedWithExplorationStrategy(
        EpsilonGreedy(expl_env.action_space, prob_random_action=variant['epsilon']),
        eval_policy,
    )
    eval_path_collector = MdpPathCollector(
        eval_env,
        eval_policy,
    )
    expl_path_collector = MdpPathCollector(
        expl_env,
        expl_policy,
    )
    trainer = DQNTrainer(
        qf=qf,
        target_qf=target_qf,
        qf_criterion=qf_criterion,
        **variant['trainer_kwargs']
    )
    replay_buffer = EnvReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algorithm_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
# noinspection PyTypeChecker
skills = [[1, 2, 2, 2, 2, 1, 2, 2], [2, 2, 2, 2, 2], [2, 2, 2, 0, 2, 2], [1, 2, 2, 2, 2, 2], [2, 2, 1, 2, 2], [2, 0, 2, 2, 2], [2, 0, 2, 2, 2, 2], [2, 2, 2, 2, 0], [2, 2, 2, 2, 2, 2], [0, 2, 2, 2, 2], [0, 2, 2, 0, 2], [2, 2, 2, 1, 2], [2, 2, 2, 2, 2, 1], [2, 2, 2, 2], [1, 2, 2, 2, 2], [2, 2, 1, 2, 0], [2, 2, 2, 2, 2, 2, 2, 2], [1, 2, 2, 2, 2, 1, 2], [2, 2, 2, 2, 2, 2, 2, 1], [2, 2, 2, 1, 2, 2, 2, 2], [2, 2, 2, 2, 2, 1, 2, 2], [1, 2, 1, 2, 2, 2, 2, 2], [1, 2, 2, 2, 2, 1], [2, 2, 0, 2, 2], [1, 2, 2, 1, 2, 2, 0, 2], [0, 2, 2, 2, 0, 2, 1, 2], [2, 2, 2, 2, 1, 2], [0, 2, 2, 2], [1, 2, 0, 2, 2, 2], [2, 2, 2, 1, 2, 2], [2, 1, 2, 2, 2], [0, 2, 2, 0, 2, 2, 2, 2], [0, 2, 2, 2, 2, 2, 2, 0], [2, 1, 2, 2], [2, 2, 2, 1, 2, 0, 2], [2, 2, 2, 2, 2, 0, 2, 2], [1, 2, 1, 2], [2, 2, 0, 2, 2, 2], [2, 2, 1, 2], [1, 2, 1, 2, 2, 2], [1, 2, 2, 2, 1, 2], [0, 2, 2, 2, 2, 2], [1, 2, 2, 2, 1], [2, 2, 2, 0, 2, 2, 1, 2], [1, 1, 2, 2, 2], [2, 2, 2, 0, 2], [0, 2, 2, 2, 1, 2, 2], [2, 2, 2, 1], [2, 0, 2, 2], [2, 2, 0, 2, 2, 2, 2, 2], [1, 2, 2, 2, 2, 2, 1, 2], [2, 2, 2, 2, 0, 2], [2, 0, 2, 2, 1, 2, 2, 2], [2, 2, 2, 2, 1], [0, 2, 2, 2, 0, 2, 2, 2], [2, 1, 2, 2, 2, 2, 2, 2], [2, 2, 1, 2, 2, 2], [0, 2, 2, 2, 0, 2], [0, 0, 2, 2], [2, 0, 2, 2, 2, 2, 2, 2], [1, 2, 1, 2, 2], [1, 2, 2, 2], [2, 1, 2, 2, 2, 2, 0, 2], [0, 2, 0, 2, 2, 2, 2], [1, 2, 2, 1, 2, 0], [2, 1, 2, 2, 2, 2], [2, 0, 2, 2, 1], [2, 2, 2, 1, 2, 2, 2], [2, 2, 2, 2, 0, 2, 2, 2], [2, 2, 2, 0, 2, 2, 2, 2], [2, 2, 2, 2, 2, 0], [2, 2, 0, 2, 2, 2, 2], [1, 1, 2, 0, 2, 2, 2, 0], [2, 2, 2, 2, 2, 2, 1, 2], [2, 2, 0, 2], [2, 0, 2, 1, 2, 2], [2, 2, 2, 2, 2, 2, 0, 2], [0, 2, 1, 2, 2, 2], [0, 2, 2, 0, 2, 2], [2, 1, 2, 2, 2, 0], [0, 2, 2, 2, 2, 0], [0, 2, 2, 0], [1, 2, 2, 1, 2], [1, 2, 2, 2, 1, 2, 2, 2], [2, 2, 2, 2, 2, 0, 2, 1], [2, 2, 2, 2, 1, 2, 2], [0, 2, 0, 2], [1, 2, 2, 0, 2], [2, 2, 2, 0], [2, 2, 2, 0, 2, 2, 2], [0, 2, 0, 2, 2, 2], [2, 2, 2, 2, 1, 2, 2, 2], [2, 2, 2, 2, 2, 2, 2], [0, 2, 2, 2, 2, 0, 2, 2], [1, 2, 2, 1], [1, 2, 2, 1, 2, 2, 2, 1], [1, 1, 2, 
2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2, 2, 0], [1, 2, 2, 1, 2, 2, 2, 2], [0, 0, 2, 2, 2], [2, 2, 1, 2, 2, 0, 2], [2, 1, 2, 2, 0, 2], [1, 2, 2, 1, 2, 2], [0, 2, 0, 2, 2], [0, 2, 0, 2, 1, 2], [1, 2, 2, 2, 2, 2, 2, 1], [2, 2, 2, 1, 2, 0], [1, 2, 2, 2, 2, 2, 2], [1, 1, 2, 2, 0], [2, 2, 2, 2, 2, 0, 2], [1, 2, 1, 2, 0], [1, 1, 2, 2, 2, 2], [1, 2, 2, 2, 2, 2, 2, 2], [2, 1, 2, 0, 2], [0, 2, 2, 2, 0], [2, 2, 2, 2, 0, 2, 2], [0, 2, 2, 2, 2, 2, 1, 2], [2, 2, 1, 2, 2, 2, 2, 0], [1, 2, 2, 1, 2, 0, 2, 2], [1, 2, 1, 2, 2, 2, 2, 0], [2, 2, 0, 2, 2, 1, 2, 2], [2, 2, 2, 1, 2, 0, 2, 2], [0, 0, 2, 2, 2, 1, 2, 2], [1, 2, 2, 0, 2, 2, 2, 2], [1, 2, 2, 2, 2, 0], [2, 2, 2, 0, 2, 1, 2, 2], [2, 2, 2, 2, 2, 1, 2], [2, 0, 2, 2, 1, 2, 2], [2, 2, 2, 2, 1, 2, 2, 0], [2, 2, 1, 2, 2, 2, 0, 2], [2, 0, 2, 2, 2, 2, 1, 2], [1, 2, 2, 2, 1, 2, 2, 0], [2, 1, 2, 2, 2, 2, 1, 2], [2, 2, 2, 0, 2, 1], [2, 1, 2, 2, 2, 2, 2, 1], [0, 2, 0, 2, 2, 2, 2, 2], [1, 2, 0, 2, 2, 0, 2], [2, 1, 2, 0, 2, 2], [0, 2, 1, 2, 2], [2, 2, 1, 2, 2, 2, 2, 2], [0, 2, 2, 1, 2], [0, 2, 2, 2, 2, 2, 2, 2], [2, 2, 1, 2, 0, 2], [2, 2, 0, 2, 1], [0, 2, 2, 2, 2, 2, 0, 2], [2, 0, 2, 2, 2, 2, 2, 0], [0, 2, 1, 2], [0, 0, 2, 2, 2, 2, 2, 2], [2, 1, 2, 2, 2, 2, 2], [2, 2, 0, 2, 2, 2, 2, 0], [2, 0, 2, 2, 2, 1, 2, 2], [0, 2, 2, 2, 0, 2, 2], [2, 2, 2, 2, 2, 1, 2, 0], [1, 2, 2, 2, 0, 2, 2], [2, 2, 2, 1, 2, 2, 0, 2], [0, 2, 2, 2, 1], [2, 2, 0, 2, 2, 2, 1, 2], [0, 0, 2, 2, 2, 2], [1, 2, 0, 2, 2, 1, 2, 2], [1, 2, 2, 2, 0], [2, 0, 2, 1, 2], [2, 2, 1, 2, 0, 2, 2, 1], [1, 2, 0, 2], [0, 2, 2, 2, 2, 2, 2], [2, 0, 2, 2, 0], [2, 1, 2, 2, 2, 2, 2, 0], [1, 1, 2, 0, 2, 2], [1, 2, 2, 1, 2, 2, 2], [1, 2, 2, 2, 2, 0, 2, 2], [1, 2, 2, 0, 2, 2], [1, 2, 2, 2, 0, 2], [1, 1, 2, 0, 2], [0, 2, 2, 1, 2, 2], [0, 2, 2, 2, 1, 2], [2, 0, 2, 2, 2, 2, 2], [2, 0, 2, 2, 0, 2], [0, 2, 2, 1, 2, 2, 2, 1], [2, 2, 0, 2, 2, 0, 2], [2, 2, 0, 2, 2, 1], [0, 2, 2, 0, 2, 2, 2, 1], [1, 2, 2, 2, 2, 0, 2], [2, 1, 2, 0], [2, 0, 2, 1, 2, 2, 2, 2], [2, 0, 2, 2, 1, 2], [2, 2, 1, 2, 2, 2, 2], [2, 2, 2, 2, 
0, 2, 2, 1], [2, 1, 2, 2, 2, 0, 2, 2], [1, 2, 2, 2, 1, 2, 2], [2, 1, 2, 0, 2, 2, 2, 2], [0, 2, 2, 2, 1, 2, 2, 2], [2, 0, 2, 1], [2, 2, 1, 2, 2, 0], [0, 2, 2, 2, 2, 1], [2, 2, 1, 2, 0, 2, 2, 2], [1, 2, 2, 2, 0, 2, 2, 2], [2, 2, 0, 2, 2, 0], [2, 1, 2, 2, 1, 2, 2], [2, 1, 2, 2, 0, 2, 2, 2], [1, 2, 0, 2, 2]]
# Experiment configuration for a vanilla DQN run on the four-rooms skills env.
variant = {
    "algorithm": "DQN",
    "version": "normal",
    "replay_buffer_size": int(1E6),
    "seed": random.randint(0, 100000),
    # Exploration epsilon is taken from the command line.
    "epsilon": float(sys.argv[1]),
    "hidden_size": 32,
    "algorithm_kwargs": {
        "num_epochs": 1000,
        "num_eval_steps_per_epoch": 5000,
        "num_trains_per_train_loop": 500,
        "num_expl_steps_per_train_loop": 1000,
        "min_num_steps_before_training": 1000,
        "max_path_length": 100,
        "batch_size": 256,
    },
    "trainer_kwargs": {
        "discount": 0.99,
        "learning_rate": 3E-4,
    },
    "env_kwargs": {
        # Train on the first 200 generated skill sequences.
        "skills": skills[:200],
        "train": True,
    },
}
setup_logger('dqn-MinigridFourRoomsSkills-200-skills', variant=variant)
# ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)
experiment(variant)
| 69.565217
| 4,308
| 0.474
| 1,756
| 8,000
| 2.088838
| 0.076879
| 0.3506
| 0.331243
| 0.273719
| 0.44602
| 0.445202
| 0.427481
| 0.426118
| 0.417666
| 0.404308
| 0
| 0.236977
| 0.26575
| 8,000
| 114
| 4,309
| 70.175439
| 0.38747
| 0.039375
| 0
| 0.12
| 0
| 0
| 0.027372
| 0.011992
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01
| false
| 0
| 0.16
| 0
| 0.17
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b7caae044fbf3d158090a0108aafd7da09f78081
| 64
|
py
|
Python
|
pywolf/views/pywolf/entry/__init__.py
|
tevawolf/pywolf
|
94e3c26d8c3b279990624f23658e22ab00eead46
|
[
"BSD-3-Clause"
] | null | null | null |
pywolf/views/pywolf/entry/__init__.py
|
tevawolf/pywolf
|
94e3c26d8c3b279990624f23658e22ab00eead46
|
[
"BSD-3-Clause"
] | null | null | null |
pywolf/views/pywolf/entry/__init__.py
|
tevawolf/pywolf
|
94e3c26d8c3b279990624f23658e22ab00eead46
|
[
"BSD-3-Clause"
] | null | null | null |
# Package facade: re-export the entry and entry-cancel view callables.
from .entry import entry
from .entry_cancel import entry_cancel
| 21.333333
| 38
| 0.84375
| 10
| 64
| 5.2
| 0.4
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 64
| 2
| 39
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4d1e52d67176af840316529db9bcc75976c8d6e4
| 9,281
|
py
|
Python
|
noscrypto/tests/tests.py
|
morsisko/NosCrypto
|
9f22c8b9f4c89f21f85710b0a3dfaf7130ae2922
|
[
"MIT"
] | 11
|
2020-09-12T09:07:29.000Z
|
2021-08-30T18:10:21.000Z
|
noscrypto/tests/tests.py
|
morsisko/NosCrypto
|
9f22c8b9f4c89f21f85710b0a3dfaf7130ae2922
|
[
"MIT"
] | 4
|
2020-09-22T23:49:08.000Z
|
2022-01-25T17:30:55.000Z
|
noscrypto/tests/tests.py
|
morsisko/NosCrypto
|
9f22c8b9f4c89f21f85710b0a3dfaf7130ae2922
|
[
"MIT"
] | 1
|
2020-10-22T13:08:19.000Z
|
2020-10-22T13:08:19.000Z
|
import unittest
import string
import random
from noscrypto import Client, Server, Utils
def getPseudoRandomString(length=20):
    """Return a pseudo-random string of ``length`` printable characters.

    Uses ``random.choices`` instead of a manual join-over-``random.choice``
    loop (which also left an unused loop variable). Not cryptographically
    secure — it only feeds round-trip encryption tests in this module.
    """
    return "".join(random.choices(string.printable, k=length))
class TestClientLogin(unittest.TestCase):
    """Client-side login-packet encryption/decryption against captured traffic."""

    def test_auto_endline(self):
        # Encrypting without a trailing newline must match encrypting with one.
        self.assertEqual(Client.LoginEncrypt(b"Test"), Client.LoginEncrypt(b"Test\n"))

    def test_real_loginpacket_encryption(self):
        # Known-good ciphertext captured from the real client for this NoS0575 login packet.
        self.assertEqual(
            Client.LoginEncrypt("NoS0575 571739 admin C7AD44CBAD762A5DA0A452F9E854FDC1E0E7A52A38015F23F3EAB1D80B931DD472634DFAC71CD34EBC35D16AB7FB8A90C81F975113D6C7538DC69DD8DE9077EC 6c38fd45-ef91-4054-82b8-9c96b4b0b209 00727371 0\x0B0.9.3.3134 0 9825E5FC051F9A17AEFE047A48F4C6F9\n".encode("ascii")),
            b"\x9C\xBB\x9F\x02\x05\x03\x05\xF2\x05\x03\x01\x03\xFF\x09\xF2\xB1\xB6\xBD\xB9\xBC\xF2\x8F\x03\x91\x96\x06\x06\x8F\x90\x91\x96\x03\x04\x00\x91\x05\x96\x91\x02\x91\x06\x05\x00\x94\x09\x95\x0A\x05\x06\x94\x96\x8F\x01\x95\x02\x95\x03\x91\x05\x00\x91\xFF\x0A\x02\x01\x05\x94\x00\xFF\x94\xFF\x95\x91\x90\x01\x96\x0A\x02\x90\x09\xFF\x01\x96\x96\x06\x03\x00\x04\xFF\x06\x96\x94\x91\x8F\x03\x01\x8F\x96\xFF\x06\x95\x90\x8F\xFF\x05\x96\x01\x04\x91\x90\x03\x94\x90\x0A\x91\x09\x02\x8F\x0A\x01\x94\x09\x03\x05\x01\x01\xFF\x96\x04\x8F\x03\x05\xFF\x0A\x96\x8F\x04\x09\x96\x96\x0A\x96\x95\x09\x02\x03\x03\x95\x8F\xF2\x04\xAF\xFF\x0A\xB4\xB6\x06\x05\xFD\xB5\xB4\x09\x01\xFD\x06\x02\x05\x06\xFD\x0A\x00\xB0\x0A\xFD\x09\xAF\x09\x04\xB0\x06\xB0\x02\xB0\x00\x02\x09\xF2\x02\x02\x03\x00\x03\xFF\x03\x01\xF2\x02\xD7\x02\xFC\x09\xFC\xFF\xFC\xFF\x01\xFF\x06\xF2\x02\xF2\x09\x0A\x00\x05\x95\x05\x94\x8F\x02\x05\x01\x94\x09\x91\x01\x03\x91\x95\x94\x95\x02\x06\x03\x91\x06\x0A\x94\x06\x8F\x04\x94\x09\xD8")

    def test_real_loginpacket_decryption(self):
        # Captured server reply ("failc 5") as seen by the client.
        self.assertEqual(Client.LoginDecrypt(b"\x75\x70\x78\x7B\x72\x2F\x44\x19"), b"failc 5\n")
class TestServerLogin(unittest.TestCase):
    """Server-side login-packet encryption/decryption — the mirror of TestClientLogin."""

    def test_auto_endline(self):
        # Encrypting without a trailing newline must match encrypting with one.
        self.assertEqual(Server.LoginEncrypt(b"Test"), Server.LoginEncrypt(b"Test\n"))

    def test_real_loginpacket_decryption(self):
        # The server must recover the exact plaintext of the captured client login packet.
        self.assertEqual(Server.LoginDecrypt(b"\x9C\xBB\x9F\x02\x05\x03\x05\xF2\x05\x03\x01\x03\xFF\x09\xF2\xB1\xB6\xBD\xB9\xBC\xF2\x8F\x03\x91\x96\x06\x06\x8F\x90\x91\x96\x03\x04\x00\x91\x05\x96\x91\x02\x91\x06\x05\x00\x94\x09\x95\x0A\x05\x06\x94\x96\x8F\x01\x95\x02\x95\x03\x91\x05\x00\x91\xFF\x0A\x02\x01\x05\x94\x00\xFF\x94\xFF\x95\x91\x90\x01\x96\x0A\x02\x90\x09\xFF\x01\x96\x96\x06\x03\x00\x04\xFF\x06\x96\x94\x91\x8F\x03\x01\x8F\x96\xFF\x06\x95\x90\x8F\xFF\x05\x96\x01\x04\x91\x90\x03\x94\x90\x0A\x91\x09\x02\x8F\x0A\x01\x94\x09\x03\x05\x01\x01\xFF\x96\x04\x8F\x03\x05\xFF\x0A\x96\x8F\x04\x09\x96\x96\x0A\x96\x95\x09\x02\x03\x03\x95\x8F\xF2\x04\xAF\xFF\x0A\xB4\xB6\x06\x05\xFD\xB5\xB4\x09\x01\xFD\x06\x02\x05\x06\xFD\x0A\x00\xB0\x0A\xFD\x09\xAF\x09\x04\xB0\x06\xB0\x02\xB0\x00\x02\x09\xF2\x02\x02\x03\x00\x03\xFF\x03\x01\xF2\x02\xD7\x02\xFC\x09\xFC\xFF\xFC\xFF\x01\xFF\x06\xF2\x02\xF2\x09\x0A\x00\x05\x95\x05\x94\x8F\x02\x05\x01\x94\x09\x91\x01\x03\x91\x95\x94\x95\x02\x06\x03\x91\x06\x0A\x94\x06\x8F\x04\x94\x09\xD8"),
            "NoS0575 571739 admin C7AD44CBAD762A5DA0A452F9E854FDC1E0E7A52A38015F23F3EAB1D80B931DD472634DFAC71CD34EBC35D16AB7FB8A90C81F975113D6C7538DC69DD8DE9077EC 6c38fd45-ef91-4054-82b8-9c96b4b0b209 00727371 0\x0B0.9.3.3134 0 9825E5FC051F9A17AEFE047A48F4C6F9\n".encode("ascii"))

    def test_real_loginpacket_encryption(self):
        # Encrypting the "failc 5" reply must reproduce the captured wire bytes.
        self.assertEqual(Server.LoginEncrypt("failc 5\n".encode("ascii")), b"\x75\x70\x78\x7B\x72\x2F\x44\x19")
class TestClientServerLoginFlow(unittest.TestCase):
    """Round-trip checks: what one side encrypts, the other must decrypt back."""

    def test_server_send_client_recv(self):
        plaintext = (getPseudoRandomString() + "\n").encode("ascii")
        wire = Server.LoginEncrypt(plaintext)
        self.assertEqual(Client.LoginDecrypt(wire), plaintext)

    def test_client_send_server_recv(self):
        plaintext = (getPseudoRandomString() + "\n").encode("ascii")
        wire = Client.LoginEncrypt(plaintext)
        self.assertEqual(Server.LoginDecrypt(wire), plaintext)
class TestClientPackUnpack(unittest.TestCase):
    """Low-level mask/pack/unpack helpers, checked against captured client output."""

    def test_client_mask_generator(self):
        # cp1250-encoded packet with Polish characters exercises the non-ASCII path.
        packet = "14326 say dfskjda12312ąśąźżźżććżąąąśąąą2137dadaęóąśłżźćń;1122".encode("cp1250")
        generated_mask = "".join([str(int(i)) for i in Utils._GetMask(packet, Client._ENCRYPTION_TABLE)])
        self.assertEqual(generated_mask, "1111110001000000011111000000000000000001111000000000000001111")

    def test_server_pack_client_unpack(self):
        # _Unpack must invert _Pack for the same encryption/decryption table pair.
        self.assertEqual(Utils._Unpack(Utils._Pack("2137 say ąźć123pd".encode("cp1250"), Client._ENCRYPTION_TABLE), Client._DECRYPTION_TABLE), "2137 say ąźć123pd".encode("cp1250"))

    def test_real_client_pack(self):
        # Captured wire bytes for a short "walk" packet.
        self.assertEqual(Utils._Pack("17535 walk 20 26 1 11".encode("ascii"), Client._ENCRYPTION_TABLE), b"\x86\x5B\x97\x91\x04\x88\x9E\x93\x94\x8B\x16\x41\x6A\x15\x15\x50\xFF")

    def test_real_client_pack2(self):
        # Longer captured packet (friend-list request) with many repeated digits.
        self.assertEqual(Utils._Pack("48967 c_blist 0 0 0 0 0 0 0 0 17 185 302 882 942 999 1591 1592 4083 5065 5068 5069 5070 5206 5307 5361 5982 5991".encode("ascii"), Client._ENCRYPTION_TABLE), b"\x86\x8C\xDA\xB1\x07\x9C\xA0\x9D\x93\x96\x8C\x8B\xE4\x11\x41\x41\x41\x41\x41\x41\x41\x41\x5B\x15\xC9\x17\x46\x1C\xC6\x1D\x86\x1D\xDD\x15\x9D\x51\x59\xD6\x18\x4C\x71\x94\xA9\x19\x4A\xC1\x94\xAD\x19\x4B\x41\x96\x4A\x19\x74\xB1\x97\xA5\x19\xDC\x61\x9D\xD5\xFF")

    def test_real_client_pack_special_characters(self):
        # Same cp1250 packet as the mask test, checked against captured output.
        self.assertEqual(Utils._Pack("14326 say dfskjda12312ąśąźżźżććżąąąśąąą2137dadaęóąśłżźćń;1122".encode("cp1250"), Client._ENCRYPTION_TABLE),
            b"\x86\x58\x76\xA1\x03\x8C\x9E\x86\x81\x10\x07\x9B\x99\x8C\x94\x95\x9B\x9E\x85\x56\x75\x60\x11\x46\x63\x46\x60\x40\x60\x40\x19\x19\x40\x46\x46\x46\x63\x46\x46\x46\x84\x65\x7B\x0E\x9B\x9E\x9B\x9E\x15\x0C\x46\x63\x4C\x40\x60\x19\x0E\xC4\x84\x55\x66\xFF")
class TestClientWorld(unittest.TestCase):
    """Client world-channel encryption/decryption; WorldEncrypt is keyed by a session number."""

    def test_real_client_encrypt(self):
        session = 53836
        packet = "17535 walk 20 26 1 11".encode("ascii")
        self.assertEqual(Client.WorldEncrypt(packet, session), b"\xFA\xCF\x0B\x05\x78\xFC\x12\x07\x08\xFF\x8A\xB5\xDE\x89\x89\xC4\x73")

    def test_real_client_encrypt2(self):
        # Different session key to confirm the session parameter affects the output.
        session = 10685
        packet = "48967 c_blist 0 0 0 0 0 0 0 0 17 185 302 882 942 999 1591 1592 4083 5065 5068 5069 5070 5206 5307 5361 5982 5991".encode("ascii")
        self.assertEqual(Client.WorldEncrypt(packet, session), b"\x42\x4C\x16\x6F\xC1\x5C\x60\x5B\x4D\x52\x4C\x45\x24\xCF\x7F\x7F\x7F\x7F\x7F\x7F\x7F\x7F\x95\xD3\x07\xD1\x82\xDC\x02\xDB\x42\xDB\x1B\xD3\x5B\x8F\x97\x12\xD8\x8C\xAF\x54\x67\xD7\x86\xFF\x54\x6B\xD7\x85\x7F\x52\x86\xD7\xB4\x6F\x51\x63\xD7\x1C\x9F\x5B\x13\x39")

    def test_real_client_encypt_special_characters(self):
        # cp1250 packet with Polish characters through the world-channel cipher.
        session = 34353
        packet = "14326 say dfskjda12312ąśąźżźżććżąąąśąąą2137dadaęóąśłżźćń;1122".encode("cp1250")
        self.assertEqual(Client.WorldEncrypt(packet, session), b"\xF7\xC9\xE7\x12\x74\xFD\x0F\xF7\xF2\x81\x78\x0C\x0A\xFD\x05\x06\x0C\x0F\xF6\xC7\xE6\xD1\x82\xB7\xD4\xB7\xD1\xB1\xD1\xB1\x8A\x8A\xB1\xB7\xB7\xB7\xD4\xB7\xB7\xB7\xF5\xD6\xEC\x7F\x0C\x0F\x0C\x0F\x86\x7D\xB7\xD4\xBD\xB1\xD1\x8A\x7F\x35\xF5\xC6\xD7\x70")

    def test_real_client_decrypt(self):
        # WorldDecrypt on the client side takes no session argument.
        self.assertEqual(Client.WorldDecrypt(b"\x04\x8C\x8B\x9E\x8B\x96\x16\x65\x16\x65\x1A\x41\xA4\x14\x15\x46\x8E\xFF"), "stat 221 221 60 60 0 1024\n".encode("ascii"))
class TestServerWorld(unittest.TestCase):
    """Server world-channel: decrypts the same captures TestClientWorld encrypts."""

    def test_real_server_decrypt(self):
        session = 53836
        packet = b"\xFA\xCF\x0B\x05\x78\xFC\x12\x07\x08\xFF\x8A\xB5\xDE\x89\x89\xC4\x73"
        self.assertEqual(Server.WorldDecrypt(packet, session), "17535 walk 20 26 1 11".encode("ascii"))

    def test_real_server_encrypt2(self):
        # NOTE(review): name says "encrypt" but this exercises WorldDecrypt — likely a copy-paste name.
        session = 10685
        packet = b"\x42\x4C\x16\x6F\xC1\x5C\x60\x5B\x4D\x52\x4C\x45\x24\xCF\x7F\x7F\x7F\x7F\x7F\x7F\x7F\x7F\x95\xD3\x07\xD1\x82\xDC\x02\xDB\x42\xDB\x1B\xD3\x5B\x8F\x97\x12\xD8\x8C\xAF\x54\x67\xD7\x86\xFF\x54\x6B\xD7\x85\x7F\x52\x86\xD7\xB4\x6F\x51\x63\xD7\x1C\x9F\x5B\x13\x39"
        self.assertEqual(Server.WorldDecrypt(packet, session), "48967 c_blist 0 0 0 0 0 0 0 0 17 185 302 882 942 999 1591 1592 4083 5065 5068 5069 5070 5206 5307 5361 5982 5991".encode("ascii"))

    def test_real_server_decrypt_special_characters(self):
        # cp1250 packet with Polish characters through the world-channel cipher.
        session = 34353
        packet = b"\xF7\xC9\xE7\x12\x74\xFD\x0F\xF7\xF2\x81\x78\x0C\x0A\xFD\x05\x06\x0C\x0F\xF6\xC7\xE6\xD1\x82\xB7\xD4\xB7\xD1\xB1\xD1\xB1\x8A\x8A\xB1\xB7\xB7\xB7\xD4\xB7\xB7\xB7\xF5\xD6\xEC\x7F\x0C\x0F\x0C\x0F\x86\x7D\xB7\xD4\xBD\xB1\xD1\x8A\x7F\x35\xF5\xC6\xD7\x70"
        self.assertEqual(Server.WorldDecrypt(packet, session), "14326 say dfskjda12312ąśąźżźżććżąąąśąąą2137dadaęóąśłżźćń;1122".encode("cp1250"))

    def test_real_server_encrypt(self):
        # WorldEncrypt on the server side takes no session argument.
        self.assertEqual(Server.WorldEncrypt("stat 221 221 60 60 0 1024\n".encode("ascii")), b"\x04\x8C\x8B\x9E\x8B\x96\x16\x65\x16\x65\x1A\x41\xA4\x14\x15\x46\x8E\xFF")
# Allow running the test module directly (python tests.py) as well as via a runner.
if __name__ == "__main__":
    unittest.main()
| 84.372727
| 1,027
| 0.716625
| 1,577
| 9,281
| 4.153456
| 0.173748
| 0.022443
| 0.008244
| 0.00916
| 0.782595
| 0.71145
| 0.652824
| 0.58626
| 0.538931
| 0.499389
| 0
| 0.270889
| 0.120569
| 9,281
| 110
| 1,028
| 84.372727
| 0.53161
| 0
| 0
| 0.210526
| 0
| 0.236842
| 0.580835
| 0.494604
| 0
| 0
| 0
| 0
| 0.276316
| 1
| 0.289474
| false
| 0
| 0.052632
| 0.013158
| 0.434211
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d4c9936e22bd173ce2a2b15d8f5c38a840f9fb5
| 313
|
py
|
Python
|
src/exceptions.py
|
headout/dagen-airflow
|
e4c2745180b9f71d1b67420df4a06a452854667c
|
[
"MIT"
] | 2
|
2021-05-31T05:47:53.000Z
|
2021-07-04T10:04:24.000Z
|
src/exceptions.py
|
headout/dagen-airflow
|
e4c2745180b9f71d1b67420df4a06a452854667c
|
[
"MIT"
] | null | null | null |
src/exceptions.py
|
headout/dagen-airflow
|
e4c2745180b9f71d1b67420df4a06a452854667c
|
[
"MIT"
] | 2
|
2020-09-26T20:26:12.000Z
|
2021-07-17T15:02:46.000Z
|
class TemplateNotFoundError(FileNotFoundError):
    """Raised when no template exists for a given template ID.

    Carries the offending ID on ``template_id`` and a human-readable
    message on ``detail``, which is also what ``str(exc)`` returns.
    """

    def __init__(self, template_id, *args, **kwargs):
        super().__init__(*args, **kwargs)
        message = f'Template not found with Template ID - "{template_id}"'
        self.template_id = template_id
        self.detail = message

    def __str__(self):
        return self.detail
| 34.777778
| 78
| 0.670927
| 36
| 313
| 5.388889
| 0.5
| 0.257732
| 0.14433
| 0.206186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214058
| 313
| 8
| 79
| 39.125
| 0.788618
| 0
| 0
| 0
| 0
| 0
| 0.169329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4df682572cdb6c53daebc7ae74c7e1e968876c1c
| 86
|
py
|
Python
|
tensorbreeze/image_encoder/__init__.py
|
mingruimingrui/TensorBreeze
|
5373ab1d10f276b1e9c08c75af471ff74da635fe
|
[
"MIT"
] | null | null | null |
tensorbreeze/image_encoder/__init__.py
|
mingruimingrui/TensorBreeze
|
5373ab1d10f276b1e9c08c75af471ff74da635fe
|
[
"MIT"
] | null | null | null |
tensorbreeze/image_encoder/__init__.py
|
mingruimingrui/TensorBreeze
|
5373ab1d10f276b1e9c08c75af471ff74da635fe
|
[
"MIT"
] | null | null | null |
# Public API of the image_encoder subpackage: only the op-builder is exported.
from .image_encoder import add_image_encoder_ops

__all__ = ['add_image_encoder_ops']
| 21.5
| 48
| 0.837209
| 13
| 86
| 4.692308
| 0.538462
| 0.590164
| 0.491803
| 0.590164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 86
| 3
| 49
| 28.666667
| 0.782051
| 0
| 0
| 0
| 0
| 0
| 0.244186
| 0.244186
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
421b034d43218dd7e00cb01e16bbdf3a08ff16f8
| 89
|
py
|
Python
|
POO1 - Encapsulamento e publico_privado/teste.py
|
PedroHAlvesS/Exercicios-Python-Curso-em-video
|
7ab187d65ddada252450b048608dc4898e5c67c8
|
[
"MIT"
] | null | null | null |
POO1 - Encapsulamento e publico_privado/teste.py
|
PedroHAlvesS/Exercicios-Python-Curso-em-video
|
7ab187d65ddada252450b048608dc4898e5c67c8
|
[
"MIT"
] | null | null | null |
POO1 - Encapsulamento e publico_privado/teste.py
|
PedroHAlvesS/Exercicios-Python-Curso-em-video
|
7ab187d65ddada252450b048608dc4898e5c67c8
|
[
"MIT"
] | null | null | null |
from Conta import Conta

# Demonstrates calling class-level helpers without instantiating Conta.
print(Conta.codigo_banco())
# codigos_bancos() presumably returns a mapping of bank names to codes — "BB" key looked up here.
print(Conta.codigos_bancos()["BB"])
| 17.8
| 35
| 0.764045
| 13
| 89
| 5.076923
| 0.692308
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 89
| 5
| 35
| 17.8
| 0.804878
| 0
| 0
| 0
| 0
| 0
| 0.022472
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
421bf4cb0efa9e0cefd382aab4ca723c1f24d2f3
| 5,633
|
py
|
Python
|
tests/test_registrar_name/test_create_record_delete_record.py
|
plato79/UnofficialDDNSnix
|
4a2a67cc06a6346f90726c33ee388374b7922f2e
|
[
"MIT"
] | 3
|
2015-10-18T09:14:59.000Z
|
2018-02-25T09:41:58.000Z
|
tests/test_registrar_name/test_create_record_delete_record.py
|
plato79/UnofficialDDNSnix
|
4a2a67cc06a6346f90726c33ee388374b7922f2e
|
[
"MIT"
] | 3
|
2015-02-05T00:52:30.000Z
|
2020-04-17T06:21:00.000Z
|
tests/test_registrar_name/test_create_record_delete_record.py
|
plato79/UnofficialDDNSnix
|
4a2a67cc06a6346f90726c33ee388374b7922f2e
|
[
"MIT"
] | 3
|
2016-01-09T04:36:10.000Z
|
2020-04-17T06:57:47.000Z
|
#!/usr/bin/env python2.6
import textwrap
import time
from tests.test_registrar_name.test_request_json import initialize_simulation
def _heavy_lifting(response, log_file, session, capsys, stdout_expected, stderr_expected, log_expected, delete):
    """Run one create/delete call and assert its stdout, stderr, and log output.

    When ``delete`` is truthy it is passed to ``session.delete_record``;
    otherwise ``session.create_record`` is invoked. Only log text written
    after the call starts is compared against ``log_expected``.
    """
    initialize_simulation(response)

    # Remember where the log currently ends so only new output is compared.
    with open(log_file.name, 'r') as handle:
        handle.seek(0, 2)
        start_of_new_log = handle.tell()

    if delete:
        session.delete_record(delete)
    else:
        session.create_record()

    captured_out, captured_err = capsys.readouterr()
    assert stdout_expected == captured_out
    assert stderr_expected == captured_err

    with open(log_file.name, 'r') as handle:
        handle.seek(start_of_new_log)
        fresh_log = handle.read(10240)
    assert log_expected == fresh_log
def test_create_record_sub(session, log_file, capsys):
    """create_record for subdomain 'sub' emits the expected stdout and log lines."""
    # Captured request/response fixtures for the name.com DNS create endpoint.
    post = '{"priority": 10, "content": "127.0.0.1", "hostname": "sub", "type": "A", "ttl": 300}'
    response = '{"result":{"code":100,"message":"Command Successful"},"record_id":238454450,"name":"sub.example.com","type":"A","content":"127.0.0.1","ttl":300,"create_date":"2014-01-03 19:32:53","priority":10}'
    json = "{u'priority': 10, u'create_date': u'2014-01-03 19:32:53', u'name': u'sub.example.com', u'content': u'127.0.0.1', u'result': {u'message': u'Command Successful', u'code': 100}, u'ttl': 300, u'record_id': 238454450, u'type': u'A'}"
    url = 'http://127.0.0.1/dns/create/example.com'
    stdout_expected = textwrap.dedent("""\
        Method create_record start.
        Sending POST data: {post}
        Opening connection to {url}
        Response: {response}
        JSON: {json}
        Method create_record end.
        """.format(url=url, response=response, json=json, post=post))
    stderr_expected = ''
    # Log lines are prefixed with a timestamp; build it the same way the logger does.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.create_record Method create_record start.
        {ts} DEBUG registrar_base.create_record Sending POST data: {post}
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_base.create_record Response: {response}
        {ts} DEBUG registrar_base.create_record JSON: {json}
        {ts} DEBUG registrar_base.create_record Method create_record end.
        """.format(url=url, response=response, json=json, post=post, ts=timestamp))
    # delete=None -> exercises create_record.
    _heavy_lifting(response, log_file, session, capsys, stdout_expected, stderr_expected, log_expected, None)
def test_create_record_main(session, log_file, capsys):
    """create_record for the apex domain ('.' hostname) emits the expected output."""
    # Same fixtures as the subdomain test, but hostname "." and apex record name.
    post = '{"priority": 10, "content": "127.0.0.1", "hostname": ".", "type": "A", "ttl": 300}'
    response = '{"result":{"code":100,"message":"Command Successful"},"record_id":238454450,"name":"example.com","type":"A","content":"127.0.0.1","ttl":300,"create_date":"2014-01-03 19:32:53","priority":10}'
    json = "{u'priority': 10, u'create_date': u'2014-01-03 19:32:53', u'name': u'example.com', u'content': u'127.0.0.1', u'result': {u'message': u'Command Successful', u'code': 100}, u'ttl': 300, u'record_id': 238454450, u'type': u'A'}"
    url = 'http://127.0.0.1/dns/create/example.com'
    # Point the session at the apex domain before the call.
    session.config['domain'] = 'example.com'
    stdout_expected = textwrap.dedent("""\
        Method create_record start.
        Sending POST data: {post}
        Opening connection to {url}
        Response: {response}
        JSON: {json}
        Method create_record end.
        """.format(url=url, response=response, json=json, post=post))
    stderr_expected = ''
    # Log lines are prefixed with a timestamp; build it the same way the logger does.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.create_record Method create_record start.
        {ts} DEBUG registrar_base.create_record Sending POST data: {post}
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_base.create_record Response: {response}
        {ts} DEBUG registrar_base.create_record JSON: {json}
        {ts} DEBUG registrar_base.create_record Method create_record end.
        """.format(url=url, response=response, json=json, post=post, ts=timestamp))
    # delete=None -> exercises create_record.
    _heavy_lifting(response, log_file, session, capsys, stdout_expected, stderr_expected, log_expected, None)
def test_delete_record(session, log_file, capsys):
    """delete_record for a known record ID emits the expected stdout and log lines."""
    # Captured request/response fixtures for the name.com DNS delete endpoint.
    post = '{"record_id": "12345226"}'
    response = '{"result":{"code":100,"message":"Command Successful"}}'
    json = "{u'result': {u'message': u'Command Successful', u'code': 100}}"
    url = 'http://127.0.0.1/dns/delete/example.com'
    stdout_expected = textwrap.dedent("""\
        Method delete_record start.
        Sending POST data: {post}
        Opening connection to {url}
        Response: {response}
        JSON: {json}
        Method delete_record end.
        """.format(url=url, response=response, json=json, post=post))
    stderr_expected = ''
    # Log lines are prefixed with a timestamp; build it the same way the logger does.
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%S")
    log_expected = textwrap.dedent("""\
        {ts} DEBUG registrar_base.delete_record Method delete_record start.
        {ts} DEBUG registrar_base.delete_record Sending POST data: {post}
        {ts} DEBUG registrar_base._request_json Opening connection to {url}
        {ts} DEBUG registrar_base.delete_record Response: {response}
        {ts} DEBUG registrar_base.delete_record JSON: {json}
        {ts} DEBUG registrar_base.delete_record Method delete_record end.
        """.format(url=url, response=response, json=json, post=post, ts=timestamp))
    # Passing a record ID -> exercises delete_record instead of create_record.
    _heavy_lifting(response, log_file, session, capsys, stdout_expected, stderr_expected, log_expected, "12345226")
| 54.68932
| 240
| 0.660927
| 761
| 5,633
| 4.725361
| 0.140604
| 0.070078
| 0.080089
| 0.100111
| 0.861235
| 0.854561
| 0.842603
| 0.797831
| 0.797831
| 0.78059
| 0
| 0.04789
| 0.188177
| 5,633
| 103
| 241
| 54.68932
| 0.738465
| 0.004083
| 0
| 0.576087
| 0
| 0.076087
| 0.596435
| 0.164706
| 0
| 0
| 0
| 0
| 0.032609
| 1
| 0.043478
| false
| 0
| 0.032609
| 0
| 0.076087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
421ee5c72aab346eda94c460623084ecf4ee9c02
| 77
|
py
|
Python
|
1. Dive into Python/1.3. Division.py
|
ahmetutkuozkan/my_ceng240_exercises_solutions
|
167bb9938515870ec1f01853933edc3b55937bff
|
[
"MIT"
] | null | null | null |
1. Dive into Python/1.3. Division.py
|
ahmetutkuozkan/my_ceng240_exercises_solutions
|
167bb9938515870ec1f01853933edc3b55937bff
|
[
"MIT"
] | null | null | null |
1. Dive into Python/1.3. Division.py
|
ahmetutkuozkan/my_ceng240_exercises_solutions
|
167bb9938515870ec1f01853933edc3b55937bff
|
[
"MIT"
] | null | null | null |
x=int(input()); y=int(input()); print(str(int(x/y)) + "\n" + str(int(x%y)))
| 38.5
| 76
| 0.519481
| 16
| 77
| 2.5
| 0.4375
| 0.4
| 0.35
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 77
| 1
| 77
| 77
| 0.57971
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
423caa6f7c5688bba91a0b16fcff0a7034fcf6f7
| 62
|
py
|
Python
|
integration_tests/test-packages/python/testpkgdos/testpkgdos/__init__.py
|
franklinen/doppel-cli
|
959041ceec578b63fa507b0d71e2ce9e752fb5b7
|
[
"BSD-3-Clause"
] | 5
|
2019-03-11T12:44:59.000Z
|
2021-02-01T08:10:41.000Z
|
integration_tests/test-packages/python/testpkgdos/testpkgdos/__init__.py
|
franklinen/doppel-cli
|
959041ceec578b63fa507b0d71e2ce9e752fb5b7
|
[
"BSD-3-Clause"
] | 174
|
2019-01-20T03:08:44.000Z
|
2021-11-03T04:25:56.000Z
|
integration_tests/test-packages/python/testpkgdos/testpkgdos/__init__.py
|
franklinen/doppel-cli
|
959041ceec578b63fa507b0d71e2ce9e752fb5b7
|
[
"BSD-3-Clause"
] | 17
|
2019-04-16T18:23:53.000Z
|
2021-10-01T15:01:40.000Z
|
# flake8: noqa
# Re-export add_numbers as the package's public API.
from testpkgdos.add_numbers import add_numbers
| 20.666667
| 46
| 0.83871
| 9
| 62
| 5.555556
| 0.777778
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0.112903
| 62
| 2
| 47
| 31
| 0.890909
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4251399f559f14d774ca9a605b3315f85df0a48b
| 316
|
py
|
Python
|
budgethelper/models/source.py
|
Preocts/budgethelper
|
160fc575e969a3cd009874bf295c50421cdcd999
|
[
"MIT"
] | null | null | null |
budgethelper/models/source.py
|
Preocts/budgethelper
|
160fc575e969a3cd009874bf295c50421cdcd999
|
[
"MIT"
] | null | null | null |
budgethelper/models/source.py
|
Preocts/budgethelper
|
160fc575e969a3cd009874bf295c50421cdcd999
|
[
"MIT"
] | null | null | null |
import dataclasses
import datetime
from typing import Optional


@dataclasses.dataclass(frozen=True)
class Source:
    """Model of Source Table row.

    Immutable (frozen) value object; ``uid`` is None until the row is
    persisted and assigned an ID.
    """

    name: str
    # Bug fix: the originals used `= datetime.datetime.now()`, which is
    # evaluated ONCE at import time, so every Source instance shared the
    # same timestamp. default_factory calls now() per instance instead.
    created_on: datetime.datetime = dataclasses.field(
        default_factory=datetime.datetime.now
    )
    updated_on: datetime.datetime = dataclasses.field(
        default_factory=datetime.datetime.now
    )
    uid: Optional[int] = None
| 22.571429
| 59
| 0.734177
| 39
| 316
| 5.897436
| 0.615385
| 0.417391
| 0.417391
| 0.226087
| 0.321739
| 0.321739
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167722
| 316
| 13
| 60
| 24.307692
| 0.874525
| 0.079114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4278c449382f6c797cc95c897df2633a8ccc679e
| 113
|
py
|
Python
|
pipy/pipeline/__init__.py
|
rhsmits91/pipy
|
b38100203711fad715078a00c6074ea63af06893
|
[
"MIT"
] | null | null | null |
pipy/pipeline/__init__.py
|
rhsmits91/pipy
|
b38100203711fad715078a00c6074ea63af06893
|
[
"MIT"
] | null | null | null |
pipy/pipeline/__init__.py
|
rhsmits91/pipy
|
b38100203711fad715078a00c6074ea63af06893
|
[
"MIT"
] | null | null | null |
# Re-export the core pipeline primitives and the stage modules at package level.
from pipy.pipeline._base import Pipeline, Skippy, Step
from pipy.pipeline import extract, load, model, transform
| 37.666667
| 57
| 0.814159
| 16
| 113
| 5.6875
| 0.6875
| 0.175824
| 0.351648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115044
| 113
| 2
| 58
| 56.5
| 0.91
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
42a75320961c345cea2b0d8f7335c9139b1b0f60
| 119
|
py
|
Python
|
src/postprocess/__init__.py
|
jwpttcg66/ExcelToTransfer
|
3afc0cf088f4c991bbf4dc2d6d1f395a71cbc3c7
|
[
"Apache-2.0"
] | 47
|
2017-06-23T07:47:50.000Z
|
2022-03-07T22:36:19.000Z
|
xl2code/postprocess/__init__.py
|
twjitm/ExcelToCode
|
d160c75b9b7a305f4b3367d85ee0550572869d3e
|
[
"MIT"
] | 1
|
2019-03-12T06:12:50.000Z
|
2019-04-03T00:50:01.000Z
|
xl2code/postprocess/__init__.py
|
twjitm/ExcelToCode
|
d160c75b9b7a305f4b3367d85ee0550572869d3e
|
[
"MIT"
] | 23
|
2017-05-12T07:46:07.000Z
|
2022-01-22T03:19:50.000Z
|
# Post-processors that emit Java enum / list source files from spreadsheet data.
# NOTE(review): these are implicit relative imports (Python 2 style) — confirm
# the package still targets a Python 2 runtime before modernizing.
from java_file_enum_processor import JavaFileEnumProcessor
from java_file_list_processor import JavaFileListProcessor
| 29.75
| 58
| 0.92437
| 14
| 119
| 7.428571
| 0.642857
| 0.153846
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07563
| 119
| 3
| 59
| 39.666667
| 0.945455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c4406df242575a500f382b83626997be9e4a727f
| 191
|
py
|
Python
|
src/unit/au.py
|
mountain/planetarium
|
14c5a75f9ac0be36f28d059c7bf7a77635d617da
|
[
"MIT"
] | 1
|
2018-03-03T18:58:01.000Z
|
2018-03-03T18:58:01.000Z
|
src/unit/au.py
|
mountain/planetarium
|
14c5a75f9ac0be36f28d059c7bf7a77635d617da
|
[
"MIT"
] | null | null | null |
src/unit/au.py
|
mountain/planetarium
|
14c5a75f9ac0be36f28d059c7bf7a77635d617da
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Conversions from SI units into astronomical units (AU, day, solar mass)."""

# Gaussian gravitational constant k = 0.01720209895; G = k**2 in AU^3 / (Msun * day^2).
G = 0.01720209895 * 0.01720209895

# One astronomical unit in metres (IAU 2012 definition).
_METERS_PER_AU = 149597870700
# Seconds in one day.
_SECONDS_PER_DAY = 86400
# One solar mass in kilograms.
_KG_PER_SOLAR_MASS = 1.98892e30


def fromSI_L(l):
    """Convert a length in metres to astronomical units."""
    return l / _METERS_PER_AU


def fromSI_T(t):
    """Convert a time in seconds to days."""
    return t / _SECONDS_PER_DAY


def fromSI_M(m):
    """Convert a mass in kilograms to solar masses."""
    return m / _KG_PER_SOLAR_MASS
| 11.9375
| 33
| 0.617801
| 30
| 191
| 3.833333
| 0.533333
| 0.234783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.347222
| 0.246073
| 191
| 16
| 34
| 11.9375
| 0.451389
| 0.109948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.428571
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c475067d3f2c0226ae6986ad24bed5cbfc5ae5e7
| 85
|
py
|
Python
|
machida/lib/wallaroo/experimental/base_meta3.py
|
pvmsikrsna/wallaroo
|
a08ef579ec809e5bf4ffe10937b2be20059a0530
|
[
"Apache-2.0"
] | 1,459
|
2017-09-16T13:13:15.000Z
|
2020-10-05T06:19:50.000Z
|
machida/lib/wallaroo/experimental/base_meta3.py
|
pvmsikrsna/wallaroo
|
a08ef579ec809e5bf4ffe10937b2be20059a0530
|
[
"Apache-2.0"
] | 1,413
|
2017-09-14T18:18:14.000Z
|
2020-09-28T08:10:30.000Z
|
machida/lib/wallaroo/experimental/base_meta3.py
|
pvmsikrsna/wallaroo
|
a08ef579ec809e5bf4ffe10937b2be20059a0530
|
[
"Apache-2.0"
] | 80
|
2017-09-27T23:16:23.000Z
|
2020-06-02T09:18:53.000Z
|
from abc import ABCMeta, abstractmethod


class BaseMeta(metaclass=ABCMeta):
    """Abstract base declared with the Python 3 ``metaclass=`` keyword syntax."""
    # abstractmethod is imported but unused here — presumably re-exported for
    # sibling modules / kept for symmetry with a Python 2 variant; TODO confirm.
    pass
| 17
| 39
| 0.788235
| 10
| 85
| 6.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152941
| 85
| 4
| 40
| 21.25
| 0.930556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
c48024e940d0b65d4a24846e74f7d97b3f1fcade
| 181
|
py
|
Python
|
nbdev/core.py
|
foca13/nbdev
|
76352fb5452aad8e2dae0a0a87ccdf85c80dc79d
|
[
"Apache-2.0"
] | null | null | null |
nbdev/core.py
|
foca13/nbdev
|
76352fb5452aad8e2dae0a0a87ccdf85c80dc79d
|
[
"Apache-2.0"
] | 1
|
2022-02-26T06:50:16.000Z
|
2022-02-26T06:50:16.000Z
|
nbdev/core.py
|
foca13/nbdev
|
76352fb5452aad8e2dae0a0a87ccdf85c80dc79d
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['first_function']
# Cell
def first_function(nom):
return(f'{nom} ets burro')
| 25.857143
| 87
| 0.723757
| 26
| 181
| 4.769231
| 0.846154
| 0.209677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.149171
| 181
| 7
| 88
| 25.857143
| 0.792208
| 0.497238
| 0
| 0
| 1
| 0
| 0.325843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.